/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Matteo Landi
 * Copyright (C) 2012-2016 Luigi Rizzo
 * Copyright (C) 2012-2016 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/types.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/kernel.h>		/* MALLOC_DEFINE */
#include <sys/proc.h>
#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

/* M_NETMAP only used in here */
MALLOC_DECLARE(M_NETMAP);
MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

#endif /* __FreeBSD__ */

#ifdef _WIN32
#include <win_glue.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include "netmap_mem2.h"

#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
#define NETMAP_BUF_MAX_NUM  8*4096	/* if too big takes too much time to allocate */
#else
#define NETMAP_BUF_MAX_NUM 20*4096*2	/* large machine */
#endif

#define NETMAP_POOL_MAX_NAMSZ	32


enum {
	NETMAP_IF_POOL = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};


struct netmap_obj_params {
	u_int size;
	u_int num;

	u_int last_size;
	u_int last_num;
};

struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	size_t memtotal;	/* actual total memory space */

	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap; /* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	u_int objtotal;		/* actual total number of objects. */
	u_int numclusters;	/* actual number of clusters */
	u_int objfree;		/* number of free objects. */

	int alloc_done;		/* we have allocated the memory */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T		NM_MTX_T
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)		NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)

struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *, struct netmap_adapter *);
	void (*nmd_deref)(struct netmap_mem_d *, struct netmap_adapter *);
	ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_mem_d *,
			struct netmap_adapter *, struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_mem_d *,
			struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_mem_d *,
			struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_mem_d *,
			struct netmap_adapter *);
};

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	size_t nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
#define NETMAP_MEM_NOMAP	0x10	/* do not map/unmap pdevs */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */
	int nm_numa_domain;	/* local NUMA domain */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	const struct netmap_mem_ops *ops;

	struct netmap_obj_params params[NETMAP_POOLS_NR];

#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
};

int
netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_lut(nmd, lut);
	NMA_UNLOCK(nmd);

	return rv;
}

int
netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
		u_int *memflags, nm_memid_t *memid)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
	NMA_UNLOCK(nmd);

	return rv;
}
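/*
 * Illustrative use of the accessor above (a sketch, not part of the
 * driver): a caller that wants to know whether an allocator is ready
 * and how big it is goes through netmap_mem_get_info() instead of
 * touching nmd->flags directly:
 *
 *	uint64_t memsize;
 *	u_int memflags;
 *	nm_memid_t id;
 *
 *	if (netmap_mem_get_info(nmd, &memsize, &memflags, &id) == 0 &&
 *	    (memflags & NETMAP_MEM_FINALIZED))
 *		nm_prinf("allocator %u ready, %llu bytes", id,
 *		    (unsigned long long)memsize);
 */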
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	vm_paddr_t pa;

#if defined(__FreeBSD__)
	/* This function is called by netmap_dev_pager_fault(), which holds a
	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
	 * spin on the trylock. */
	NMA_SPINLOCK(nmd);
#else
	NMA_LOCK(nmd);
#endif
	pa = nmd->ops->nmd_ofstophys(nmd, off);
	NMA_UNLOCK(nmd);

	return pa;
}

static int
netmap_mem_config(struct netmap_mem_d *nmd)
{
	if (nmd->active) {
		/* already in use. Not fatal, but we
		 * cannot change the configuration
		 */
		return 0;
	}

	return nmd->ops->nmd_config(nmd);
}

ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
{
	ssize_t rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_if_offset(nmd, off);
	NMA_UNLOCK(nmd);

	return rv;
}

static void
netmap_mem_delete(struct netmap_mem_d *nmd)
{
	nmd->ops->nmd_delete(nmd);
}

struct netmap_if *
netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nifp = nmd->ops->nmd_if_new(nmd, na, priv);
	NMA_UNLOCK(nmd);

	return nifp;
}

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_if_delete(nmd, na, nif);
	NMA_UNLOCK(nmd);
}

int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	int rv;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_rings_create(nmd, na);
	NMA_UNLOCK(nmd);

	return rv;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_rings_delete(nmd, na);
	NMA_UNLOCK(nmd);
}

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_check_group(struct netmap_mem_d *, void *);
static void nm_mem_release_id(struct netmap_mem_d *);

nm_memid_t
netmap_mem_get_id(struct netmap_mem_d *nmd)
{
	return nmd->nm_id;
}

#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif

/* circular list of all existing allocators */
static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
static NM_MTX_T nm_mem_list_lock;

struct netmap_mem_d *
__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
{
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return nmd;
}

void
__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
{
	int last;
	NM_MTX_LOCK(nm_mem_list_lock);
	last = (--nmd->refcount == 0);
	if (last)
		nm_mem_release_id(nmd);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	if (last)
		netmap_mem_delete(nmd);
}
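/*
 * Refcounting sketch (illustrative): netmap_mem_get() and
 * netmap_mem_put() are the usual front-ends for the two functions
 * above (this file itself uses them in netmap_mem_init() and
 * netmap_mem_fini()). A typical owner holds one reference for the
 * lifetime of its pointer:
 *
 *	struct netmap_mem_d *mine = netmap_mem_get(nmd);
 *	... use mine ...
 *	netmap_mem_put(mine);	// the last put deletes the allocator
 */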
int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int lasterr = 0;
	if (nm_mem_check_group(nmd, na->pdev) < 0) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	if (netmap_mem_config(nmd))
		goto out;

	nmd->active++;

	nmd->lasterr = nmd->ops->nmd_finalize(nmd, na);

	if (!nmd->lasterr && !(nmd->flags & NETMAP_MEM_NOMAP)) {
		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
	}

out:
	lasterr = nmd->lasterr;
	NMA_UNLOCK(nmd);

	if (lasterr)
		netmap_mem_deref(nmd, na);

	return lasterr;
}

static int
nm_isset(uint32_t *bitmap, u_int i)
{
	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
}


static int
netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
{
	u_int n, j;

	if (p->bitmap == NULL) {
		/* Allocate the bitmap */
		n = (p->objtotal + 31) / 32;
		p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
		if (p->bitmap == NULL) {
			nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
			    p->name);
			return ENOMEM;
		}
		p->bitmap_slots = n;
	} else {
		memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
	}

	p->objfree = 0;
	/*
	 * Set all the bits in the bitmap that have
	 * corresponding buffers to 1 to indicate they are
	 * free.
	 */
	for (j = 0; j < p->objtotal; j++) {
		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("skipping %s %d", p->name, j);
			continue;
		}
		p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) );
		p->objfree++;
	}

	if (netmap_verbose)
		nm_prinf("%s free %u", p->name, p->objfree);
	if (p->objfree == 0) {
		if (netmap_verbose)
			nm_prerr("%s: no objects available", p->name);
		return ENOMEM;
	}

	return 0;
}

static int
netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
{
	int i, error = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];

		error = netmap_init_obj_allocator_bitmap(p);
		if (error)
			return error;
	}

	/*
	 * buffers 0 and 1 are reserved
	 */
	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
		nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
		return ENOMEM;
	}

	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
		/* XXX This check is a workaround that prevents a
		 * NULL pointer crash which currently happens only
		 * with ptnetmap guests.
		 * Removed shared-info --> is the bug still there? */
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
	}
	return 0;
}
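/*
 * Bitmap layout example (for reference): object j maps to 32-bit word
 * j>>5, bit j&31, so e.g. j = 37 lives in bitmap[1] under mask 1U<<5.
 * Clearing the two low bits of bitmap[0] above (~3U) is exactly how
 * the two reserved buffers (indices 0 and 1) are marked permanently
 * in use.
 */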
487 */ 488 netmap_mem_init_bitmaps(nmd); 489 } 490 nmd->ops->nmd_deref(nmd, na); 491 492 nmd->active--; 493 if (last_user) { 494 nmd->lasterr = 0; 495 } 496 497 NMA_UNLOCK(nmd); 498 return last_user; 499 } 500 501 502 /* accessor functions */ 503 static int 504 netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) 505 { 506 lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; 507 #ifdef __FreeBSD__ 508 lut->plut = lut->lut; 509 #endif 510 lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; 511 lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; 512 513 return 0; 514 } 515 516 static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { 517 [NETMAP_IF_POOL] = { 518 .size = 1024, 519 .num = 2, 520 }, 521 [NETMAP_RING_POOL] = { 522 .size = 5*PAGE_SIZE, 523 .num = 4, 524 }, 525 [NETMAP_BUF_POOL] = { 526 .size = 2048, 527 .num = 4098, 528 }, 529 }; 530 531 532 /* 533 * nm_mem is the memory allocator used for all physical interfaces 534 * running in netmap mode. 535 * Virtual (VALE) ports will have each its own allocator. 536 */ 537 extern const struct netmap_mem_ops netmap_mem_global_ops; /* forward */ 538 struct netmap_mem_d nm_mem = { /* Our memory allocator. */ 539 .pools = { 540 [NETMAP_IF_POOL] = { 541 .name = "netmap_if", 542 .objminsize = sizeof(struct netmap_if), 543 .objmaxsize = 4096, 544 .nummin = 10, /* don't be stingy */ 545 .nummax = 10000, /* XXX very large */ 546 }, 547 [NETMAP_RING_POOL] = { 548 .name = "netmap_ring", 549 .objminsize = sizeof(struct netmap_ring), 550 .objmaxsize = 32*PAGE_SIZE, 551 .nummin = 2, 552 .nummax = 1024, 553 }, 554 [NETMAP_BUF_POOL] = { 555 .name = "netmap_buf", 556 .objminsize = 64, 557 .objmaxsize = 65536, 558 .nummin = 4, 559 .nummax = 1000000, /* one million! */ 560 }, 561 }, 562 563 .params = { 564 [NETMAP_IF_POOL] = { 565 .size = 1024, 566 .num = 100, 567 }, 568 [NETMAP_RING_POOL] = { 569 .size = 9*PAGE_SIZE, 570 .num = 200, 571 }, 572 [NETMAP_BUF_POOL] = { 573 .size = 2048, 574 .num = NETMAP_BUF_MAX_NUM, 575 }, 576 }, 577 578 .nm_id = 1, 579 .nm_grp = -1, 580 .nm_numa_domain = -1, 581 582 .prev = &nm_mem, 583 .next = &nm_mem, 584 585 .ops = &netmap_mem_global_ops, 586 587 .name = "1" 588 }; 589 590 static struct netmap_mem_d nm_mem_blueprint; 591 592 /* blueprint for the private memory allocators */ 593 /* XXX clang is not happy about using name as a print format */ 594 static const struct netmap_mem_d nm_blueprint = { 595 .pools = { 596 [NETMAP_IF_POOL] = { 597 .name = "%s_if", 598 .objminsize = sizeof(struct netmap_if), 599 .objmaxsize = 4096, 600 .nummin = 1, 601 .nummax = 100, 602 }, 603 [NETMAP_RING_POOL] = { 604 .name = "%s_ring", 605 .objminsize = sizeof(struct netmap_ring), 606 .objmaxsize = 32*PAGE_SIZE, 607 .nummin = 2, 608 .nummax = 1024, 609 }, 610 [NETMAP_BUF_POOL] = { 611 .name = "%s_buf", 612 .objminsize = 64, 613 .objmaxsize = 65536, 614 .nummin = 4, 615 .nummax = 1000000, /* one million! 
/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
static const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.nm_grp = -1,
	.nm_numa_domain = -1,

	.flags = NETMAP_MEM_PRIVATE,

	.ops = &netmap_mem_global_ops,
};

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSBEGIN(mem2_ ## name); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s"); \
	SYSEND

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);

int netmap_port_numa_affinity = 0;
SYSCTL_INT(_dev_netmap, OID_AUTO, port_numa_affinity,
    CTLFLAG_RDTUN, &netmap_port_numa_affinity, 0,
    "Use NUMA-local memory for memory pools when possible");

/* call with nm_mem_list_lock held */
static int
nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id, int domain)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			nmd->nm_id = id;
			nmd->nm_grp = grp_id;
			nmd->nm_numa_domain = domain;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			nmd->refcount = 1;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	return error;
}

/* call with nm_mem_list_lock *not* held */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd, int grp_id)
{
	int ret;

	NM_MTX_LOCK(nm_mem_list_lock);
	ret = nm_mem_assign_id_locked(nmd, grp_id, -1);
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return ret;
}

/* call with nm_mem_list_lock held */
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;
}

struct netmap_mem_d *
netmap_mem_find(nm_memid_t id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
	do {
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
			nmd->refcount++;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
			return nmd;
		}
		nmd = nmd->next;
	} while (nmd != netmap_last_mem_d);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return NULL;
}
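/*
 * Worked example of the id scan in nm_mem_assign_id_locked(): the
 * allocator list is circular and sorted by nm_id. With existing ids
 * {1, 2, 4} and netmap_last_mem_d pointing at id 2, the candidate is
 * 2 + 1 = 3; the next element has id 4 != 3, so the new allocator
 * takes id 3 and is linked between 2 and 4.
 */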
static int
nm_mem_check_group(struct netmap_mem_d *nmd, void *dev)
{
	int err = 0, id;

	/* Skip non-hw adapters.
	 * VALE ports can use a particular allocator through the
	 * vale-ctl -m option.
	 */
	if (!dev)
		return 0;
	id = nm_iommu_group_id(dev);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp != id) {
		if (netmap_verbose)
			nm_prerr("iommu group mismatch: %d vs %d",
			    nmd->nm_grp, id);
		nmd->lasterr = err = ENOMEM;
	}

	NMA_UNLOCK(nmd);
	return err;
}

static struct lut_entry *
nm_alloc_lut(u_int nobj)
{
	size_t n = sizeof(struct lut_entry) * nobj;
	struct lut_entry *lut;
#ifdef linux
	lut = vmalloc(n);
#else
	lut = nm_os_malloc(n);
#endif
	return lut;
}

static void
nm_free_lut(struct lut_entry *lut, u_int objtotal)
{
	bzero(lut, sizeof(struct lut_entry) * objtotal);
#ifdef linux
	vfree(lut);
#else
	nm_os_free(lut);
#endif
}

#if defined(linux) || defined(_WIN32)
static struct plut_entry *
nm_alloc_plut(u_int nobj)
{
	size_t n = sizeof(struct plut_entry) * nobj;
	struct plut_entry *lut;
	lut = vmalloc(n);
	return lut;
}

static void
nm_free_plut(struct plut_entry * lut)
{
	vfree(lut);
}
#endif /* linux or _WIN32 */


/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
#ifndef _WIN32
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
#else
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
#endif
		return pa;
	}
	/* this is only in case of errors */
	nm_prerr("invalid ofs 0x%x out of 0x%zx 0x%zx 0x%zx", (u_int)o,
	    p[NETMAP_IF_POOL].memtotal,
	    p[NETMAP_IF_POOL].memtotal
		+ p[NETMAP_RING_POOL].memtotal,
	    p[NETMAP_IF_POOL].memtotal
		+ p[NETMAP_RING_POOL].memtotal
		+ p[NETMAP_BUF_POOL].memtotal);
#ifndef _WIN32
	return 0;	/* bad address */
#else
	vm_paddr_t res;
	res.QuadPart = 0;
	return res;
#endif
}
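/*
 * Offset translation example (illustrative numbers): the pools are
 * laid out back to back in if, ring, buf order. If the if pool is
 * 100KB, an offset of 100KB + 5000 falls in the ring pool at relative
 * offset 5000; with a 36864-byte ring object that selects lut entry
 * 5000/36864 = 0 plus the in-object remainder 5000, which is what the
 * loop above computes.
 */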
#ifdef _WIN32

/*
 * win32_build_virtual_memory_for_userspace
 *
 * This function gets all the objects that make up the pools and maps
 * a contiguous virtual memory space for userspace.
 * It works this way:
 * 1 - allocate a Memory Descriptor List as wide as the sum
 *     of the memory needed for the pools
 * 2 - cycle through all the objects in every pool and for every object:
 *
 *     2a - cycle through all the objects in every pool, get the list
 *          of the physical address descriptors
 *     2b - calculate the offset in the array of pages descriptor in the
 *          main MDL
 *     2c - copy the descriptors of the object in the main MDL
 *
 * 3 - return the resulting MDL that needs to be mapped in userland
 *
 * In this way we will have an MDL that describes all the memory for the
 * objects in a single object
 */

PMDL
win32_build_user_vm_map(struct netmap_mem_d* nmd)
{
	u_int memflags, ofs = 0;
	PMDL mainMdl, tempMdl;
	uint64_t memsize;
	int i, j;

	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
		nm_prerr("memory not finalized yet");
		return NULL;
	}

	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
	if (mainMdl == NULL) {
		nm_prerr("failed to allocate mdl");
		return NULL;
	}

	NMA_LOCK(nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		int clsz = p->_clustsize;
		int clobjs = p->_clustentries; /* objects per cluster */
		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
		PPFN_NUMBER pSrc, pDst;

		/* each pool has a different cluster size so we need to reallocate */
		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
		if (tempMdl == NULL) {
			NMA_UNLOCK(nmd);
			nm_prerr("fail to allocate tempMdl");
			IoFreeMdl(mainMdl);
			return NULL;
		}
		pSrc = MmGetMdlPfnArray(tempMdl);
		/* create one entry per cluster, the lut[] has one entry per object */
		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
		}
		IoFreeMdl(tempMdl);
	}
	NMA_UNLOCK(nmd);
	return mainMdl;
}

#endif /* _WIN32 */

/*
 * helper function for OS-specific mmap routines (currently only windows).
 * Given an nmd and a pool index, returns the cluster size and number of
 * clusters.
 * Returns 0 if memory is finalized and the pool is valid, otherwise 1.
 * It should be called under NMA_LOCK(nmd), otherwise the underlying info
 * can change.
 */

int
netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters)
{
	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
		return 1; /* invalid arguments */
	// NMA_LOCK_ASSERT(nmd);
	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		*clustsize = *numclusters = 0;
		return 1; /* not ready yet */
	}
	*clustsize = nmd->pools[pool]._clustsize;
	*numclusters = nmd->pools[pool].numclusters;
	return 0; /* success */
}

static int
netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
    u_int *memflags, nm_memid_t *id)
{
	int error = 0;
	error = netmap_mem_config(nmd);
	if (error)
		goto out;
	if (size) {
		if (nmd->flags & NETMAP_MEM_FINALIZED) {
			*size = nmd->nm_totalsize;
		} else {
			int i;
			*size = 0;
			for (i = 0; i < NETMAP_POOLS_NR; i++) {
				struct netmap_obj_pool *p = nmd->pools + i;
				*size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
			}
		}
	}
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;
out:
	return error;
}
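/*
 * Usage sketch for netmap_mem2_get_pool_info() (illustrative, not
 * part of the driver): an OS-specific mmap routine would call it once
 * per pool, under NMA_LOCK, to size the user mapping:
 *
 *	u_int clsz, nclust;
 *
 *	if (netmap_mem2_get_pool_info(nmd, NETMAP_BUF_POOL,
 *	    &clsz, &nclust) == 0) {
 *		size_t buf_pool_bytes = (size_t)clsz * nclust;
 *		... map buf_pool_bytes starting at the buf pool offset ...
 *	}
 */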
/*
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

static ssize_t
netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	return netmap_if_offset(nmd, addr);
}

/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j = 0;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		nm_prerr("%s request size %d too large", p->name, len);
		return NULL;
	}

	if (p->objfree == 0) {
		nm_prerr("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}


/*
 * free by index, not by address.
 * XXX should we also cleanup the content ?
 */
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		nm_prerr("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		nm_prerr("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}
/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is outside the scope of the current cluster. */
		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

unsigned
netmap_mem_bufsize(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL]._objsize;
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)


#if 0 /* currently unused */
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif

/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0 ; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}

static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut.lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	nm_prdis("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		nm_prerr("breaking with head %d", head);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("freed %d buffers", i);
}
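/*
 * The extra-buffer list built by netmap_extra_alloc() is threaded
 * through the buffers themselves: the first 4 bytes of each buffer
 * hold the index of the next one, and index 0 terminates the list
 * (netmap_extra_free() above walks it exactly this way). An
 * illustrative traversal, given a lut[] mapping indices to buffer
 * addresses:
 *
 *	uint32_t scan = head;
 *	while (scan >= 2) {
 *		uint32_t *buf = lut[scan].vaddr;
 *		... use buffer 'scan' ...
 *		scan = *buf;	// next index, 0 ends the list
 *	}
 */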
/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
		slot[i].ptr = 0;
	}

	nm_prdis("%s: allocated %d buffers, %d available, first at %d",
	    p->name, n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}


static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 1)
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
	nm_prdis("%s: released some buffers, available: %u",
	    nmd->pools[NETMAP_BUF_POOL].name,
	    nmd->pools[NETMAP_BUF_POOL].objfree);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		nm_os_free(p->bitmap);
	p->bitmap = NULL;
	if (p->invalid_bitmap)
		nm_os_free(p->invalid_bitmap);
	p->invalid_bitmap = NULL;
	if (!p->alloc_done) {
		/* allocation was done by somebody else.
		 * Let them clean up after themselves.
		 */
		return;
	}
	if (p->lut) {
		u_int i;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator(). The cluster start
		 * addresses are stored at multiples of p->_clustentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			free(p->lut[i].vaddr, M_NETMAP);
		}
		nm_free_lut(p->lut, p->objtotal);
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
	p->alloc_done = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
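/*
 * Worked example of the rounding below (assuming PAGE_SIZE == 4096):
 * for objsize 2048 the search finds clustentries = 2, i.e. a one-page
 * cluster, so 163840 requested buffers become 81920 clusters with no
 * waste; an objsize of 3072 would instead settle on clustentries = 4
 * (a three-page, 12KB cluster).
 */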
1327 */ 1328 1329 1330 /* call with NMA_LOCK held */ 1331 static int 1332 netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) 1333 { 1334 int i; 1335 u_int clustsize; /* the cluster size, multiple of page size */ 1336 u_int clustentries; /* how many objects per entry */ 1337 1338 /* we store the current request, so we can 1339 * detect configuration changes later */ 1340 p->r_objtotal = objtotal; 1341 p->r_objsize = objsize; 1342 1343 #define MAX_CLUSTSIZE (1<<22) // 4 MB 1344 #define LINE_ROUND NM_BUF_ALIGN // 64 1345 if (objsize >= MAX_CLUSTSIZE) { 1346 /* we could do it but there is no point */ 1347 nm_prerr("unsupported allocation for %d bytes", objsize); 1348 return EINVAL; 1349 } 1350 /* make sure objsize is a multiple of LINE_ROUND */ 1351 i = (objsize & (LINE_ROUND - 1)); 1352 if (i) { 1353 nm_prinf("aligning object by %d bytes", LINE_ROUND - i); 1354 objsize += LINE_ROUND - i; 1355 } 1356 if (objsize < p->objminsize || objsize > p->objmaxsize) { 1357 nm_prerr("requested objsize %d out of range [%d, %d]", 1358 objsize, p->objminsize, p->objmaxsize); 1359 return EINVAL; 1360 } 1361 if (objtotal < p->nummin || objtotal > p->nummax) { 1362 nm_prerr("requested objtotal %d out of range [%d, %d]", 1363 objtotal, p->nummin, p->nummax); 1364 return EINVAL; 1365 } 1366 /* 1367 * Compute number of objects using a brute-force approach: 1368 * given a max cluster size, 1369 * we try to fill it with objects keeping track of the 1370 * wasted space to the next page boundary. 1371 */ 1372 for (clustentries = 0, i = 1;; i++) { 1373 u_int delta, used = i * objsize; 1374 if (used > MAX_CLUSTSIZE) 1375 break; 1376 delta = used % PAGE_SIZE; 1377 if (delta == 0) { // exact solution 1378 clustentries = i; 1379 break; 1380 } 1381 } 1382 /* exact solution not found */ 1383 if (clustentries == 0) { 1384 nm_prerr("unsupported allocation for %d bytes", objsize); 1385 return EINVAL; 1386 } 1387 /* compute clustsize */ 1388 clustsize = clustentries * objsize; 1389 if (netmap_debug & NM_DEBUG_MEM) 1390 nm_prinf("objsize %d clustsize %d objects %d", 1391 objsize, clustsize, clustentries); 1392 1393 /* 1394 * The number of clusters is n = ceil(objtotal/clustentries) 1395 * objtotal' = n * clustentries 1396 */ 1397 p->_clustentries = clustentries; 1398 p->_clustsize = clustsize; 1399 p->_numclusters = (objtotal + clustentries - 1) / clustentries; 1400 1401 /* actual values (may be larger than requested) */ 1402 p->_objsize = objsize; 1403 p->_objtotal = p->_numclusters * clustentries; 1404 1405 return 0; 1406 } 1407 1408 /* call with NMA_LOCK held */ 1409 static int 1410 netmap_finalize_obj_allocator(struct netmap_mem_d *nmd, struct netmap_obj_pool *p) 1411 { 1412 int i; /* must be signed */ 1413 1414 if (p->lut) { 1415 /* if the lut is already there we assume that also all the 1416 * clusters have already been allocated, possibly by somebody 1417 * else (e.g., extmem). In the latter case, the alloc_done flag 1418 * will remain at zero, so that we will not attempt to 1419 * deallocate the clusters by ourselves in 1420 * netmap_reset_obj_allocator. 
1421 */ 1422 return 0; 1423 } 1424 1425 /* optimistically assume we have enough memory */ 1426 p->numclusters = p->_numclusters; 1427 p->objtotal = p->_objtotal; 1428 p->alloc_done = 1; 1429 1430 p->lut = nm_alloc_lut(p->objtotal); 1431 if (p->lut == NULL) { 1432 nm_prerr("Unable to create lookup table for '%s'", p->name); 1433 goto clean; 1434 } 1435 1436 /* 1437 * Allocate clusters, init pointers 1438 */ 1439 1440 for (i = 0; i < (int)p->objtotal;) { 1441 int lim = i + p->_clustentries; 1442 char *clust; 1443 1444 /* 1445 * XXX Note, we only need contigmalloc() for buffers attached 1446 * to native interfaces. In all other cases (nifp, netmap rings 1447 * and even buffers for VALE ports or emulated interfaces) we 1448 * can live with standard malloc, because the hardware will not 1449 * access the pages directly. 1450 */ 1451 if (nmd->nm_numa_domain == -1) { 1452 clust = contigmalloc(p->_clustsize, M_NETMAP, 1453 M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); 1454 } else { 1455 struct domainset *ds; 1456 1457 ds = DOMAINSET_PREF(nmd->nm_numa_domain); 1458 clust = contigmalloc_domainset(p->_clustsize, M_NETMAP, 1459 ds, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); 1460 } 1461 if (clust == NULL) { 1462 /* 1463 * If we get here, there is a severe memory shortage, 1464 * so halve the allocated memory to reclaim some. 1465 */ 1466 nm_prerr("Unable to create cluster at %d for '%s' allocator", 1467 i, p->name); 1468 if (i < 2) /* nothing to halve */ 1469 goto out; 1470 lim = i / 2; 1471 for (i--; i >= lim; i--) { 1472 if (i % p->_clustentries == 0 && p->lut[i].vaddr) 1473 free(p->lut[i].vaddr, M_NETMAP); 1474 p->lut[i].vaddr = NULL; 1475 } 1476 out: 1477 p->objtotal = i; 1478 /* we may have stopped in the middle of a cluster */ 1479 p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; 1480 break; 1481 } 1482 /* 1483 * Set lut state for all buffers in the current cluster. 1484 * 1485 * [i, lim) is the set of buffer indexes that cover the 1486 * current cluster. 1487 * 1488 * 'clust' is really the address of the current buffer in 1489 * the current cluster as we index through it with a stride 1490 * of p->_objsize. 
1491 */ 1492 for (; i < lim; i++, clust += p->_objsize) { 1493 p->lut[i].vaddr = clust; 1494 #if !defined(linux) && !defined(_WIN32) 1495 p->lut[i].paddr = vtophys(clust); 1496 #endif 1497 } 1498 } 1499 p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize; 1500 if (netmap_verbose) 1501 nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'", 1502 p->numclusters, p->_clustsize >> 10, 1503 p->memtotal >> 10, p->name); 1504 1505 return 0; 1506 1507 clean: 1508 netmap_reset_obj_allocator(p); 1509 return ENOMEM; 1510 } 1511 1512 /* call with lock held */ 1513 static int 1514 netmap_mem_params_changed(struct netmap_obj_params* p) 1515 { 1516 int i, rv = 0; 1517 1518 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1519 if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) { 1520 p[i].last_size = p[i].size; 1521 p[i].last_num = p[i].num; 1522 rv = 1; 1523 } 1524 } 1525 return rv; 1526 } 1527 1528 static void 1529 netmap_mem_reset_all(struct netmap_mem_d *nmd) 1530 { 1531 int i; 1532 1533 if (netmap_debug & NM_DEBUG_MEM) 1534 nm_prinf("resetting %p", nmd); 1535 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1536 netmap_reset_obj_allocator(&nmd->pools[i]); 1537 } 1538 nmd->flags &= ~NETMAP_MEM_FINALIZED; 1539 } 1540 1541 static int 1542 netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na) 1543 { 1544 int i, lim = p->objtotal; 1545 struct netmap_lut *lut; 1546 if (na == NULL || na->pdev == NULL) 1547 return 0; 1548 1549 lut = &na->na_lut; 1550 1551 1552 1553 #if defined(__FreeBSD__) 1554 /* On FreeBSD mapping and unmapping is performed by the txsync 1555 * and rxsync routine, packet by packet. */ 1556 (void)i; 1557 (void)lim; 1558 (void)lut; 1559 #elif defined(_WIN32) 1560 (void)i; 1561 (void)lim; 1562 (void)lut; 1563 nm_prerr("unsupported on Windows"); 1564 #else /* linux */ 1565 nm_prdis("unmapping and freeing plut for %s", na->name); 1566 if (lut->plut == NULL || na->pdev == NULL) 1567 return 0; 1568 for (i = 0; i < lim; i += p->_clustentries) { 1569 if (lut->plut[i].paddr) 1570 netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize); 1571 } 1572 nm_free_plut(lut->plut); 1573 lut->plut = NULL; 1574 #endif /* linux */ 1575 1576 return 0; 1577 } 1578 1579 static int 1580 netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na) 1581 { 1582 int error = 0; 1583 int i, lim = p->objtotal; 1584 struct netmap_lut *lut = &na->na_lut; 1585 1586 if (na->pdev == NULL) 1587 return 0; 1588 1589 #if defined(__FreeBSD__) 1590 /* On FreeBSD mapping and unmapping is performed by the txsync 1591 * and rxsync routine, packet by packet. 
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */

	if (lut->plut != NULL) {
		nm_prdis("plut already allocated for %s", na->name);
		return 0;
	}

	nm_prdis("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		nm_prerr("Failed to allocate physical lut for %s", na->name);
		return ENOMEM;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		int j;

		if (p->lut[i].vaddr == NULL)
			continue;

		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
		    p->lut[i].vaddr, p->_clustsize);
		if (error) {
			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
			break;
		}

		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
		}
	}

	if (error)
		netmap_mem_unmap(p, na);

#endif /* linux */

	return error;
}

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(nmd, &nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	nmd->nm_totalsize = (nmd->nm_totalsize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	if (nmd->lasterr)
		goto error;

	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		nm_prinf("interfaces %zd KB, rings %zd KB, buffers %zd MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}

/*
 * allocator for private memory
 */
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p, int grp_id,
		const struct netmap_mem_ops *ops, uint64_t memtotal, int *perr)
{
	struct netmap_mem_d *d = NULL;
	int i, err = 0;
	int checksz = 0;

	/* if memtotal is != 0 we check that the request fits the available
	 * memory. Moreover, any surplus memory is assigned to buffers.
	 */
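	/*
	 * E.g. (illustrative numbers): with memtotal = 8MB and pools
	 * that, as requested, need 6MB, the remaining 2MB are turned
	 * below into 2MB / 2KB = 1024 additional buffers.
	 */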
1694 */ 1695 checksz = (memtotal > 0); 1696 1697 d = nm_os_malloc(size); 1698 if (d == NULL) { 1699 err = ENOMEM; 1700 goto error; 1701 } 1702 1703 *d = nm_blueprint; 1704 d->ops = ops; 1705 1706 err = nm_mem_assign_id(d, grp_id); 1707 if (err) 1708 goto error_free; 1709 snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id); 1710 1711 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1712 snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, 1713 nm_blueprint.pools[i].name, 1714 d->name); 1715 if (checksz) { 1716 uint64_t poolsz = (uint64_t)p[i].num * p[i].size; 1717 if (memtotal < poolsz) { 1718 nm_prerr("%s: request too large", d->pools[i].name); 1719 err = ENOMEM; 1720 goto error_rel_id; 1721 } 1722 memtotal -= poolsz; 1723 } 1724 d->params[i].num = p[i].num; 1725 d->params[i].size = p[i].size; 1726 } 1727 if (checksz && memtotal > 0) { 1728 uint64_t sz = d->params[NETMAP_BUF_POOL].size; 1729 uint64_t n = (memtotal + sz - 1) / sz; 1730 1731 if (n) { 1732 if (netmap_verbose) { 1733 nm_prinf("%s: adding %llu more buffers", 1734 d->pools[NETMAP_BUF_POOL].name, 1735 (unsigned long long)n); 1736 } 1737 d->params[NETMAP_BUF_POOL].num += n; 1738 } 1739 } 1740 1741 NMA_LOCK_INIT(d); 1742 1743 err = netmap_mem_config(d); 1744 if (err) 1745 goto error_destroy_lock; 1746 1747 d->flags &= ~NETMAP_MEM_FINALIZED; 1748 1749 return d; 1750 1751 error_destroy_lock: 1752 NMA_LOCK_DESTROY(d); 1753 error_rel_id: 1754 nm_mem_release_id(d); 1755 error_free: 1756 nm_os_free(d); 1757 error: 1758 if (perr) 1759 *perr = err; 1760 return NULL; 1761 } 1762 1763 struct netmap_mem_d * 1764 netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd, 1765 u_int extra_bufs, u_int npipes, int *perr) 1766 { 1767 struct netmap_mem_d *d = NULL; 1768 struct netmap_obj_params p[NETMAP_POOLS_NR]; 1769 int i; 1770 u_int v, maxd; 1771 /* account for the fake host rings */ 1772 txr++; 1773 rxr++; 1774 1775 /* copy the min values */ 1776 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1777 p[i] = netmap_min_priv_params[i]; 1778 } 1779 1780 /* possibly increase them to fit user request */ 1781 v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr); 1782 if (p[NETMAP_IF_POOL].size < v) 1783 p[NETMAP_IF_POOL].size = v; 1784 v = 2 + 4 * npipes; 1785 if (p[NETMAP_IF_POOL].num < v) 1786 p[NETMAP_IF_POOL].num = v; 1787 maxd = (txd > rxd) ? txd : rxd; 1788 v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd; 1789 if (p[NETMAP_RING_POOL].size < v) 1790 p[NETMAP_RING_POOL].size = v; 1791 /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake) 1792 * and two rx rings (again, 1 normal and 1 fake host) 1793 */ 1794 v = txr + rxr + 8 * npipes; 1795 if (p[NETMAP_RING_POOL].num < v) 1796 p[NETMAP_RING_POOL].num = v; 1797 /* for each pipe we only need the buffers for the 4 "real" rings. 1798 * On the other end, the pipe ring dimension may be different from 1799 * the parent port ring dimension. 
As a compromise, we allocate twice the 1800 * space actually needed if the pipe rings were the same size as the parent rings 1801 */ 1802 v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs; 1803 /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */ 1804 if (p[NETMAP_BUF_POOL].num < v) 1805 p[NETMAP_BUF_POOL].num = v; 1806 1807 if (netmap_verbose) 1808 nm_prinf("req if %d*%d ring %d*%d buf %d*%d", 1809 p[NETMAP_IF_POOL].num, 1810 p[NETMAP_IF_POOL].size, 1811 p[NETMAP_RING_POOL].num, 1812 p[NETMAP_RING_POOL].size, 1813 p[NETMAP_BUF_POOL].num, 1814 p[NETMAP_BUF_POOL].size); 1815 1816 d = _netmap_mem_private_new(sizeof(*d), p, -1, &netmap_mem_global_ops, 0, perr); 1817 1818 return d; 1819 } 1820 1821 /* Reference IOMMU and NUMA local allocator - find existing or create new, 1822 * for non-hw adapters, fall back to global allocator. 1823 */ 1824 struct netmap_mem_d * 1825 netmap_mem_get_allocator(struct netmap_adapter *na) 1826 { 1827 int i, domain, err, grp_id; 1828 struct netmap_mem_d *nmd; 1829 1830 if (na == NULL || na->pdev == NULL) 1831 return netmap_mem_get(&nm_mem); 1832 1833 domain = nm_numa_domain(na->pdev); 1834 grp_id = nm_iommu_group_id(na->pdev); 1835 1836 NM_MTX_LOCK(nm_mem_list_lock); 1837 nmd = netmap_last_mem_d; 1838 do { 1839 if (!(nmd->flags & NETMAP_MEM_HIDDEN) && 1840 nmd->nm_grp == grp_id && nmd->nm_numa_domain == domain) { 1841 nmd->refcount++; 1842 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); 1843 NM_MTX_UNLOCK(nm_mem_list_lock); 1844 return nmd; 1845 } 1846 nmd = nmd->next; 1847 } while (nmd != netmap_last_mem_d); 1848 1849 nmd = nm_os_malloc(sizeof(*nmd)); 1850 if (nmd == NULL) 1851 goto error; 1852 1853 *nmd = nm_mem_blueprint; 1854 1855 err = nm_mem_assign_id_locked(nmd, grp_id, domain); 1856 if (err) 1857 goto error_free; 1858 1859 snprintf(nmd->name, sizeof(nmd->name), "%d", nmd->nm_id); 1860 1861 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1862 snprintf(nmd->pools[i].name, NETMAP_POOL_MAX_NAMSZ, "%s-%s", 1863 nm_mem_blueprint.pools[i].name, nmd->name); 1864 } 1865 1866 NMA_LOCK_INIT(nmd); 1867 1868 NM_MTX_UNLOCK(nm_mem_list_lock); 1869 return nmd; 1870 1871 error_free: 1872 nm_os_free(nmd); 1873 error: 1874 NM_MTX_UNLOCK(nm_mem_list_lock); 1875 return NULL; 1876 } 1877 1878 /* call with lock held */ 1879 static int 1880 netmap_mem2_config(struct netmap_mem_d *nmd) 1881 { 1882 int i; 1883 1884 if (!netmap_mem_params_changed(nmd->params)) 1885 goto out; 1886 1887 nm_prdis("reconfiguring"); 1888 1889 if (nmd->flags & NETMAP_MEM_FINALIZED) { 1890 /* reset previous allocation */ 1891 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1892 netmap_reset_obj_allocator(&nmd->pools[i]); 1893 } 1894 nmd->flags &= ~NETMAP_MEM_FINALIZED; 1895 } 1896 1897 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1898 nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], 1899 nmd->params[i].num, nmd->params[i].size); 1900 if (nmd->lasterr) 1901 goto out; 1902 } 1903 1904 out: 1905 1906 return nmd->lasterr; 1907 } 1908 1909 static int 1910 netmap_mem2_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) 1911 { 1912 if (nmd->flags & NETMAP_MEM_FINALIZED) 1913 goto out; 1914 1915 if (netmap_mem_finalize_all(nmd)) 1916 goto out; 1917 1918 nmd->lasterr = 0; 1919 1920 out: 1921 return nmd->lasterr; 1922 } 1923 1924 static void 1925 netmap_mem2_delete(struct netmap_mem_d *nmd) 1926 { 1927 int i; 1928 1929 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1930 netmap_destroy_obj_allocator(&nmd->pools[i]); 1931 } 1932 1933 NMA_LOCK_DESTROY(nmd); 1934 if (nmd != &nm_mem) 1935 
static void
netmap_mem2_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nmd->pools[i]);
	}

	NMA_LOCK_DESTROY(nmd);
	if (nmd != &nm_mem)
		nm_os_free(nmd);
}

#ifdef WITH_EXTMEM
/* doubly linked list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */

int
netmap_mem_init(void)
{
	nm_mem_blueprint = nm_mem;
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
#ifdef WITH_EXTMEM
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
	return (0);
}

void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

static int
netmap_mem_ring_needed(struct netmap_kring *kring)
{
	return kring->ring == NULL &&
	    (kring->users > 0 ||
	     (kring->nr_kflags & NKR_NEEDRING));
}

static int
netmap_mem_ring_todelete(struct netmap_kring *kring)
{
	return kring->ring != NULL &&
	    kring->users == 0 &&
	    !(kring->nr_kflags & NKR_NEEDRING);
}


/* call with NMA_LOCK held.
 *
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
static int
netmap_mem2_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;

		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (!netmap_mem_ring_needed(kring)) {
				/* unneeded, or already created by somebody else */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT creating ring %s (ring %p, users %d needring %d)",
					    kring->name, ring, kring->users,
					    kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("creating %s", kring->name);
			ndesc = kring->nkr_num_slots;
			len = sizeof(struct netmap_ring) +
				ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(nmd, len);
			if (ring == NULL) {
				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			nm_prdis("txring at %p", ring);
			kring->ring = ring;
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (nmd->pools[NETMAP_IF_POOL].memtotal +
				nmd->pools[NETMAP_RING_POOL].memtotal) -
				netmap_ring_offset(nmd, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(nmd);
			nm_prdis("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				/* this is a real ring */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("allocating buffers for %s", kring->name);
				if (netmap_new_bufs(nmd, ring->slot, ndesc)) {
					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT allocating buffers for %s", kring->name);
				netmap_mem_set_ring(nmd, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
1980 /* call with NMA_LOCK held
1981  *
1982  * Allocate netmap rings and buffers for this card.
1983  * The rings are contiguous, but have variable size.
1984  * The kring array must follow the layout described
1985  * in netmap_krings_create().
1986  */
1987 static int
1988 netmap_mem2_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
1989 {
1990 	enum txrx t;
1991 
1992 	for_rx_tx(t) {
1993 		u_int i;
1994 
1995 		for (i = 0; i < netmap_all_rings(na, t); i++) {
1996 			struct netmap_kring *kring = NMR(na, t)[i];
1997 			struct netmap_ring *ring = kring->ring;
1998 			u_int len, ndesc;
1999 
2000 			if (!netmap_mem_ring_needed(kring)) {
2001 				/* unneeded, or already created by somebody else */
2002 				if (netmap_debug & NM_DEBUG_MEM)
2003 					nm_prinf("NOT creating ring %s (ring %p, users %d needring %d)",
2004 						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
2005 				continue;
2006 			}
2007 			if (netmap_debug & NM_DEBUG_MEM)
2008 				nm_prinf("creating %s", kring->name);
2009 			ndesc = kring->nkr_num_slots;
2010 			len = sizeof(struct netmap_ring) +
2011 				ndesc * sizeof(struct netmap_slot);
2012 			ring = netmap_ring_malloc(nmd, len);
2013 			if (ring == NULL) {
2014 				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
2015 				goto cleanup;
2016 			}
2017 			nm_prdis("txring at %p", ring);
2018 			kring->ring = ring;
2019 			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
2020 			*(int64_t *)(uintptr_t)&ring->buf_ofs =
2021 			    (nmd->pools[NETMAP_IF_POOL].memtotal +
2022 			     nmd->pools[NETMAP_RING_POOL].memtotal) -
2023 			    netmap_ring_offset(nmd, ring);
2024 
2025 			/* copy values from kring */
2026 			ring->head = kring->rhead;
2027 			ring->cur = kring->rcur;
2028 			ring->tail = kring->rtail;
2029 			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
2030 				netmap_mem_bufsize(nmd);
2031 			nm_prdis("%s h %d c %d t %d", kring->name,
2032 				ring->head, ring->cur, ring->tail);
2033 			nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
2034 			if (!(kring->nr_kflags & NKR_FAKERING)) {
2035 				/* this is a real ring */
2036 				if (netmap_debug & NM_DEBUG_MEM)
2037 					nm_prinf("allocating buffers for %s", kring->name);
2038 				if (netmap_new_bufs(nmd, ring->slot, ndesc)) {
2039 					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
2040 					goto cleanup;
2041 				}
2042 			} else {
2043 				/* this is a fake ring, set all indices to 0 */
2044 				if (netmap_debug & NM_DEBUG_MEM)
2045 					nm_prinf("NOT allocating buffers for %s", kring->name);
2046 				netmap_mem_set_ring(nmd, ring->slot, ndesc, 0);
2047 			}
2048 			/* ring info */
2049 			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
2050 			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
2051 		}
2052 	}
2053 
2054 	return 0;
2055 
2056 cleanup:
2057 	/* we cannot actually cleanup here, since we don't own kring->users
2058 	 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
2059 	 * the first or zero-out the second, then call netmap_free_rings()
2060 	 * to do the cleanup
2061 	 */
2062 
2063 	return ENOMEM;
2064 }
2065 
2066 static void
2067 netmap_mem2_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2068 {
2069 	enum txrx t;
2070 
2071 	for_rx_tx(t) {
2072 		u_int i;
2073 		for (i = 0; i < netmap_all_rings(na, t); i++) {
2074 			struct netmap_kring *kring = NMR(na, t)[i];
2075 			struct netmap_ring *ring = kring->ring;
2076 
2077 			if (!netmap_mem_ring_todelete(kring)) {
2078 				if (netmap_debug & NM_DEBUG_MEM)
2079 					nm_prinf("NOT deleting ring %s (ring %p, users %d needring %d)",
2080 						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
2081 				continue;
2082 			}
2083 			if (netmap_debug & NM_DEBUG_MEM)
2084 				nm_prinf("deleting ring %s", kring->name);
2085 			if (!(kring->nr_kflags & NKR_FAKERING)) {
2086 				nm_prdis("freeing bufs for %s", kring->name);
2087 				netmap_free_bufs(nmd, ring->slot, kring->nkr_num_slots);
2088 			} else {
2089 				nm_prdis("NOT freeing bufs for %s", kring->name);
2090 			}
2091 			netmap_ring_free(nmd, ring);
2092 			kring->ring = NULL;
2093 		}
2094 	}
2095 }
2096 
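/*
 * Editorial sketch of the cleanup contract described in rings_create()
 * above: on ENOMEM the caller undoes its own state and then lets the
 * delete path reclaim whatever was allocated. The surrounding caller
 * code is an assumption; the ops signatures match this file.
 *
 *	if (na->nm_mem->ops->nmd_rings_create(na->nm_mem, na)) {
 *		kring->users--;		// or clear NKR_NEEDRING
 *		na->nm_mem->ops->nmd_rings_delete(na->nm_mem, na);
 *		return ENOMEM;
 *	}
 */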
2097 /* call with NMA_LOCK held */
2098 /*
2099  * Allocate the per-fd structure netmap_if.
2100  *
2101  * We assume that the configuration stored in na
2102  * (number of tx/rx rings and descs) does not change while
2103  * the interface is in netmap mode.
2104  */
2105 static struct netmap_if *
2106 netmap_mem2_if_new(struct netmap_mem_d *nmd,
2107 	struct netmap_adapter *na, struct netmap_priv_d *priv)
2108 {
2109 	struct netmap_if *nifp;
2110 	ssize_t base; /* handy for relative offsets between rings and nifp */
2111 	u_int i, len, n[NR_TXRX], ntot;
2112 	enum txrx t;
2113 
2114 	ntot = 0;
2115 	for_rx_tx(t) {
2116 		/* account for the (possibly fake) host rings */
2117 		n[t] = netmap_all_rings(na, t);
2118 		ntot += n[t];
2119 	}
2120 	/*
2121 	 * the descriptor is followed inline by an array of offsets
2122 	 * to the tx and rx rings in the shared memory region.
2123 	 */
2124 
2125 	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
2126 	nifp = netmap_if_malloc(nmd, len);
2127 	if (nifp == NULL) {
2128 		return NULL;
2129 	}
2130 
2131 	/* initialize base fields -- override const */
2132 	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
2133 	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
2134 	*(u_int *)(uintptr_t)&nifp->ni_host_tx_rings =
2135 		(na->num_host_tx_rings ? na->num_host_tx_rings : 1);
2136 	*(u_int *)(uintptr_t)&nifp->ni_host_rx_rings =
2137 		(na->num_host_rx_rings ? na->num_host_rx_rings : 1);
2138 	strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));
2139 
2140 	/*
2141 	 * fill the slots for the rx and tx rings. They contain the offset
2142 	 * between the ring and nifp, so the information is usable in
2143 	 * userspace to reach the ring from the nifp.
2144 	 */
2145 	base = netmap_if_offset(nmd, nifp);
2146 	for (i = 0; i < n[NR_TX]; i++) {
2147 		/* XXX instead of ofs == 0 maybe use the offset of an error
2148 		 * ring, like we do for buffers? */
2149 		ssize_t ofs = 0;
2150 
2151 		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
2152 		    && i < priv->np_qlast[NR_TX]) {
2153 			ofs = netmap_ring_offset(nmd,
2154 				na->tx_rings[i]->ring) - base;
2155 		}
2156 		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
2157 	}
2158 	for (i = 0; i < n[NR_RX]; i++) {
2159 		/* XXX instead of ofs == 0 maybe use the offset of an error
2160 		 * ring, like we do for buffers? */
2161 		ssize_t ofs = 0;
2162 
2163 		if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
2164 		    && i < priv->np_qlast[NR_RX]) {
2165 			ofs = netmap_ring_offset(nmd,
2166 				na->rx_rings[i]->ring) - base;
2167 		}
2168 		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
2169 	}
2170 
2171 	return (nifp);
2172 }
2173 
2174 static void
2175 netmap_mem2_if_delete(struct netmap_mem_d *nmd,
2176 	struct netmap_adapter *na, struct netmap_if *nifp)
2177 {
2178 	if (nifp == NULL)
2179 		/* nothing to do */
2180 		return;
2181 	if (nifp->ni_bufs_head)
2182 		netmap_extra_free(na, nifp->ni_bufs_head);
2183 	netmap_if_free(nmd, nifp);
2184 }
2185 
2186 static void
2187 netmap_mem2_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2188 {
2189 
2190 	if (netmap_debug & NM_DEBUG_MEM)
2191 		nm_prinf("active = %d", nmd->active);
2192 
2193 }
2194 
2195 const struct netmap_mem_ops netmap_mem_global_ops = {
2196 	.nmd_get_lut = netmap_mem2_get_lut,
2197 	.nmd_get_info = netmap_mem2_get_info,
2198 	.nmd_ofstophys = netmap_mem2_ofstophys,
2199 	.nmd_config = netmap_mem2_config,
2200 	.nmd_finalize = netmap_mem2_finalize,
2201 	.nmd_deref = netmap_mem2_deref,
2202 	.nmd_delete = netmap_mem2_delete,
2203 	.nmd_if_offset = netmap_mem2_if_offset,
2204 	.nmd_if_new = netmap_mem2_if_new,
2205 	.nmd_if_delete = netmap_mem2_if_delete,
2206 	.nmd_rings_create = netmap_mem2_rings_create,
2207 	.nmd_rings_delete = netmap_mem2_rings_delete
2208 };
2209 
2210 int
2211 netmap_mem_pools_info_get(struct nmreq_pools_info *req,
2212 	struct netmap_mem_d *nmd)
2213 {
2214 	int ret;
2215 
2216 	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
2217 		&req->nr_mem_id);
2218 	if (ret) {
2219 		return ret;
2220 	}
2221 
2222 	NMA_LOCK(nmd);
2223 	req->nr_if_pool_offset = 0;
2224 	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
2225 	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;
2226 
2227 	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
2228 	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
2229 	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;
2230 
2231 	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
2232 		nmd->pools[NETMAP_RING_POOL].memtotal;
2233 	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
2234 	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
2235 	NMA_UNLOCK(nmd);
2236 
2237 	return 0;
2238 }
2239 
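/*
 * Editorial sketch of the layout reported above: the three pools sit back
 * to back in the region, so each offset is the cumulative size of the
 * preceding pools (object counts/sizes below are purely illustrative):
 *
 *	offset 0                     IF pool
 *	offset if.memtotal           RING pool
 *	offset if.memtotal
 *	       + ring.memtotal       BUF pool
 *
 * so buffer j starts at nr_buf_pool_offset + j * nr_buf_pool_objsize
 * within the mmap()ed region.
 */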
2240 #ifdef WITH_EXTMEM
2241 struct netmap_mem_ext {
2242 	struct netmap_mem_d up;
2243 
2244 	struct nm_os_extmem *os;
2245 	struct netmap_mem_ext *next, *prev;
2246 };
2247 
2248 /* call with nm_mem_list_lock held */
2249 static void
2250 netmap_mem_ext_register(struct netmap_mem_ext *e)
2251 {
2252 	NM_MTX_LOCK(nm_mem_ext_list_lock);
2253 	if (netmap_mem_ext_list)
2254 		netmap_mem_ext_list->prev = e;
2255 	e->next = netmap_mem_ext_list;
2256 	netmap_mem_ext_list = e;
2257 	e->prev = NULL;
2258 	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2259 }
2260 
2261 /* call with nm_mem_list_lock held */
2262 static void
2263 netmap_mem_ext_unregister(struct netmap_mem_ext *e)
2264 {
2265 	if (e->prev)
2266 		e->prev->next = e->next;
2267 	else
2268 		netmap_mem_ext_list = e->next;
2269 	if (e->next)
2270 		e->next->prev = e->prev;
2271 	e->prev = e->next = NULL;
2272 }
2273 
2274 static struct netmap_mem_ext *
2275 netmap_mem_ext_search(struct nm_os_extmem *os)
2276 {
2277 	struct netmap_mem_ext *e;
2278 
2279 	NM_MTX_LOCK(nm_mem_ext_list_lock);
2280 	for (e = netmap_mem_ext_list; e; e = e->next) {
2281 		if (nm_os_extmem_isequal(e->os, os)) {
2282 			netmap_mem_get(&e->up);
2283 			break;
2284 		}
2285 	}
2286 	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2287 	return e;
2288 }
2289 
2290 
2291 static void
2292 netmap_mem_ext_delete(struct netmap_mem_d *d)
2293 {
2294 	int i;
2295 	struct netmap_mem_ext *e =
2296 		(struct netmap_mem_ext *)d;
2297 
2298 	netmap_mem_ext_unregister(e);
2299 
2300 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
2301 		struct netmap_obj_pool *p = &d->pools[i];
2302 
2303 		if (p->lut) {
2304 			nm_free_lut(p->lut, p->objtotal);
2305 			p->lut = NULL;
2306 		}
2307 	}
2308 	if (e->os)
2309 		nm_os_extmem_delete(e->os);
2310 	netmap_mem2_delete(d);
2311 }
2312 
2313 static int
2314 netmap_mem_ext_config(struct netmap_mem_d *nmd)
2315 {
2316 	return 0;
2317 }
2318 
2319 struct netmap_mem_ops netmap_mem_ext_ops = {
2320 	.nmd_get_lut = netmap_mem2_get_lut,
2321 	.nmd_get_info = netmap_mem2_get_info,
2322 	.nmd_ofstophys = netmap_mem2_ofstophys,
2323 	.nmd_config = netmap_mem_ext_config,
2324 	.nmd_finalize = netmap_mem2_finalize,
2325 	.nmd_deref = netmap_mem2_deref,
2326 	.nmd_delete = netmap_mem_ext_delete,
2327 	.nmd_if_offset = netmap_mem2_if_offset,
2328 	.nmd_if_new = netmap_mem2_if_new,
2329 	.nmd_if_delete = netmap_mem2_if_delete,
2330 	.nmd_rings_create = netmap_mem2_rings_create,
2331 	.nmd_rings_delete = netmap_mem2_rings_delete
2332 };
2333 
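/*
 * Editorial sketch (userspace side, abridged; exact option handling may
 * differ by netmap version): an application offers its own memory by
 * attaching a NETMAP_REQ_OPT_EXTMEM option to the register request, which
 * eventually lands in netmap_mem_ext_create() below.
 *
 *	struct nmreq_opt_extmem ext;
 *	memset(&ext, 0, sizeof(ext));
 *	ext.nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
 *	ext.nro_usrptr = (uintptr_t)my_region;	 // region mmap()ed by the app
 *	ext.nro_info.nr_buf_pool_objsize = 2048; // zero fields mean "use defaults"
 *	hdr.nr_options = (uintptr_t)&ext;	 // hdr is the struct nmreq_header
 *	ioctl(fd, NIOCCTRL, &hdr);
 */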
2334 struct netmap_mem_d *
2335 netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
2336 {
2337 	int error = 0;
2338 	int i, j;
2339 	struct netmap_mem_ext *nme;
2340 	char *clust;
2341 	size_t off;
2342 	struct nm_os_extmem *os = NULL;
2343 	int nr_pages;
2344 
2345 	/* XXX sanity checks */
2346 	if (pi->nr_if_pool_objtotal == 0)
2347 		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
2348 	if (pi->nr_if_pool_objsize == 0)
2349 		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
2350 	if (pi->nr_ring_pool_objtotal == 0)
2351 		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
2352 	if (pi->nr_ring_pool_objsize == 0)
2353 		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
2354 	if (pi->nr_buf_pool_objtotal == 0)
2355 		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
2356 	if (pi->nr_buf_pool_objsize == 0)
2357 		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
2358 	if (netmap_verbose & NM_DEBUG_MEM)
2359 		nm_prinf("if %d %d ring %d %d buf %d %d",
2360 			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
2361 			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
2362 			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);
2363 
2364 	os = nm_os_extmem_create(usrptr, pi, &error);
2365 	if (os == NULL) {
2366 		nm_prerr("os extmem creation failed");
2367 		goto out;
2368 	}
2369 
2370 	nme = netmap_mem_ext_search(os);
2371 	if (nme) {
2372 		nm_os_extmem_delete(os);
2373 		return &nme->up;
2374 	}
2375 	if (netmap_verbose & NM_DEBUG_MEM)
2376 		nm_prinf("not found, creating new");
2377 
2378 	nme = _netmap_mem_private_new(sizeof(*nme),
2379 
2380 		(struct netmap_obj_params[]){
2381 			{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
2382 			{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
2383 			{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
2384 		-1,
2385 		&netmap_mem_ext_ops,
2386 		pi->nr_memsize,
2387 		&error);
2388 	if (nme == NULL)
2389 		goto out_unmap;
2390 
2391 	nr_pages = nm_os_extmem_nr_pages(os);
2392 
2393 	/* from now on the pages will be released by the nme destructor;
2394 	 * we set os = NULL to prevent the release in out_unmap below
2395 	 */
2396 	nme->os = os;
2397 	os = NULL; /* pass ownership */
2398 
2399 	clust = nm_os_extmem_nextpage(nme->os);
2400 	off = 0;
2401 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
2402 		struct netmap_obj_pool *p = &nme->up.pools[i];
2403 		struct netmap_obj_params *o = &nme->up.params[i];
2404 
2405 		p->_objsize = o->size;
2406 		p->_clustsize = o->size;
2407 		p->_clustentries = 1;
2408 
2409 		p->lut = nm_alloc_lut(o->num);
2410 		if (p->lut == NULL) {
2411 			error = ENOMEM;
2412 			goto out_delete;
2413 		}
2414 
2415 		p->bitmap_slots = (o->num + 31) / 32; /* one bit per object, 32 bits per word */
2416 		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
2417 		if (p->invalid_bitmap == NULL) {
2418 			error = ENOMEM;
2419 			goto out_delete;
2420 		}
2421 
2422 		if (nr_pages == 0) {
2423 			p->objtotal = 0;
2424 			p->memtotal = 0;
2425 			p->objfree = 0;
2426 			continue;
2427 		}
2428 
2429 		for (j = 0; j < o->num && nr_pages > 0; j++) {
2430 			size_t noff;
2431 
2432 			p->lut[j].vaddr = clust + off;
2433 #if !defined(linux) && !defined(_WIN32)
2434 			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
2435 #endif
2436 			nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
2437 			noff = off + p->_objsize;
2438 			if (noff < PAGE_SIZE) {
2439 				off = noff;
2440 				continue;
2441 			}
2442 			nm_prdis("too big, recomputing offset...");
2443 			while (noff >= PAGE_SIZE) {
2444 				char *old_clust = clust;
2445 				noff -= PAGE_SIZE;
2446 				clust = nm_os_extmem_nextpage(nme->os);
2447 				nr_pages--;
2448 				nm_prdis("noff %zu page %p nr_pages %d", noff,
2449 					clust, nr_pages);
2450 				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
2451 				    (nr_pages == 0 ||
2452 				     old_clust + PAGE_SIZE != clust))
2453 				{
2454 					/* out of space or not contiguous,
2455 					 * drop this object
2456 					 */
2457 					p->invalid_bitmap[j >> 5] |= 1U << (j & 31U);
2458 					nm_prdis("non contiguous at off %zu, drop", noff);
2459 				}
2460 				if (nr_pages == 0)
2461 					break;
2462 			}
2463 			off = noff;
2464 		}
2465 		p->objtotal = j;
2466 		p->numclusters = p->objtotal;
2467 		p->memtotal = j * (size_t)p->_objsize;
2468 		nm_prdis("%d memtotal %zu", j, p->memtotal);
2469 	}
2470 
2471 	netmap_mem_ext_register(nme);
2472 
2473 	return &nme->up;
2474 
2475 out_delete:
2476 	netmap_mem_put(&nme->up);
2477 out_unmap:
2478 	if (os)
2479 		nm_os_extmem_delete(os);
2480 out:
2481 	if (perror)
2482 		*perror = error;
2483 	return NULL;
2484 
2485 }
2486 #endif /* WITH_EXTMEM */
2487 
2488 
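/*
 * Editorial sketch of the page walk in netmap_mem_ext_create() above
 * (illustrative numbers): with 2048-byte objects on 4096-byte pages,
 * objects j and j+1 share a page and the walk advances to the next page
 * after every second object. An object that would straddle two pages
 * which are not virtually contiguous is marked in invalid_bitmap and
 * never handed out:
 *
 *	page k   : [ obj j ][ obj j+1 ]		<- both valid
 *	page k+1 : [ obj j+2, first half ...
 *	page k+5 :   ... second half ]		<- gap: obj j+2 invalidated
 */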
2489 #ifdef WITH_PTNETMAP
2490 struct mem_pt_if {
2491 	struct mem_pt_if *next;
2492 	if_t ifp;
2493 	unsigned int nifp_offset;
2494 };
2495 
2496 /* Netmap allocator for ptnetmap guests. */
2497 struct netmap_mem_ptg {
2498 	struct netmap_mem_d up;
2499 
2500 	vm_paddr_t nm_paddr;		/* physical address in the guest */
2501 	void *nm_addr;			/* virtual address in the guest */
2502 	struct netmap_lut buf_lut;	/* lookup table for BUF pool in the guest */
2503 	nm_memid_t host_mem_id;		/* allocator identifier in the host */
2504 	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
2505 	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
2506 };
2507 
2508 /* Link a passthrough interface to a passthrough netmap allocator. */
2509 static int
2510 netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, if_t ifp,
2511 	unsigned int nifp_offset)
2512 {
2513 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2514 	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));
2515 
2516 	if (!ptif) {
2517 		return ENOMEM;
2518 	}
2519 
2520 	NMA_LOCK(nmd);
2521 
2522 	ptif->ifp = ifp;
2523 	ptif->nifp_offset = nifp_offset;
2524 
2525 	if (ptnmd->pt_ifs) {
2526 		ptif->next = ptnmd->pt_ifs;
2527 	}
2528 	ptnmd->pt_ifs = ptif;
2529 
2530 	NMA_UNLOCK(nmd);
2531 
2532 	nm_prinf("ifp=%s,nifp_offset=%u",
2533 		if_name(ptif->ifp), ptif->nifp_offset);
2534 
2535 	return 0;
2536 }
2537 
2538 /* Called with NMA_LOCK(nmd) held. */
2539 static struct mem_pt_if *
2540 netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, if_t ifp)
2541 {
2542 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2543 	struct mem_pt_if *curr;
2544 
2545 	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2546 		if (curr->ifp == ifp) {
2547 			return curr;
2548 		}
2549 	}
2550 
2551 	return NULL;
2552 }
2553 
2554 /* Unlink a passthrough interface from a passthrough netmap allocator. */
2555 int
2556 netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, if_t ifp)
2557 {
2558 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2559 	struct mem_pt_if *prev = NULL;
2560 	struct mem_pt_if *curr;
2561 	int ret = -1;
2562 
2563 	NMA_LOCK(nmd);
2564 
2565 	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2566 		if (curr->ifp == ifp) {
2567 			if (prev) {
2568 				prev->next = curr->next;
2569 			} else {
2570 				ptnmd->pt_ifs = curr->next;
2571 			}
2572 			nm_prinf("removed (ifp=%s,nifp_offset=%u)",
2573 				if_name(curr->ifp), curr->nifp_offset);
2574 			nm_os_free(curr);
2575 			ret = 0;
2576 			break;
2577 		}
2578 		prev = curr;
2579 	}
2580 
2581 	NMA_UNLOCK(nmd);
2582 
2583 	return ret;
2584 }
2585 
2586 static int
2587 netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
2588 {
2589 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2590 
2591 	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
2592 		return EINVAL;
2593 	}
2594 
2595 	*lut = ptnmd->buf_lut;
2596 	return 0;
2597 }
2598 
2599 static int
2600 netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
2601 	u_int *memflags, uint16_t *id)
2602 {
2603 	int error = 0;
2604 
2605 	error = nmd->ops->nmd_config(nmd);
2606 	if (error)
2607 		goto out;
2608 
2609 	if (size)
2610 		*size = nmd->nm_totalsize;
2611 	if (memflags)
2612 		*memflags = nmd->flags;
2613 	if (id)
2614 		*id = nmd->nm_id;
2615 
2616 out:
2617 
2618 	return error;
2619 }
2620 
2621 static vm_paddr_t
2622 netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
2623 {
2624 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2625 	vm_paddr_t paddr;
2626 	/* if the offset is valid, just return nm_paddr + off */
2627 	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
2628 	nm_prdis("off %lx paddr %lx", off, (unsigned long)paddr);
2629 	return paddr;
2630 }
2631 
2632 static int
2633 netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
2634 {
2635 	/* nothing to do, we are configured on creation
2636 	 * and the configuration never changes thereafter
2637 	 */
2638 	return 0;
2639 }
2640 
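/*
 * Editorial example for the ofstophys translation above: the whole guest
 * region is a single physically contiguous BAR mapping, so translating an
 * offset is one addition. E.g. with nm_paddr = 0xfe000000 (illustrative),
 * the page backing offset 0x3000 sits at physical 0xfe003000, and an
 * mmap() of the device can be served page by page this way.
 */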
2641 static int
2642 netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2643 {
2644 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2645 	uint64_t mem_size;
2646 	uint32_t bufsize;
2647 	uint32_t nbuffers;
2648 	uint32_t poolofs;
2649 	vm_paddr_t paddr;
2650 	char *vaddr;
2651 	int i;
2652 	int error = 0;
2653 
2654 	if (nmd->flags & NETMAP_MEM_FINALIZED)
2655 		goto out;
2656 
2657 	if (ptnmd->ptn_dev == NULL) {
2658 		nm_prerr("ptnetmap memdev not attached");
2659 		error = ENOMEM;
2660 		goto out;
2661 	}
2662 	/* Map memory through the ptnetmap-memdev BAR. */
2663 	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
2664 		&ptnmd->nm_addr, &mem_size);
2665 	if (error)
2666 		goto out;
2667 
2668 	/* Initialize the lut using the information contained in the
2669 	 * ptnetmap memory device. */
2670 	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2671 		PTNET_MDEV_IO_BUF_POOL_OBJSZ);
2672 	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2673 		PTNET_MDEV_IO_BUF_POOL_OBJNUM);
2674 
2675 	/* allocate the lut */
2676 	if (ptnmd->buf_lut.lut == NULL) {
2677 		nm_prinf("allocating lut");
2678 		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
2679 		if (ptnmd->buf_lut.lut == NULL) {
2680 			nm_prerr("lut allocation failed");
2681 			return ENOMEM;
2682 		}
2683 	}
2684 
2685 	/* we have physically contiguous memory mapped through the PCI BAR */
2686 	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2687 		PTNET_MDEV_IO_BUF_POOL_OFS);
2688 	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
2689 	paddr = ptnmd->nm_paddr + poolofs;
2690 
2691 	for (i = 0; i < nbuffers; i++) {
2692 		ptnmd->buf_lut.lut[i].vaddr = vaddr;
2693 		vaddr += bufsize;
2694 		paddr += bufsize;
2695 	}
2696 
2697 	ptnmd->buf_lut.objtotal = nbuffers;
2698 	ptnmd->buf_lut.objsize = bufsize;
2699 	nmd->nm_totalsize = mem_size;
2700 
2701 	/* Initialize these fields as they are needed by
2702 	 * netmap_mem_bufsize().
2703 	 * XXX please improve this: why do we need this
2704 	 * replication? Maybe nmd->pools[] should not be
2705 	 * there for the guest allocator? */
2706 	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
2707 	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;
2708 
2709 	nmd->flags |= NETMAP_MEM_FINALIZED;
2710 out:
2711 	return error;
2712 }
2713 
2714 static void
2715 netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2716 {
2717 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2718 
2719 	if (nmd->active == 1 &&
2720 	    (nmd->flags & NETMAP_MEM_FINALIZED)) {
2721 		nmd->flags &= ~NETMAP_MEM_FINALIZED;
2722 		/* unmap ptnetmap-memdev memory */
2723 		if (ptnmd->ptn_dev) {
2724 			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
2725 		}
2726 		ptnmd->nm_addr = NULL;
2727 		ptnmd->nm_paddr = 0;
2728 	}
2729 }
2730 
2731 static ssize_t
2732 netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
2733 {
2734 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2735 
2736 	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
2737 }
2738 
2739 static void
2740 netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
2741 {
2742 	if (nmd == NULL)
2743 		return;
2744 	if (netmap_verbose)
2745 		nm_prinf("deleting %p", nmd);
2746 	if (nmd->active > 0)
2747 		nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
2748 	if (netmap_verbose)
2749 		nm_prinf("done deleting %p", nmd);
2750 	NMA_LOCK_DESTROY(nmd);
2751 	nm_os_free(nmd);
2752 }
2753 
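/*
 * Editorial sketch of the guest lut built by the finalize step above:
 * buffer i lives at a fixed offset inside the shared BAR mapping, so
 *
 *	buf_lut.lut[i].vaddr == (char *)nm_addr + poolofs + i * bufsize
 *
 * and no per-buffer physical address is tracked here: the host owns the
 * real buffers, the guest only indexes into the mapping.
 */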
2754 static struct netmap_if *
2755 netmap_mem_pt_guest_if_new(struct netmap_mem_d *nmd,
2756 	struct netmap_adapter *na, struct netmap_priv_d *priv)
2757 {
2758 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2759 	struct mem_pt_if *ptif;
2760 	struct netmap_if *nifp = NULL;
2761 
2762 	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2763 	if (ptif == NULL) {
2764 		nm_prerr("interface %s is not in passthrough", na->name);
2765 		goto out;
2766 	}
2767 
2768 	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
2769 		ptif->nifp_offset);
2770 out:
2771 	return nifp;
2772 }
2773 
2774 static void
2775 netmap_mem_pt_guest_if_delete(struct netmap_mem_d *nmd,
2776 	struct netmap_adapter *na, struct netmap_if *nifp)
2777 {
2778 	struct mem_pt_if *ptif;
2779 
2780 	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2781 	if (ptif == NULL) {
2782 		nm_prerr("interface %s is not in passthrough", na->name);
2783 	}
2784 }
2785 
2786 static int
2787 netmap_mem_pt_guest_rings_create(struct netmap_mem_d *nmd,
2788 	struct netmap_adapter *na)
2789 {
2790 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2791 	struct mem_pt_if *ptif;
2792 	struct netmap_if *nifp;
2793 	int i, error = -1;
2794 
2795 	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2796 	if (ptif == NULL) {
2797 		nm_prerr("interface %s is not in passthrough", na->name);
2798 		goto out;
2799 	}
2800 
2801 
2802 	/* point each kring to the corresponding backend ring */
2803 	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
2804 	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
2805 		struct netmap_kring *kring = na->tx_rings[i];
2806 		if (kring->ring)
2807 			continue;
2808 		kring->ring = (struct netmap_ring *)
2809 			((char *)nifp + nifp->ring_ofs[i]);
2810 	}
2811 	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
2812 		struct netmap_kring *kring = na->rx_rings[i];
2813 		if (kring->ring)
2814 			continue;
2815 		kring->ring = (struct netmap_ring *)
2816 			((char *)nifp +
2817 			 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
2818 	}
2819 
2820 	error = 0;
2821 out:
2822 	return error;
2823 }
2824 
2825 static void
2826 netmap_mem_pt_guest_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2827 {
2828 #if 0
2829 	enum txrx t;
2830 
2831 	for_rx_tx(t) {
2832 		u_int i;
2833 		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
2834 			struct netmap_kring *kring = &NMR(na, t)[i];
2835 
2836 			kring->ring = NULL;
2837 		}
2838 	}
2839 #endif
2840 	(void)nmd;
2841 	(void)na;
2842 }
2843 
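/*
 * Editorial note on the indexing used in the rings_create above:
 * ring_ofs[] stores all tx ring offsets first and the rx ring offsets
 * after them, matching the layout written by netmap_mem2_if_new() on the
 * host side. For example, with 2 tx + 2 rx hw rings plus one host ring
 * each (illustrative counts):
 *
 *	ring_ofs[0..2]	tx rings (hw 0, hw 1, host)
 *	ring_ofs[3..5]	rx rings (hw 0, hw 1, host)
 */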
2844 static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
2845 	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
2846 	.nmd_get_info = netmap_mem_pt_guest_get_info,
2847 	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
2848 	.nmd_config = netmap_mem_pt_guest_config,
2849 	.nmd_finalize = netmap_mem_pt_guest_finalize,
2850 	.nmd_deref = netmap_mem_pt_guest_deref,
2851 	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
2852 	.nmd_delete = netmap_mem_pt_guest_delete,
2853 	.nmd_if_new = netmap_mem_pt_guest_if_new,
2854 	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
2855 	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
2856 	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
2857 };
2858 
2859 /* Called with nm_mem_list_lock held. */
2860 static struct netmap_mem_d *
2861 netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
2862 {
2863 	struct netmap_mem_d *mem = NULL;
2864 	struct netmap_mem_d *scan = netmap_last_mem_d;
2865 
2866 	do {
2867 		/* find the ptnetmap allocator through the host ID */
2868 		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
2869 		    ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
2870 			mem = scan;
2871 			mem->refcount++;
2872 			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
2873 			break;
2874 		}
2875 		scan = scan->next;
2876 	} while (scan != netmap_last_mem_d);
2877 
2878 	return mem;
2879 }
2880 
2881 /* Called with nm_mem_list_lock held. */
2882 static struct netmap_mem_d *
2883 netmap_mem_pt_guest_create(nm_memid_t mem_id)
2884 {
2885 	struct netmap_mem_ptg *ptnmd;
2886 	int err = 0;
2887 
2888 	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
2889 	if (ptnmd == NULL) {
2890 		err = ENOMEM;
2891 		goto error;
2892 	}
2893 
2894 	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
2895 	ptnmd->host_mem_id = mem_id;
2896 	ptnmd->pt_ifs = NULL;
2897 
2898 	/* Assign a new id in the guest (we have the lock) */
2899 	err = nm_mem_assign_id_locked(&ptnmd->up, -1, -1);
2900 	if (err)
2901 		goto error;
2902 
2903 	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
2904 	ptnmd->up.flags |= NETMAP_MEM_IO;
2905 
2906 	NMA_LOCK_INIT(&ptnmd->up);
2907 
2908 	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);
2909 
2910 
2911 	return &ptnmd->up;
2912 error:
2913 	netmap_mem_pt_guest_delete(&ptnmd->up);
2914 	return NULL;
2915 }
2916 
2917 /*
2918  * find the host id in the guest allocators, and create a guest allocator
2919  * if it is not there yet
2920  */
2921 static struct netmap_mem_d *
2922 netmap_mem_pt_guest_get(nm_memid_t mem_id)
2923 {
2924 	struct netmap_mem_d *nmd;
2925 
2926 	NM_MTX_LOCK(nm_mem_list_lock);
2927 	nmd = netmap_mem_pt_guest_find_memid(mem_id);
2928 	if (nmd == NULL) {
2929 		nmd = netmap_mem_pt_guest_create(mem_id);
2930 	}
2931 	NM_MTX_UNLOCK(nm_mem_list_lock);
2932 
2933 	return nmd;
2934 }
2935 
2936 /*
2937  * The guest allocator can be created by the ptnetmap_memdev (during the
2938  * device attach) or by the ptnetmap device (ptnet) during netmap_attach.
2939  *
2940  * The order is not important (it differs between Linux and FreeBSD).
2941  * Whichever comes first creates the allocator; the other simply attaches to it.
2942  */
2943 
2944 /* Called when the ptnetmap_memdev is attaching, to attach a new allocator in
2945  * the guest */
2946 struct netmap_mem_d *
2947 netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
2948 {
2949 	struct netmap_mem_d *nmd;
2950 	struct netmap_mem_ptg *ptnmd;
2951 
2952 	nmd = netmap_mem_pt_guest_get(mem_id);
2953 
2954 	/* assign this device to the guest allocator */
2955 	if (nmd) {
2956 		ptnmd = (struct netmap_mem_ptg *)nmd;
2957 		ptnmd->ptn_dev = ptn_dev;
2958 	}
2959 
2960 	return nmd;
2961 }
2962 
2963 /* Called when the ptnet device is attaching */
2964 struct netmap_mem_d *
2965 netmap_mem_pt_guest_new(if_t ifp,
2966 	unsigned int nifp_offset,
2967 	unsigned int memid)
2968 {
2969 	struct netmap_mem_d *nmd;
2970 
2971 	if (ifp == NULL) {
2972 		return NULL;
2973 	}
2974 
2975 	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);
2976 
2977 	if (nmd) {
2978 		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
2979 	}
2980 
2981 	return nmd;
2982 }
2983 
2984 #endif /* WITH_PTNETMAP */
2985 