1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2012-2014 Matteo Landi 5 * Copyright (C) 2012-2016 Luigi Rizzo 6 * Copyright (C) 2012-2016 Giuseppe Lettieri 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #ifdef linux 32 #include "bsd_glue.h" 33 #endif /* linux */ 34 35 #ifdef __APPLE__ 36 #include "osx_glue.h" 37 #endif /* __APPLE__ */ 38 39 #ifdef __FreeBSD__ 40 #include <sys/cdefs.h> /* prerequisite */ 41 __FBSDID("$FreeBSD$"); 42 43 #include <sys/types.h> 44 #include <sys/malloc.h> 45 #include <sys/kernel.h> /* MALLOC_DEFINE */ 46 #include <sys/proc.h> 47 #include <vm/vm.h> /* vtophys */ 48 #include <vm/pmap.h> /* vtophys */ 49 #include <sys/socket.h> /* sockaddrs */ 50 #include <sys/selinfo.h> 51 #include <sys/sysctl.h> 52 #include <net/if.h> 53 #include <net/if_var.h> 54 #include <net/vnet.h> 55 #include <machine/bus.h> /* bus_dmamap_* */ 56 57 /* M_NETMAP only used in here */ 58 MALLOC_DECLARE(M_NETMAP); 59 MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); 60 61 #endif /* __FreeBSD__ */ 62 63 #ifdef _WIN32 64 #include <win_glue.h> 65 #endif 66 67 #include <net/netmap.h> 68 #include <dev/netmap/netmap_kern.h> 69 #include <net/netmap_virt.h> 70 #include "netmap_mem2.h" 71 72 #ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY 73 #define NETMAP_BUF_MAX_NUM 8*4096 /* if too big takes too much time to allocate */ 74 #else 75 #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ 76 #endif 77 78 #define NETMAP_POOL_MAX_NAMSZ 32 79 80 81 enum { 82 NETMAP_IF_POOL = 0, 83 NETMAP_RING_POOL, 84 NETMAP_BUF_POOL, 85 NETMAP_POOLS_NR 86 }; 87 88 89 struct netmap_obj_params { 90 u_int size; 91 u_int num; 92 93 u_int last_size; 94 u_int last_num; 95 }; 96 97 struct netmap_obj_pool { 98 char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ 99 100 /* ---------------------------------------------------*/ 101 /* these are only meaningful if the pool is finalized */ 102 /* (see 'finalized' field in netmap_mem_d) */ 103 u_int objtotal; /* actual total number of objects. */ 104 u_int memtotal; /* actual total memory space */ 105 u_int numclusters; /* actual number of clusters */ 106 107 u_int objfree; /* number of free objects. 
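	 * Object j is free iff bit (j & 31) of bitmap[j >> 5]
	 * below is set; objfree is kept in sync with that bitmap.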
*/ 108 109 struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ 110 uint32_t *bitmap; /* one bit per buffer, 1 means free */ 111 uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */ 112 uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ 113 int alloc_done; /* we have allocated the memory */ 114 /* ---------------------------------------------------*/ 115 116 /* limits */ 117 u_int objminsize; /* minimum object size */ 118 u_int objmaxsize; /* maximum object size */ 119 u_int nummin; /* minimum number of objects */ 120 u_int nummax; /* maximum number of objects */ 121 122 /* these are changed only by config */ 123 u_int _objtotal; /* total number of objects */ 124 u_int _objsize; /* object size */ 125 u_int _clustsize; /* cluster size */ 126 u_int _clustentries; /* objects per cluster */ 127 u_int _numclusters; /* number of clusters */ 128 129 /* requested values */ 130 u_int r_objtotal; 131 u_int r_objsize; 132 }; 133 134 #define NMA_LOCK_T NM_MTX_T 135 #define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx) 136 #define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx) 137 #define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx) 138 #define NMA_SPINLOCK(n) NM_MTX_SPINLOCK((n)->nm_mtx) 139 #define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx) 140 141 struct netmap_mem_ops { 142 int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*); 143 int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size, 144 u_int *memflags, uint16_t *id); 145 146 vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t); 147 int (*nmd_config)(struct netmap_mem_d *); 148 int (*nmd_finalize)(struct netmap_mem_d *); 149 void (*nmd_deref)(struct netmap_mem_d *); 150 ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr); 151 void (*nmd_delete)(struct netmap_mem_d *); 152 153 struct netmap_if * (*nmd_if_new)(struct netmap_adapter *, 154 struct netmap_priv_d *); 155 void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *); 156 int (*nmd_rings_create)(struct netmap_adapter *); 157 void (*nmd_rings_delete)(struct netmap_adapter *); 158 }; 159 160 struct netmap_mem_d { 161 NMA_LOCK_T nm_mtx; /* protect the allocator */ 162 u_int nm_totalsize; /* shorthand */ 163 164 u_int flags; 165 #define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */ 166 #define NETMAP_MEM_HIDDEN 0x8 /* beeing prepared */ 167 int lasterr; /* last error for curr config */ 168 int active; /* active users */ 169 int refcount; 170 /* the three allocators */ 171 struct netmap_obj_pool pools[NETMAP_POOLS_NR]; 172 173 nm_memid_t nm_id; /* allocator identifier */ 174 int nm_grp; /* iommu groupd id */ 175 176 /* list of all existing allocators, sorted by nm_id */ 177 struct netmap_mem_d *prev, *next; 178 179 struct netmap_mem_ops *ops; 180 181 struct netmap_obj_params params[NETMAP_POOLS_NR]; 182 183 #define NM_MEM_NAMESZ 16 184 char name[NM_MEM_NAMESZ]; 185 }; 186 187 int 188 netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) 189 { 190 int rv; 191 192 NMA_LOCK(nmd); 193 rv = nmd->ops->nmd_get_lut(nmd, lut); 194 NMA_UNLOCK(nmd); 195 196 return rv; 197 } 198 199 int 200 netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size, 201 u_int *memflags, nm_memid_t *memid) 202 { 203 int rv; 204 205 NMA_LOCK(nmd); 206 rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid); 207 NMA_UNLOCK(nmd); 208 209 return rv; 210 } 211 212 vm_paddr_t 213 netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) 214 { 215 vm_paddr_t pa; 216 217 #if defined(__FreeBSD__) 218 /* This function is called by 
netmap_dev_pager_fault(), which holds a 219 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we 220 * spin on the trylock. */ 221 NMA_SPINLOCK(nmd); 222 #else 223 NMA_LOCK(nmd); 224 #endif 225 pa = nmd->ops->nmd_ofstophys(nmd, off); 226 NMA_UNLOCK(nmd); 227 228 return pa; 229 } 230 231 static int 232 netmap_mem_config(struct netmap_mem_d *nmd) 233 { 234 if (nmd->active) { 235 /* already in use. Not fatal, but we 236 * cannot change the configuration 237 */ 238 return 0; 239 } 240 241 return nmd->ops->nmd_config(nmd); 242 } 243 244 ssize_t 245 netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off) 246 { 247 ssize_t rv; 248 249 NMA_LOCK(nmd); 250 rv = nmd->ops->nmd_if_offset(nmd, off); 251 NMA_UNLOCK(nmd); 252 253 return rv; 254 } 255 256 static void 257 netmap_mem_delete(struct netmap_mem_d *nmd) 258 { 259 nmd->ops->nmd_delete(nmd); 260 } 261 262 struct netmap_if * 263 netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) 264 { 265 struct netmap_if *nifp; 266 struct netmap_mem_d *nmd = na->nm_mem; 267 268 NMA_LOCK(nmd); 269 nifp = nmd->ops->nmd_if_new(na, priv); 270 NMA_UNLOCK(nmd); 271 272 return nifp; 273 } 274 275 void 276 netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif) 277 { 278 struct netmap_mem_d *nmd = na->nm_mem; 279 280 NMA_LOCK(nmd); 281 nmd->ops->nmd_if_delete(na, nif); 282 NMA_UNLOCK(nmd); 283 } 284 285 int 286 netmap_mem_rings_create(struct netmap_adapter *na) 287 { 288 int rv; 289 struct netmap_mem_d *nmd = na->nm_mem; 290 291 NMA_LOCK(nmd); 292 rv = nmd->ops->nmd_rings_create(na); 293 NMA_UNLOCK(nmd); 294 295 return rv; 296 } 297 298 void 299 netmap_mem_rings_delete(struct netmap_adapter *na) 300 { 301 struct netmap_mem_d *nmd = na->nm_mem; 302 303 NMA_LOCK(nmd); 304 nmd->ops->nmd_rings_delete(na); 305 NMA_UNLOCK(nmd); 306 } 307 308 static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *); 309 static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *); 310 static int nm_mem_assign_group(struct netmap_mem_d *, struct device *); 311 static void nm_mem_release_id(struct netmap_mem_d *); 312 313 nm_memid_t 314 netmap_mem_get_id(struct netmap_mem_d *nmd) 315 { 316 return nmd->nm_id; 317 } 318 319 #ifdef NM_DEBUG_MEM_PUTGET 320 #define NM_DBG_REFC(nmd, func, line) \ 321 nm_prinf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount); 322 #else 323 #define NM_DBG_REFC(nmd, func, line) 324 #endif 325 326 /* circular list of all existing allocators */ 327 static struct netmap_mem_d *netmap_last_mem_d = &nm_mem; 328 NM_MTX_T nm_mem_list_lock; 329 330 struct netmap_mem_d * 331 __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line) 332 { 333 NM_MTX_LOCK(nm_mem_list_lock); 334 nmd->refcount++; 335 NM_DBG_REFC(nmd, func, line); 336 NM_MTX_UNLOCK(nm_mem_list_lock); 337 return nmd; 338 } 339 340 void 341 __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line) 342 { 343 int last; 344 NM_MTX_LOCK(nm_mem_list_lock); 345 last = (--nmd->refcount == 0); 346 if (last) 347 nm_mem_release_id(nmd); 348 NM_DBG_REFC(nmd, func, line); 349 NM_MTX_UNLOCK(nm_mem_list_lock); 350 if (last) 351 netmap_mem_delete(nmd); 352 } 353 354 int 355 netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) 356 { 357 int lasterr = 0; 358 if (nm_mem_assign_group(nmd, na->pdev) < 0) { 359 return ENOMEM; 360 } 361 362 NMA_LOCK(nmd); 363 364 if (netmap_mem_config(nmd)) 365 goto out; 366 367 nmd->active++; 368 369 nmd->lasterr = nmd->ops->nmd_finalize(nmd); 370 371 
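	/*
	 * If finalization succeeded and the adapter has a physical
	 * device, also set up the DMA mappings for the buffer pool
	 * (a no-op on FreeBSD and Windows, see netmap_mem_map()).
	 */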
if (!nmd->lasterr && na->pdev) { 372 nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); 373 } 374 375 out: 376 lasterr = nmd->lasterr; 377 NMA_UNLOCK(nmd); 378 379 if (lasterr) 380 netmap_mem_deref(nmd, na); 381 382 return lasterr; 383 } 384 385 static int 386 nm_isset(uint32_t *bitmap, u_int i) 387 { 388 return bitmap[ (i>>5) ] & ( 1U << (i & 31U) ); 389 } 390 391 392 static int 393 netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p) 394 { 395 u_int n, j; 396 397 if (p->bitmap == NULL) { 398 /* Allocate the bitmap */ 399 n = (p->objtotal + 31) / 32; 400 p->bitmap = nm_os_malloc(sizeof(uint32_t) * n); 401 if (p->bitmap == NULL) { 402 D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, 403 p->name); 404 return ENOMEM; 405 } 406 p->bitmap_slots = n; 407 } else { 408 memset(p->bitmap, 0, p->bitmap_slots); 409 } 410 411 p->objfree = 0; 412 /* 413 * Set all the bits in the bitmap that have 414 * corresponding buffers to 1 to indicate they are 415 * free. 416 */ 417 for (j = 0; j < p->objtotal; j++) { 418 if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) { 419 D("skipping %s %d", p->name, j); 420 continue; 421 } 422 p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) ); 423 p->objfree++; 424 } 425 426 ND("%s free %u", p->name, p->objfree); 427 if (p->objfree == 0) 428 return ENOMEM; 429 430 return 0; 431 } 432 433 static int 434 netmap_mem_init_bitmaps(struct netmap_mem_d *nmd) 435 { 436 int i, error = 0; 437 438 for (i = 0; i < NETMAP_POOLS_NR; i++) { 439 struct netmap_obj_pool *p = &nmd->pools[i]; 440 441 error = netmap_init_obj_allocator_bitmap(p); 442 if (error) 443 return error; 444 } 445 446 /* 447 * buffers 0 and 1 are reserved 448 */ 449 if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) { 450 return ENOMEM; 451 } 452 453 nmd->pools[NETMAP_BUF_POOL].objfree -= 2; 454 if (nmd->pools[NETMAP_BUF_POOL].bitmap) { 455 /* XXX This check is a workaround that prevents a 456 * NULL pointer crash which currently happens only 457 * with ptnetmap guests. 458 * Removed shared-info --> is the bug still there? */ 459 nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U; 460 } 461 return 0; 462 } 463 464 int 465 netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na) 466 { 467 int last_user = 0; 468 NMA_LOCK(nmd); 469 if (na->active_fds <= 0) 470 netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); 471 if (nmd->active == 1) { 472 last_user = 1; 473 /* 474 * Reset the allocator when it falls out of use so that any 475 * pool resources leaked by unclean application exits are 476 * reclaimed. 477 */ 478 netmap_mem_init_bitmaps(nmd); 479 } 480 nmd->ops->nmd_deref(nmd); 481 482 nmd->active--; 483 if (!nmd->active) 484 nmd->nm_grp = -1; 485 486 NMA_UNLOCK(nmd); 487 return last_user; 488 } 489 490 491 /* accessor functions */ 492 static int 493 netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) 494 { 495 lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; 496 #ifdef __FreeBSD__ 497 lut->plut = lut->lut; 498 #endif 499 lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; 500 lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; 501 502 return 0; 503 } 504 505 static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { 506 [NETMAP_IF_POOL] = { 507 .size = 1024, 508 .num = 2, 509 }, 510 [NETMAP_RING_POOL] = { 511 .size = 5*PAGE_SIZE, 512 .num = 4, 513 }, 514 [NETMAP_BUF_POOL] = { 515 .size = 2048, 516 .num = 4098, 517 }, 518 }; 519 520 521 /* 522 * nm_mem is the memory allocator used for all physical interfaces 523 * running in netmap mode. 
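 *
 * A minimal usage sketch (assuming a hardware adapter 'na' whose
 * na->nm_mem points here, and 'priv' being the per-fd private data;
 * the core code also creates the krings and handles errors in
 * between these steps):
 *
 *	error = netmap_mem_finalize(na->nm_mem, na);
 *	if (error == 0) {
 *		error = netmap_mem_rings_create(na);
 *		...
 *		nifp = netmap_mem_if_new(na, priv);
 *		...
 *		netmap_mem_if_delete(na, nifp);
 *		netmap_mem_rings_delete(na);
 *		netmap_mem_deref(na->nm_mem, na);
 *	}
 *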
 * Each virtual (VALE) port has its own allocator.
 */
extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.params = {
		[NETMAP_IF_POOL] = {
			.size = 1024,
			.num  = 100,
		},
		[NETMAP_RING_POOL] = {
			.size = 9*PAGE_SIZE,
			.num  = 200,
		},
		[NETMAP_BUF_POOL] = {
			.size = 2048,
			.num  = NETMAP_BUF_MAX_NUM,
		},
	},

	.nm_id = 1,
	.nm_grp = -1,

	.prev = &nm_mem,
	.next = &nm_mem,

	.ops = &netmap_mem_global_ops,

	.name = "1"
};


/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
static const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million!
*/ 603 }, 604 }, 605 606 .nm_grp = -1, 607 608 .flags = NETMAP_MEM_PRIVATE, 609 610 .ops = &netmap_mem_global_ops, 611 }; 612 613 /* memory allocator related sysctls */ 614 615 #define STRINGIFY(x) #x 616 617 618 #define DECLARE_SYSCTLS(id, name) \ 619 SYSBEGIN(mem2_ ## name); \ 620 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ 621 CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ 622 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ 623 CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ 624 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ 625 CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ 626 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ 627 CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ 628 SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ 629 CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ 630 "Default size of private netmap " STRINGIFY(name) "s"); \ 631 SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ 632 CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ 633 "Default number of private netmap " STRINGIFY(name) "s"); \ 634 SYSEND 635 636 SYSCTL_DECL(_dev_netmap); 637 DECLARE_SYSCTLS(NETMAP_IF_POOL, if); 638 DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); 639 DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); 640 641 /* call with nm_mem_list_lock held */ 642 static int 643 nm_mem_assign_id_locked(struct netmap_mem_d *nmd) 644 { 645 nm_memid_t id; 646 struct netmap_mem_d *scan = netmap_last_mem_d; 647 int error = ENOMEM; 648 649 do { 650 /* we rely on unsigned wrap around */ 651 id = scan->nm_id + 1; 652 if (id == 0) /* reserve 0 as error value */ 653 id = 1; 654 scan = scan->next; 655 if (id != scan->nm_id) { 656 nmd->nm_id = id; 657 nmd->prev = scan->prev; 658 nmd->next = scan; 659 scan->prev->next = nmd; 660 scan->prev = nmd; 661 netmap_last_mem_d = nmd; 662 nmd->refcount = 1; 663 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); 664 error = 0; 665 break; 666 } 667 } while (scan != netmap_last_mem_d); 668 669 return error; 670 } 671 672 /* call with nm_mem_list_lock *not* held */ 673 static int 674 nm_mem_assign_id(struct netmap_mem_d *nmd) 675 { 676 int ret; 677 678 NM_MTX_LOCK(nm_mem_list_lock); 679 ret = nm_mem_assign_id_locked(nmd); 680 NM_MTX_UNLOCK(nm_mem_list_lock); 681 682 return ret; 683 } 684 685 /* call with nm_mem_list_lock held */ 686 static void 687 nm_mem_release_id(struct netmap_mem_d *nmd) 688 { 689 nmd->prev->next = nmd->next; 690 nmd->next->prev = nmd->prev; 691 692 if (netmap_last_mem_d == nmd) 693 netmap_last_mem_d = nmd->prev; 694 695 nmd->prev = nmd->next = NULL; 696 } 697 698 struct netmap_mem_d * 699 netmap_mem_find(nm_memid_t id) 700 { 701 struct netmap_mem_d *nmd; 702 703 NM_MTX_LOCK(nm_mem_list_lock); 704 nmd = netmap_last_mem_d; 705 do { 706 if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) { 707 nmd->refcount++; 708 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); 709 NM_MTX_UNLOCK(nm_mem_list_lock); 710 return nmd; 711 } 712 nmd = nmd->next; 713 } while (nmd != netmap_last_mem_d); 714 NM_MTX_UNLOCK(nm_mem_list_lock); 715 return NULL; 716 } 717 718 static int 719 nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev) 720 { 721 int err = 0, id; 722 id = nm_iommu_group_id(dev); 723 if (netmap_verbose) 724 D("iommu_group %d", id); 725 726 NMA_LOCK(nmd); 727 728 if (nmd->nm_grp < 0) 729 nmd->nm_grp = id; 730 731 if (nmd->nm_grp != id) 732 nmd->lasterr = err = ENOMEM; 733 
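	/*
	 * lasterr is ENOMEM here when the allocator is already bound
	 * to a different iommu group than this device's.
	 */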
734 NMA_UNLOCK(nmd); 735 return err; 736 } 737 738 static struct lut_entry * 739 nm_alloc_lut(u_int nobj) 740 { 741 size_t n = sizeof(struct lut_entry) * nobj; 742 struct lut_entry *lut; 743 #ifdef linux 744 lut = vmalloc(n); 745 #else 746 lut = nm_os_malloc(n); 747 #endif 748 return lut; 749 } 750 751 static void 752 nm_free_lut(struct lut_entry *lut, u_int objtotal) 753 { 754 bzero(lut, sizeof(struct lut_entry) * objtotal); 755 #ifdef linux 756 vfree(lut); 757 #else 758 nm_os_free(lut); 759 #endif 760 } 761 762 #if defined(linux) || defined(_WIN32) 763 static struct plut_entry * 764 nm_alloc_plut(u_int nobj) 765 { 766 size_t n = sizeof(struct plut_entry) * nobj; 767 struct plut_entry *lut; 768 lut = vmalloc(n); 769 return lut; 770 } 771 772 static void 773 nm_free_plut(struct plut_entry * lut) 774 { 775 vfree(lut); 776 } 777 #endif /* linux or _WIN32 */ 778 779 780 /* 781 * First, find the allocator that contains the requested offset, 782 * then locate the cluster through a lookup table. 783 */ 784 static vm_paddr_t 785 netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) 786 { 787 int i; 788 vm_ooffset_t o = offset; 789 vm_paddr_t pa; 790 struct netmap_obj_pool *p; 791 792 p = nmd->pools; 793 794 for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { 795 if (offset >= p[i].memtotal) 796 continue; 797 // now lookup the cluster's address 798 #ifndef _WIN32 799 pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) + 800 offset % p[i]._objsize; 801 #else 802 pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr); 803 pa.QuadPart += offset % p[i]._objsize; 804 #endif 805 return pa; 806 } 807 /* this is only in case of errors */ 808 D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, 809 p[NETMAP_IF_POOL].memtotal, 810 p[NETMAP_IF_POOL].memtotal 811 + p[NETMAP_RING_POOL].memtotal, 812 p[NETMAP_IF_POOL].memtotal 813 + p[NETMAP_RING_POOL].memtotal 814 + p[NETMAP_BUF_POOL].memtotal); 815 #ifndef _WIN32 816 return 0; /* bad address */ 817 #else 818 vm_paddr_t res; 819 res.QuadPart = 0; 820 return res; 821 #endif 822 } 823 824 #ifdef _WIN32 825 826 /* 827 * win32_build_virtual_memory_for_userspace 828 * 829 * This function get all the object making part of the pools and maps 830 * a contiguous virtual memory space for the userspace 831 * It works this way 832 * 1 - allocate a Memory Descriptor List wide as the sum 833 * of the memory needed for the pools 834 * 2 - cycle all the objects in every pool and for every object do 835 * 836 * 2a - cycle all the objects in every pool, get the list 837 * of the physical address descriptors 838 * 2b - calculate the offset in the array of pages desciptor in the 839 * main MDL 840 * 2c - copy the descriptors of the object in the main MDL 841 * 842 * 3 - return the resulting MDL that needs to be mapped in userland 843 * 844 * In this way we will have an MDL that describes all the memory for the 845 * objects in a single object 846 */ 847 848 PMDL 849 win32_build_user_vm_map(struct netmap_mem_d* nmd) 850 { 851 u_int memflags, ofs = 0; 852 PMDL mainMdl, tempMdl; 853 uint64_t memsize; 854 int i, j; 855 856 if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) { 857 D("memory not finalised yet"); 858 return NULL; 859 } 860 861 mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL); 862 if (mainMdl == NULL) { 863 D("failed to allocate mdl"); 864 return NULL; 865 } 866 867 NMA_LOCK(nmd); 868 for (i = 0; i < NETMAP_POOLS_NR; i++) { 869 struct netmap_obj_pool *p = &nmd->pools[i]; 870 int clsz = p->_clustsize; 871 int clobjs = 
p->_clustentries; /* objects per cluster */ 872 int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz); 873 PPFN_NUMBER pSrc, pDst; 874 875 /* each pool has a different cluster size so we need to reallocate */ 876 tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL); 877 if (tempMdl == NULL) { 878 NMA_UNLOCK(nmd); 879 D("fail to allocate tempMdl"); 880 IoFreeMdl(mainMdl); 881 return NULL; 882 } 883 pSrc = MmGetMdlPfnArray(tempMdl); 884 /* create one entry per cluster, the lut[] has one entry per object */ 885 for (j = 0; j < p->numclusters; j++, ofs += clsz) { 886 pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)]; 887 MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz); 888 MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */ 889 RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */ 890 mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */ 891 } 892 IoFreeMdl(tempMdl); 893 } 894 NMA_UNLOCK(nmd); 895 return mainMdl; 896 } 897 898 #endif /* _WIN32 */ 899 900 /* 901 * helper function for OS-specific mmap routines (currently only windows). 902 * Given an nmd and a pool index, returns the cluster size and number of clusters. 903 * Returns 0 if memory is finalised and the pool is valid, otherwise 1. 904 * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change. 905 */ 906 907 int 908 netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters) 909 { 910 if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR) 911 return 1; /* invalid arguments */ 912 // NMA_LOCK_ASSERT(nmd); 913 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { 914 *clustsize = *numclusters = 0; 915 return 1; /* not ready yet */ 916 } 917 *clustsize = nmd->pools[pool]._clustsize; 918 *numclusters = nmd->pools[pool].numclusters; 919 return 0; /* success */ 920 } 921 922 static int 923 netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size, 924 u_int *memflags, nm_memid_t *id) 925 { 926 int error = 0; 927 error = netmap_mem_config(nmd); 928 if (error) 929 goto out; 930 if (size) { 931 if (nmd->flags & NETMAP_MEM_FINALIZED) { 932 *size = nmd->nm_totalsize; 933 } else { 934 int i; 935 *size = 0; 936 for (i = 0; i < NETMAP_POOLS_NR; i++) { 937 struct netmap_obj_pool *p = nmd->pools + i; 938 *size += (p->_numclusters * p->_clustsize); 939 } 940 } 941 } 942 if (memflags) 943 *memflags = nmd->flags; 944 if (id) 945 *id = nmd->nm_id; 946 out: 947 return error; 948 } 949 950 /* 951 * we store objects by kernel address, need to find the offset 952 * within the pool to export the value to userspace. 
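 * (e.g. the offset of a netmap_if or netmap_ring, which userspace
 * adds to the base of the mmap()ed region to reach the object).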
953 * Algorithm: scan until we find the cluster, then add the 954 * actual offset in the cluster 955 */ 956 static ssize_t 957 netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) 958 { 959 int i, k = p->_clustentries, n = p->objtotal; 960 ssize_t ofs = 0; 961 962 for (i = 0; i < n; i += k, ofs += p->_clustsize) { 963 const char *base = p->lut[i].vaddr; 964 ssize_t relofs = (const char *) vaddr - base; 965 966 if (relofs < 0 || relofs >= p->_clustsize) 967 continue; 968 969 ofs = ofs + relofs; 970 ND("%s: return offset %d (cluster %d) for pointer %p", 971 p->name, ofs, i, vaddr); 972 return ofs; 973 } 974 D("address %p is not contained inside any cluster (%s)", 975 vaddr, p->name); 976 return 0; /* An error occurred */ 977 } 978 979 /* Helper functions which convert virtual addresses to offsets */ 980 #define netmap_if_offset(n, v) \ 981 netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) 982 983 #define netmap_ring_offset(n, v) \ 984 ((n)->pools[NETMAP_IF_POOL].memtotal + \ 985 netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) 986 987 static ssize_t 988 netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) 989 { 990 return netmap_if_offset(nmd, addr); 991 } 992 993 /* 994 * report the index, and use start position as a hint, 995 * otherwise buffer allocation becomes terribly expensive. 996 */ 997 static void * 998 netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) 999 { 1000 uint32_t i = 0; /* index in the bitmap */ 1001 uint32_t mask, j = 0; /* slot counter */ 1002 void *vaddr = NULL; 1003 1004 if (len > p->_objsize) { 1005 D("%s request size %d too large", p->name, len); 1006 return NULL; 1007 } 1008 1009 if (p->objfree == 0) { 1010 D("no more %s objects", p->name); 1011 return NULL; 1012 } 1013 if (start) 1014 i = *start; 1015 1016 /* termination is guaranteed by p->free, but better check bounds on i */ 1017 while (vaddr == NULL && i < p->bitmap_slots) { 1018 uint32_t cur = p->bitmap[i]; 1019 if (cur == 0) { /* bitmask is fully used */ 1020 i++; 1021 continue; 1022 } 1023 /* locate a slot */ 1024 for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) 1025 ; 1026 1027 p->bitmap[i] &= ~mask; /* mark object as in use */ 1028 p->objfree--; 1029 1030 vaddr = p->lut[i * 32 + j].vaddr; 1031 if (index) 1032 *index = i * 32 + j; 1033 } 1034 ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr); 1035 1036 if (start) 1037 *start = i; 1038 return vaddr; 1039 } 1040 1041 1042 /* 1043 * free by index, not by address. 1044 * XXX should we also cleanup the content ? 1045 */ 1046 static int 1047 netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) 1048 { 1049 uint32_t *ptr, mask; 1050 1051 if (j >= p->objtotal) { 1052 D("invalid index %u, max %u", j, p->objtotal); 1053 return 1; 1054 } 1055 ptr = &p->bitmap[j / 32]; 1056 mask = (1 << (j % 32)); 1057 if (*ptr & mask) { 1058 D("ouch, double free on buffer %d", j); 1059 return 1; 1060 } else { 1061 *ptr |= mask; 1062 p->objfree++; 1063 return 0; 1064 } 1065 } 1066 1067 /* 1068 * free by address. 
This is slow but is only used for a few 1069 * objects (rings, nifp) 1070 */ 1071 static void 1072 netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) 1073 { 1074 u_int i, j, n = p->numclusters; 1075 1076 for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { 1077 void *base = p->lut[i * p->_clustentries].vaddr; 1078 ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; 1079 1080 /* Given address, is out of the scope of the current cluster.*/ 1081 if (base == NULL || vaddr < base || relofs >= p->_clustsize) 1082 continue; 1083 1084 j = j + relofs / p->_objsize; 1085 /* KASSERT(j != 0, ("Cannot free object 0")); */ 1086 netmap_obj_free(p, j); 1087 return; 1088 } 1089 D("address %p is not contained inside any cluster (%s)", 1090 vaddr, p->name); 1091 } 1092 1093 unsigned 1094 netmap_mem_bufsize(struct netmap_mem_d *nmd) 1095 { 1096 return nmd->pools[NETMAP_BUF_POOL]._objsize; 1097 } 1098 1099 #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) 1100 #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) 1101 #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) 1102 #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) 1103 #define netmap_buf_malloc(n, _pos, _index) \ 1104 netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index) 1105 1106 1107 #if 0 /* currently unused */ 1108 /* Return the index associated to the given packet buffer */ 1109 #define netmap_buf_index(n, v) \ 1110 (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) 1111 #endif 1112 1113 /* 1114 * allocate extra buffers in a linked list. 1115 * returns the actual number. 1116 */ 1117 uint32_t 1118 netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n) 1119 { 1120 struct netmap_mem_d *nmd = na->nm_mem; 1121 uint32_t i, pos = 0; /* opaque, scan position in the bitmap */ 1122 1123 NMA_LOCK(nmd); 1124 1125 *head = 0; /* default, 'null' index ie empty list */ 1126 for (i = 0 ; i < n; i++) { 1127 uint32_t cur = *head; /* save current head */ 1128 uint32_t *p = netmap_buf_malloc(nmd, &pos, head); 1129 if (p == NULL) { 1130 D("no more buffers after %d of %d", i, n); 1131 *head = cur; /* restore */ 1132 break; 1133 } 1134 ND(5, "allocate buffer %d -> %d", *head, cur); 1135 *p = cur; /* link to previous head */ 1136 } 1137 1138 NMA_UNLOCK(nmd); 1139 1140 return i; 1141 } 1142 1143 static void 1144 netmap_extra_free(struct netmap_adapter *na, uint32_t head) 1145 { 1146 struct lut_entry *lut = na->na_lut.lut; 1147 struct netmap_mem_d *nmd = na->nm_mem; 1148 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1149 uint32_t i, cur, *buf; 1150 1151 ND("freeing the extra list"); 1152 for (i = 0; head >=2 && head < p->objtotal; i++) { 1153 cur = head; 1154 buf = lut[head].vaddr; 1155 head = *buf; 1156 *buf = 0; 1157 if (netmap_obj_free(p, cur)) 1158 break; 1159 } 1160 if (head != 0) 1161 D("breaking with head %d", head); 1162 if (netmap_verbose) 1163 D("freed %d buffers", i); 1164 } 1165 1166 1167 /* Return nonzero on error */ 1168 static int 1169 netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 1170 { 1171 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1172 u_int i = 0; /* slot counter */ 1173 uint32_t pos = 0; /* slot in p->bitmap */ 1174 uint32_t index = 0; /* buffer index */ 1175 1176 for (i = 0; i < n; i++) { 1177 void *vaddr = netmap_buf_malloc(nmd, &pos, &index); 1178 
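		/* pos caches the bitmap word where the scan stopped,
		 * index reports the buffer index just allocated */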
if (vaddr == NULL) { 1179 D("no more buffers after %d of %d", i, n); 1180 goto cleanup; 1181 } 1182 slot[i].buf_idx = index; 1183 slot[i].len = p->_objsize; 1184 slot[i].flags = 0; 1185 slot[i].ptr = 0; 1186 } 1187 1188 ND("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos); 1189 return (0); 1190 1191 cleanup: 1192 while (i > 0) { 1193 i--; 1194 netmap_obj_free(p, slot[i].buf_idx); 1195 } 1196 bzero(slot, n * sizeof(slot[0])); 1197 return (ENOMEM); 1198 } 1199 1200 static void 1201 netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index) 1202 { 1203 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1204 u_int i; 1205 1206 for (i = 0; i < n; i++) { 1207 slot[i].buf_idx = index; 1208 slot[i].len = p->_objsize; 1209 slot[i].flags = 0; 1210 } 1211 } 1212 1213 1214 static void 1215 netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) 1216 { 1217 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 1218 1219 if (i < 2 || i >= p->objtotal) { 1220 D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); 1221 return; 1222 } 1223 netmap_obj_free(p, i); 1224 } 1225 1226 1227 static void 1228 netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 1229 { 1230 u_int i; 1231 1232 for (i = 0; i < n; i++) { 1233 if (slot[i].buf_idx > 1) 1234 netmap_free_buf(nmd, slot[i].buf_idx); 1235 } 1236 ND("%s: released some buffers, available: %u", 1237 p->name, p->objfree); 1238 } 1239 1240 static void 1241 netmap_reset_obj_allocator(struct netmap_obj_pool *p) 1242 { 1243 1244 if (p == NULL) 1245 return; 1246 if (p->bitmap) 1247 nm_os_free(p->bitmap); 1248 p->bitmap = NULL; 1249 if (p->invalid_bitmap) 1250 nm_os_free(p->invalid_bitmap); 1251 p->invalid_bitmap = NULL; 1252 if (!p->alloc_done) { 1253 /* allocation was done by somebody else. 1254 * Let them clean up after themselves. 1255 */ 1256 return; 1257 } 1258 if (p->lut) { 1259 u_int i; 1260 1261 /* 1262 * Free each cluster allocated in 1263 * netmap_finalize_obj_allocator(). The cluster start 1264 * addresses are stored at multiples of p->_clusterentries 1265 * in the lut. 1266 */ 1267 for (i = 0; i < p->objtotal; i += p->_clustentries) { 1268 contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP); 1269 } 1270 nm_free_lut(p->lut, p->objtotal); 1271 } 1272 p->lut = NULL; 1273 p->objtotal = 0; 1274 p->memtotal = 0; 1275 p->numclusters = 0; 1276 p->objfree = 0; 1277 p->alloc_done = 0; 1278 } 1279 1280 /* 1281 * Free all resources related to an allocator. 1282 */ 1283 static void 1284 netmap_destroy_obj_allocator(struct netmap_obj_pool *p) 1285 { 1286 if (p == NULL) 1287 return; 1288 netmap_reset_obj_allocator(p); 1289 } 1290 1291 /* 1292 * We receive a request for objtotal objects, of size objsize each. 1293 * Internally we may round up both numbers, as we allocate objects 1294 * in small clusters multiple of the page size. 1295 * We need to keep track of objtotal and clustentries, 1296 * as they are needed when freeing memory. 1297 * 1298 * XXX note -- userspace needs the buffers to be contiguous, 1299 * so we cannot afford gaps at the end of a cluster. 
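 *
 * Example, with 4 KiB pages: objsize 2048 gives clustentries = 2
 * and clustsize = 4096 (one page), while objsize 3072 gives
 * clustentries = 4 and clustsize = 12288, the smallest multiple
 * of objsize that is also a multiple of the page size.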
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	if (p->lut) {
		/* if the lut is already there we assume that also all the
		 * clusters have already been allocated, possibly by somebody
		 * else (e.g., extmem). In the latter case, the alloc_done flag
		 * will remain at zero, so that we will not attempt to
		 * deallocate the clusters by ourselves in
		 * netmap_reset_obj_allocator.
1395 */ 1396 return 0; 1397 } 1398 1399 /* optimistically assume we have enough memory */ 1400 p->numclusters = p->_numclusters; 1401 p->objtotal = p->_objtotal; 1402 p->alloc_done = 1; 1403 1404 p->lut = nm_alloc_lut(p->objtotal); 1405 if (p->lut == NULL) { 1406 D("Unable to create lookup table for '%s'", p->name); 1407 goto clean; 1408 } 1409 1410 /* 1411 * Allocate clusters, init pointers 1412 */ 1413 1414 n = p->_clustsize; 1415 for (i = 0; i < (int)p->objtotal;) { 1416 int lim = i + p->_clustentries; 1417 char *clust; 1418 1419 /* 1420 * XXX Note, we only need contigmalloc() for buffers attached 1421 * to native interfaces. In all other cases (nifp, netmap rings 1422 * and even buffers for VALE ports or emulated interfaces) we 1423 * can live with standard malloc, because the hardware will not 1424 * access the pages directly. 1425 */ 1426 clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, 1427 (size_t)0, -1UL, PAGE_SIZE, 0); 1428 if (clust == NULL) { 1429 /* 1430 * If we get here, there is a severe memory shortage, 1431 * so halve the allocated memory to reclaim some. 1432 */ 1433 D("Unable to create cluster at %d for '%s' allocator", 1434 i, p->name); 1435 if (i < 2) /* nothing to halve */ 1436 goto out; 1437 lim = i / 2; 1438 for (i--; i >= lim; i--) { 1439 if (i % p->_clustentries == 0 && p->lut[i].vaddr) 1440 contigfree(p->lut[i].vaddr, 1441 n, M_NETMAP); 1442 p->lut[i].vaddr = NULL; 1443 } 1444 out: 1445 p->objtotal = i; 1446 /* we may have stopped in the middle of a cluster */ 1447 p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; 1448 break; 1449 } 1450 /* 1451 * Set lut state for all buffers in the current cluster. 1452 * 1453 * [i, lim) is the set of buffer indexes that cover the 1454 * current cluster. 1455 * 1456 * 'clust' is really the address of the current buffer in 1457 * the current cluster as we index through it with a stride 1458 * of p->_objsize. 1459 */ 1460 for (; i < lim; i++, clust += p->_objsize) { 1461 p->lut[i].vaddr = clust; 1462 #if !defined(linux) && !defined(_WIN32) 1463 p->lut[i].paddr = vtophys(clust); 1464 #endif 1465 } 1466 } 1467 p->memtotal = p->numclusters * p->_clustsize; 1468 if (netmap_verbose) 1469 D("Pre-allocated %d clusters (%d/%dKB) for '%s'", 1470 p->numclusters, p->_clustsize >> 10, 1471 p->memtotal >> 10, p->name); 1472 1473 return 0; 1474 1475 clean: 1476 netmap_reset_obj_allocator(p); 1477 return ENOMEM; 1478 } 1479 1480 /* call with lock held */ 1481 static int 1482 netmap_mem_params_changed(struct netmap_obj_params* p) 1483 { 1484 int i, rv = 0; 1485 1486 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1487 if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) { 1488 p[i].last_size = p[i].size; 1489 p[i].last_num = p[i].num; 1490 rv = 1; 1491 } 1492 } 1493 return rv; 1494 } 1495 1496 static void 1497 netmap_mem_reset_all(struct netmap_mem_d *nmd) 1498 { 1499 int i; 1500 1501 if (netmap_verbose) 1502 D("resetting %p", nmd); 1503 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1504 netmap_reset_obj_allocator(&nmd->pools[i]); 1505 } 1506 nmd->flags &= ~NETMAP_MEM_FINALIZED; 1507 } 1508 1509 static int 1510 netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na) 1511 { 1512 int i, lim = p->objtotal; 1513 struct netmap_lut *lut = &na->na_lut; 1514 1515 if (na == NULL || na->pdev == NULL) 1516 return 0; 1517 1518 #if defined(__FreeBSD__) 1519 /* On FreeBSD mapping and unmapping is performed by the txsync 1520 * and rxsync routine, packet by packet. 
*/ 1521 (void)i; 1522 (void)lim; 1523 (void)lut; 1524 #elif defined(_WIN32) 1525 (void)i; 1526 (void)lim; 1527 (void)lut; 1528 D("unsupported on Windows"); 1529 #else /* linux */ 1530 ND("unmapping and freeing plut for %s", na->name); 1531 if (lut->plut == NULL) 1532 return 0; 1533 for (i = 0; i < lim; i += p->_clustentries) { 1534 if (lut->plut[i].paddr) 1535 netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize); 1536 } 1537 nm_free_plut(lut->plut); 1538 lut->plut = NULL; 1539 #endif /* linux */ 1540 1541 return 0; 1542 } 1543 1544 static int 1545 netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na) 1546 { 1547 int error = 0; 1548 int i, lim = p->objtotal; 1549 struct netmap_lut *lut = &na->na_lut; 1550 1551 if (na->pdev == NULL) 1552 return 0; 1553 1554 #if defined(__FreeBSD__) 1555 /* On FreeBSD mapping and unmapping is performed by the txsync 1556 * and rxsync routine, packet by packet. */ 1557 (void)i; 1558 (void)lim; 1559 (void)lut; 1560 #elif defined(_WIN32) 1561 (void)i; 1562 (void)lim; 1563 (void)lut; 1564 D("unsupported on Windows"); 1565 #else /* linux */ 1566 1567 if (lut->plut != NULL) { 1568 ND("plut already allocated for %s", na->name); 1569 return 0; 1570 } 1571 1572 ND("allocating physical lut for %s", na->name); 1573 lut->plut = nm_alloc_plut(lim); 1574 if (lut->plut == NULL) { 1575 D("Failed to allocate physical lut for %s", na->name); 1576 return ENOMEM; 1577 } 1578 1579 for (i = 0; i < lim; i += p->_clustentries) { 1580 lut->plut[i].paddr = 0; 1581 } 1582 1583 for (i = 0; i < lim; i += p->_clustentries) { 1584 int j; 1585 1586 if (p->lut[i].vaddr == NULL) 1587 continue; 1588 1589 error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, 1590 p->lut[i].vaddr, p->_clustsize); 1591 if (error) { 1592 D("Failed to map cluster #%d from the %s pool", i, p->name); 1593 break; 1594 } 1595 1596 for (j = 1; j < p->_clustentries; j++) { 1597 lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize; 1598 } 1599 } 1600 1601 if (error) 1602 netmap_mem_unmap(p, na); 1603 1604 #endif /* linux */ 1605 1606 return error; 1607 } 1608 1609 static int 1610 netmap_mem_finalize_all(struct netmap_mem_d *nmd) 1611 { 1612 int i; 1613 if (nmd->flags & NETMAP_MEM_FINALIZED) 1614 return 0; 1615 nmd->lasterr = 0; 1616 nmd->nm_totalsize = 0; 1617 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1618 nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]); 1619 if (nmd->lasterr) 1620 goto error; 1621 nmd->nm_totalsize += nmd->pools[i].memtotal; 1622 } 1623 nmd->lasterr = netmap_mem_init_bitmaps(nmd); 1624 if (nmd->lasterr) 1625 goto error; 1626 1627 nmd->flags |= NETMAP_MEM_FINALIZED; 1628 1629 if (netmap_verbose) 1630 D("interfaces %d KB, rings %d KB, buffers %d MB", 1631 nmd->pools[NETMAP_IF_POOL].memtotal >> 10, 1632 nmd->pools[NETMAP_RING_POOL].memtotal >> 10, 1633 nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); 1634 1635 if (netmap_verbose) 1636 D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); 1637 1638 1639 return 0; 1640 error: 1641 netmap_mem_reset_all(nmd); 1642 return nmd->lasterr; 1643 } 1644 1645 /* 1646 * allocator for private memory 1647 */ 1648 static void * 1649 _netmap_mem_private_new(size_t size, struct netmap_obj_params *p, 1650 struct netmap_mem_ops *ops, int *perr) 1651 { 1652 struct netmap_mem_d *d = NULL; 1653 int i, err = 0; 1654 1655 d = nm_os_malloc(size); 1656 if (d == NULL) { 1657 err = ENOMEM; 1658 goto error; 1659 } 1660 1661 *d = nm_blueprint; 1662 d->ops = ops; 1663 1664 err = 
nm_mem_assign_id(d); 1665 if (err) 1666 goto error_free; 1667 snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id); 1668 1669 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1670 snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, 1671 nm_blueprint.pools[i].name, 1672 d->name); 1673 d->params[i].num = p[i].num; 1674 d->params[i].size = p[i].size; 1675 } 1676 1677 NMA_LOCK_INIT(d); 1678 1679 err = netmap_mem_config(d); 1680 if (err) 1681 goto error_rel_id; 1682 1683 d->flags &= ~NETMAP_MEM_FINALIZED; 1684 1685 return d; 1686 1687 error_rel_id: 1688 NMA_LOCK_DESTROY(d); 1689 nm_mem_release_id(d); 1690 error_free: 1691 nm_os_free(d); 1692 error: 1693 if (perr) 1694 *perr = err; 1695 return NULL; 1696 } 1697 1698 struct netmap_mem_d * 1699 netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd, 1700 u_int extra_bufs, u_int npipes, int *perr) 1701 { 1702 struct netmap_mem_d *d = NULL; 1703 struct netmap_obj_params p[NETMAP_POOLS_NR]; 1704 int i; 1705 u_int v, maxd; 1706 /* account for the fake host rings */ 1707 txr++; 1708 rxr++; 1709 1710 /* copy the min values */ 1711 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1712 p[i] = netmap_min_priv_params[i]; 1713 } 1714 1715 /* possibly increase them to fit user request */ 1716 v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr); 1717 if (p[NETMAP_IF_POOL].size < v) 1718 p[NETMAP_IF_POOL].size = v; 1719 v = 2 + 4 * npipes; 1720 if (p[NETMAP_IF_POOL].num < v) 1721 p[NETMAP_IF_POOL].num = v; 1722 maxd = (txd > rxd) ? txd : rxd; 1723 v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd; 1724 if (p[NETMAP_RING_POOL].size < v) 1725 p[NETMAP_RING_POOL].size = v; 1726 /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake) 1727 * and two rx rings (again, 1 normal and 1 fake host) 1728 */ 1729 v = txr + rxr + 8 * npipes; 1730 if (p[NETMAP_RING_POOL].num < v) 1731 p[NETMAP_RING_POOL].num = v; 1732 /* for each pipe we only need the buffers for the 4 "real" rings. 1733 * On the other end, the pipe ring dimension may be different from 1734 * the parent port ring dimension. 
As a compromise, we allocate twice the 1735 * space actually needed if the pipe rings were the same size as the parent rings 1736 */ 1737 v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs; 1738 /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */ 1739 if (p[NETMAP_BUF_POOL].num < v) 1740 p[NETMAP_BUF_POOL].num = v; 1741 1742 if (netmap_verbose) 1743 D("req if %d*%d ring %d*%d buf %d*%d", 1744 p[NETMAP_IF_POOL].num, 1745 p[NETMAP_IF_POOL].size, 1746 p[NETMAP_RING_POOL].num, 1747 p[NETMAP_RING_POOL].size, 1748 p[NETMAP_BUF_POOL].num, 1749 p[NETMAP_BUF_POOL].size); 1750 1751 d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr); 1752 1753 return d; 1754 } 1755 1756 1757 /* call with lock held */ 1758 static int 1759 netmap_mem2_config(struct netmap_mem_d *nmd) 1760 { 1761 int i; 1762 1763 if (!netmap_mem_params_changed(nmd->params)) 1764 goto out; 1765 1766 ND("reconfiguring"); 1767 1768 if (nmd->flags & NETMAP_MEM_FINALIZED) { 1769 /* reset previous allocation */ 1770 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1771 netmap_reset_obj_allocator(&nmd->pools[i]); 1772 } 1773 nmd->flags &= ~NETMAP_MEM_FINALIZED; 1774 } 1775 1776 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1777 nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], 1778 nmd->params[i].num, nmd->params[i].size); 1779 if (nmd->lasterr) 1780 goto out; 1781 } 1782 1783 out: 1784 1785 return nmd->lasterr; 1786 } 1787 1788 static int 1789 netmap_mem2_finalize(struct netmap_mem_d *nmd) 1790 { 1791 if (nmd->flags & NETMAP_MEM_FINALIZED) 1792 goto out; 1793 1794 if (netmap_mem_finalize_all(nmd)) 1795 goto out; 1796 1797 nmd->lasterr = 0; 1798 1799 out: 1800 return nmd->lasterr; 1801 } 1802 1803 static void 1804 netmap_mem2_delete(struct netmap_mem_d *nmd) 1805 { 1806 int i; 1807 1808 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1809 netmap_destroy_obj_allocator(&nmd->pools[i]); 1810 } 1811 1812 NMA_LOCK_DESTROY(nmd); 1813 if (nmd != &nm_mem) 1814 nm_os_free(nmd); 1815 } 1816 1817 #ifdef WITH_EXTMEM 1818 /* doubly linekd list of all existing external allocators */ 1819 static struct netmap_mem_ext *netmap_mem_ext_list = NULL; 1820 NM_MTX_T nm_mem_ext_list_lock; 1821 #endif /* WITH_EXTMEM */ 1822 1823 int 1824 netmap_mem_init(void) 1825 { 1826 NM_MTX_INIT(nm_mem_list_lock); 1827 NMA_LOCK_INIT(&nm_mem); 1828 netmap_mem_get(&nm_mem); 1829 #ifdef WITH_EXTMEM 1830 NM_MTX_INIT(nm_mem_ext_list_lock); 1831 #endif /* WITH_EXTMEM */ 1832 return (0); 1833 } 1834 1835 void 1836 netmap_mem_fini(void) 1837 { 1838 netmap_mem_put(&nm_mem); 1839 } 1840 1841 static void 1842 netmap_free_rings(struct netmap_adapter *na) 1843 { 1844 enum txrx t; 1845 1846 for_rx_tx(t) { 1847 u_int i; 1848 for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { 1849 struct netmap_kring *kring = NMR(na, t)[i]; 1850 struct netmap_ring *ring = kring->ring; 1851 1852 if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) { 1853 if (netmap_verbose) 1854 D("NOT deleting ring %s (ring %p, users %d neekring %d)", 1855 kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING); 1856 continue; 1857 } 1858 if (netmap_verbose) 1859 D("deleting ring %s", kring->name); 1860 if (!(kring->nr_kflags & NKR_FAKERING)) { 1861 ND("freeing bufs for %s", kring->name); 1862 netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots); 1863 } else { 1864 ND("NOT freeing bufs for %s", kring->name); 1865 } 1866 netmap_ring_free(na->nm_mem, ring); 1867 kring->ring = NULL; 1868 } 1869 } 1870 } 1871 1872 /* call with NMA_LOCK held * 1873 * 
 * Allocate netmap rings and buffers for this card
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
static int
netmap_mem2_rings_create(struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;

		for (i = 0; i <= nma_get_nrings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
				/* unneeded, or already created by somebody else */
				if (netmap_verbose)
					D("NOT creating ring %s (ring %p, users %d needring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_verbose)
				D("creating %s", kring->name);
			ndesc = kring->nkr_num_slots;
			len = sizeof(struct netmap_ring) +
				  ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(na->nm_mem, len);
			if (ring == NULL) {
				D("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			ND("txring at %p", ring);
			kring->ring = ring;
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
				netmap_ring_offset(na->nm_mem, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(na->nm_mem);
			ND("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			ND("initializing slots for %s_ring", nm_txrx2str(t));
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				/* this is a real ring */
				ND("allocating buffers for %s", kring->name);
				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
					D("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				ND("NOT allocating buffers for %s", kring->name);
				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
		}
	}

	return 0;

cleanup:
	/* we cannot actually cleanup here, since we don't own kring->users
	 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
	 * the first or zero-out the second, then call netmap_free_rings()
	 * to do the cleanup
	 */

	return ENOMEM;
}

static void
netmap_mem2_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	netmap_free_rings(na);
}


/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
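 *
 * A sketch of the layout, with offsets relative to the netmap_if
 * itself (entries for rings not exposed to this fd are left at 0):
 *
 *	struct netmap_if
 *	ssize_t ring_ofs[ni_tx_rings + 1]   tx rings, then host tx ring
 *	ssize_t ring_ofs[ni_rx_rings + 1]   rx rings, then host rx ring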
1971 */ 1972 static struct netmap_if * 1973 netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) 1974 { 1975 struct netmap_if *nifp; 1976 ssize_t base; /* handy for relative offsets between rings and nifp */ 1977 u_int i, len, n[NR_TXRX], ntot; 1978 enum txrx t; 1979 1980 ntot = 0; 1981 for_rx_tx(t) { 1982 /* account for the (eventually fake) host rings */ 1983 n[t] = nma_get_nrings(na, t) + 1; 1984 ntot += n[t]; 1985 } 1986 /* 1987 * the descriptor is followed inline by an array of offsets 1988 * to the tx and rx rings in the shared memory region. 1989 */ 1990 1991 len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t)); 1992 nifp = netmap_if_malloc(na->nm_mem, len); 1993 if (nifp == NULL) { 1994 NMA_UNLOCK(na->nm_mem); 1995 return NULL; 1996 } 1997 1998 /* initialize base fields -- override const */ 1999 *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; 2000 *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; 2001 strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ); 2002 2003 /* 2004 * fill the slots for the rx and tx rings. They contain the offset 2005 * between the ring and nifp, so the information is usable in 2006 * userspace to reach the ring from the nifp. 2007 */ 2008 base = netmap_if_offset(na->nm_mem, nifp); 2009 for (i = 0; i < n[NR_TX]; i++) { 2010 /* XXX instead of ofs == 0 maybe use the offset of an error 2011 * ring, like we do for buffers? */ 2012 ssize_t ofs = 0; 2013 2014 if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX] 2015 && i < priv->np_qlast[NR_TX]) { 2016 ofs = netmap_ring_offset(na->nm_mem, 2017 na->tx_rings[i]->ring) - base; 2018 } 2019 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs; 2020 } 2021 for (i = 0; i < n[NR_RX]; i++) { 2022 /* XXX instead of ofs == 0 maybe use the offset of an error 2023 * ring, like we do for buffers? 
*/ 2024 ssize_t ofs = 0; 2025 2026 if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX] 2027 && i < priv->np_qlast[NR_RX]) { 2028 ofs = netmap_ring_offset(na->nm_mem, 2029 na->rx_rings[i]->ring) - base; 2030 } 2031 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs; 2032 } 2033 2034 return (nifp); 2035 } 2036 2037 static void 2038 netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) 2039 { 2040 if (nifp == NULL) 2041 /* nothing to do */ 2042 return; 2043 if (nifp->ni_bufs_head) 2044 netmap_extra_free(na, nifp->ni_bufs_head); 2045 netmap_if_free(na->nm_mem, nifp); 2046 } 2047 2048 static void 2049 netmap_mem2_deref(struct netmap_mem_d *nmd) 2050 { 2051 2052 if (netmap_verbose) 2053 D("active = %d", nmd->active); 2054 2055 } 2056 2057 struct netmap_mem_ops netmap_mem_global_ops = { 2058 .nmd_get_lut = netmap_mem2_get_lut, 2059 .nmd_get_info = netmap_mem2_get_info, 2060 .nmd_ofstophys = netmap_mem2_ofstophys, 2061 .nmd_config = netmap_mem2_config, 2062 .nmd_finalize = netmap_mem2_finalize, 2063 .nmd_deref = netmap_mem2_deref, 2064 .nmd_delete = netmap_mem2_delete, 2065 .nmd_if_offset = netmap_mem2_if_offset, 2066 .nmd_if_new = netmap_mem2_if_new, 2067 .nmd_if_delete = netmap_mem2_if_delete, 2068 .nmd_rings_create = netmap_mem2_rings_create, 2069 .nmd_rings_delete = netmap_mem2_rings_delete 2070 }; 2071 2072 int 2073 netmap_mem_pools_info_get(struct nmreq_pools_info *req, 2074 struct netmap_mem_d *nmd) 2075 { 2076 int ret; 2077 2078 ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL, 2079 &req->nr_mem_id); 2080 if (ret) { 2081 return ret; 2082 } 2083 2084 NMA_LOCK(nmd); 2085 req->nr_if_pool_offset = 0; 2086 req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal; 2087 req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize; 2088 2089 req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal; 2090 req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal; 2091 req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize; 2092 2093 req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal + 2094 nmd->pools[NETMAP_RING_POOL].memtotal; 2095 req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; 2096 req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; 2097 NMA_UNLOCK(nmd); 2098 2099 return 0; 2100 } 2101 2102 #ifdef WITH_EXTMEM 2103 struct netmap_mem_ext { 2104 struct netmap_mem_d up; 2105 2106 struct nm_os_extmem *os; 2107 struct netmap_mem_ext *next, *prev; 2108 }; 2109 2110 /* call with nm_mem_list_lock held */ 2111 static void 2112 netmap_mem_ext_register(struct netmap_mem_ext *e) 2113 { 2114 NM_MTX_LOCK(nm_mem_ext_list_lock); 2115 if (netmap_mem_ext_list) 2116 netmap_mem_ext_list->prev = e; 2117 e->next = netmap_mem_ext_list; 2118 netmap_mem_ext_list = e; 2119 e->prev = NULL; 2120 NM_MTX_UNLOCK(nm_mem_ext_list_lock); 2121 } 2122 2123 /* call with nm_mem_list_lock held */ 2124 static void 2125 netmap_mem_ext_unregister(struct netmap_mem_ext *e) 2126 { 2127 if (e->prev) 2128 e->prev->next = e->next; 2129 else 2130 netmap_mem_ext_list = e->next; 2131 if (e->next) 2132 e->next->prev = e->prev; 2133 e->prev = e->next = NULL; 2134 } 2135 2136 static struct netmap_mem_ext * 2137 netmap_mem_ext_search(struct nm_os_extmem *os) 2138 { 2139 struct netmap_mem_ext *e; 2140 2141 NM_MTX_LOCK(nm_mem_ext_list_lock); 2142 for (e = netmap_mem_ext_list; e; e = e->next) { 2143 if (nm_os_extmem_isequal(e->os, os)) { 2144 netmap_mem_get(&e->up); 2145 break; 2146 } 2147 } 2148 NM_MTX_UNLOCK(nm_mem_ext_list_lock); 2149 
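	/* e is NULL here if no matching extmem allocator was found */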
#ifdef WITH_EXTMEM
struct netmap_mem_ext {
	struct netmap_mem_d up;

	struct nm_os_extmem *os;
	struct netmap_mem_ext *next, *prev;
};

/* call with nm_mem_list_lock held */
static void
netmap_mem_ext_register(struct netmap_mem_ext *e)
{
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (netmap_mem_ext_list)
		netmap_mem_ext_list->prev = e;
	e->next = netmap_mem_ext_list;
	netmap_mem_ext_list = e;
	e->prev = NULL;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
}

/* call with nm_mem_list_lock held */
static void
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		netmap_mem_ext_list = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
}

static struct netmap_mem_ext *
netmap_mem_ext_search(struct nm_os_extmem *os)
{
	struct netmap_mem_ext *e;

	NM_MTX_LOCK(nm_mem_ext_list_lock);
	for (e = netmap_mem_ext_list; e; e = e->next) {
		if (nm_os_extmem_isequal(e->os, os)) {
			netmap_mem_get(&e->up);
			break;
		}
	}
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);

	return e;
}


static void
netmap_mem_ext_delete(struct netmap_mem_d *d)
{
	int i;
	struct netmap_mem_ext *e =
		(struct netmap_mem_ext *)d;

	netmap_mem_ext_unregister(e);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &d->pools[i];

		if (p->lut) {
			nm_free_lut(p->lut, p->objtotal);
			p->lut = NULL;
		}
	}
	if (e->os)
		nm_os_extmem_delete(e->os);
	netmap_mem2_delete(d);
}

static int
netmap_mem_ext_config(struct netmap_mem_d *nmd)
{
	return 0;
}

struct netmap_mem_ops netmap_mem_ext_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem_ext_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem_ext_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

struct netmap_mem_d *
netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
{
	int error = 0;
	int i, j;
	struct netmap_mem_ext *nme;
	char *clust;
	size_t off;
	struct nm_os_extmem *os = NULL;
	int nr_pages;

	// XXX sanity checks
	if (pi->nr_if_pool_objtotal == 0)
		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
	if (pi->nr_if_pool_objsize == 0)
		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
	if (pi->nr_ring_pool_objtotal == 0)
		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
	if (pi->nr_ring_pool_objsize == 0)
		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
	if (pi->nr_buf_pool_objtotal == 0)
		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
	if (pi->nr_buf_pool_objsize == 0)
		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
	D("if %d %d ring %d %d buf %d %d",
		pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
		pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
		pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);

	os = nm_os_extmem_create(usrptr, pi, &error);
	if (os == NULL) {
		D("os extmem creation failed");
		goto out;
	}

	nme = netmap_mem_ext_search(os);
	if (nme) {
		nm_os_extmem_delete(os);
		return &nme->up;
	}
	D("not found, creating new");

	nme = _netmap_mem_private_new(sizeof(*nme),
			(struct netmap_obj_params[]){
				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
			&netmap_mem_ext_ops,
			&error);
	if (nme == NULL)
		goto out_unmap;

	nr_pages = nm_os_extmem_nr_pages(os);

	/* from now on the pages will be released by the nme destructor;
	 * we set os = NULL to prevent a second release in out_unmap below
	 */
	nme->os = os;
	os = NULL; /* pass ownership */

	clust = nm_os_extmem_nextpage(nme->os);
	off = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nme->up.pools[i];
		struct netmap_obj_params *o = &nme->up.params[i];

		p->_objsize = o->size;
		p->_clustsize = o->size;
		p->_clustentries = 1;

		p->lut = nm_alloc_lut(o->num);
		if (p->lut == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
		if (p->invalid_bitmap == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		if (nr_pages == 0) {
			p->objtotal = 0;
			p->memtotal = 0;
			p->objfree = 0;
			continue;
		}

		for (j = 0; j < o->num && nr_pages > 0; j++) {
			size_t noff;

			p->lut[j].vaddr = clust + off;
#if !defined(linux) && !defined(_WIN32)
			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
#endif
			ND("%s %d at %p", p->name, j, p->lut[j].vaddr);
			noff = off + p->_objsize;
			if (noff < PAGE_SIZE) {
				off = noff;
				continue;
			}
			ND("too big, recomputing offset...");
			while (noff >= PAGE_SIZE) {
				char *old_clust = clust;
				noff -= PAGE_SIZE;
				clust = nm_os_extmem_nextpage(nme->os);
				nr_pages--;
				ND("noff %zu page %p nr_pages %d", noff,
						clust, nr_pages);
				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
					(nr_pages == 0 ||
					 old_clust + PAGE_SIZE != clust))
				{
					/* out of space or non contiguous,
					 * drop this object */
					p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U);
					ND("non contiguous at off %zu, drop", noff);
				}
				if (nr_pages == 0)
					break;
			}
			off = noff;
		}
		p->objtotal = j;
		p->numclusters = p->objtotal;
		p->memtotal = j * p->_objsize;
		ND("%d memtotal %u", j, p->memtotal);
	}

	netmap_mem_ext_register(nme);

	return &nme->up;

out_delete:
	netmap_mem_put(&nme->up);
out_unmap:
	if (os)
		nm_os_extmem_delete(os);
out:
	if (perror)
		*perror = error;
	return NULL;

}
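
/*
 * netmap_mem_ext_create() above keeps one validity bit per object:
 * object j lives in word j>>5 of invalid_bitmap, at bit j&31, and is
 * skipped by the allocator when the bit is set.  The two helpers below
 * are an illustrative sketch of that encoding only; their names are
 * hypothetical and they are not used by netmap, which relies on
 * nm_isset() and the open-coded store above.
 */
#if 0
static inline void
example_mark_invalid(uint32_t *bitmap, u_int j)
{
	bitmap[j >> 5] |= 1U << (j & 31U);
}

static inline int
example_is_invalid(const uint32_t *bitmap, u_int j)
{
	return (bitmap[j >> 5] >> (j & 31U)) & 1U;
}
#endif
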
#endif /* WITH_EXTMEM */


#ifdef WITH_PTNETMAP_GUEST
struct mem_pt_if {
	struct mem_pt_if *next;
	struct ifnet *ifp;
	unsigned int nifp_offset;
};

/* Netmap allocator for ptnetmap guests. */
struct netmap_mem_ptg {
	struct netmap_mem_d up;

	vm_paddr_t nm_paddr;		/* physical address in the guest */
	void *nm_addr;			/* virtual address in the guest */
	struct netmap_lut buf_lut;	/* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;		/* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
};

/* Link a passthrough interface to a passthrough netmap allocator. */
static int
netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp,
			    unsigned int nifp_offset)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));

	if (!ptif) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	ptif->ifp = ifp;
	ptif->nifp_offset = nifp_offset;

	if (ptnmd->pt_ifs) {
		ptif->next = ptnmd->pt_ifs;
	}
	ptnmd->pt_ifs = ptif;

	NMA_UNLOCK(nmd);

	D("added (ifp=%p,nifp_offset=%u)", ptif->ifp, ptif->nifp_offset);

	return 0;
}

/* Called with NMA_LOCK(nmd) held. */
static struct mem_pt_if *
netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *curr;

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			return curr;
		}
	}

	return NULL;
}

/* Unlink a passthrough interface from a passthrough netmap allocator. */
int
netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *prev = NULL;
	struct mem_pt_if *curr;
	int ret = -1;

	NMA_LOCK(nmd);

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			if (prev) {
				prev->next = curr->next;
			} else {
				ptnmd->pt_ifs = curr->next;
			}
			D("removed (ifp=%p,nifp_offset=%u)",
			  curr->ifp, curr->nifp_offset);
			nm_os_free(curr);
			ret = 0;
			break;
		}
		prev = curr;
	}

	NMA_UNLOCK(nmd);

	return ret;
}

static int
netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		return EINVAL;
	}

	*lut = ptnmd->buf_lut;
	return 0;
}

static int
netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
			     u_int *memflags, uint16_t *id)
{
	int error = 0;

	error = nmd->ops->nmd_config(nmd);
	if (error)
		goto out;

	if (size)
		*size = nmd->nm_totalsize;
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;

out:

	return error;
}

static vm_paddr_t
netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	vm_paddr_t paddr;
	/* if the offset is valid, just return csb->base_addr + off */
	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
	ND("off %lx paddr %lx", off, (unsigned long)paddr);
	return paddr;
}

static int
netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	uint64_t mem_size;
	uint32_t bufsize;
	uint32_t nbuffers;
	uint32_t poolofs;
	vm_paddr_t paddr;
	char *vaddr;
	int i;
	int error = 0;

	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (ptnmd->ptn_dev == NULL) {
		D("ptnetmap memdev not attached");
		error = ENOMEM;
		goto out;
	}
	/* Map memory through ptnetmap-memdev BAR. */
	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
				      &ptnmd->nm_addr, &mem_size);
	if (error)
		goto out;

	/* Initialize the lut using the information contained in the
	 * ptnetmap memory device. */
	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					  PTNET_MDEV_IO_BUF_POOL_OBJNUM);

	/* allocate the lut */
	if (ptnmd->buf_lut.lut == NULL) {
		D("allocating lut");
		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
		if (ptnmd->buf_lut.lut == NULL) {
			D("lut allocation failed");
			return ENOMEM;
		}
	}

	/* we have physically contiguous memory mapped through PCI BAR */
	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OFS);
	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
	paddr = ptnmd->nm_paddr + poolofs;

	for (i = 0; i < nbuffers; i++) {
		ptnmd->buf_lut.lut[i].vaddr = vaddr;
		vaddr += bufsize;
		paddr += bufsize;
	}

	ptnmd->buf_lut.objtotal = nbuffers;
	ptnmd->buf_lut.objsize = bufsize;
	nmd->nm_totalsize = (unsigned int)mem_size;

	/* Initialize these fields as they are needed by
	 * netmap_mem_bufsize().
	 * XXX please improve this, why do we need this
	 * replication? maybe nmd->pools[] should not be
	 * there for the guest allocator? */
	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;

	nmd->flags |= NETMAP_MEM_FINALIZED;
out:
	return error;
}

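/*
 * After netmap_mem_pt_guest_finalize() the buffer pool is a single
 * physically contiguous range inside the memdev BAR, so the address of
 * buffer i is simply nm_addr + poolofs + i * bufsize.  The helper below
 * is an illustrative sketch of that arithmetic only; its name is
 * hypothetical and netmap itself always goes through buf_lut.lut[i].vaddr.
 */
#if 0
static void *
example_pt_guest_buf_vaddr(struct netmap_mem_ptg *ptnmd, uint32_t poolofs,
			   uint32_t bufsize, uint32_t i)
{
	return (char *)ptnmd->nm_addr + poolofs + (size_t)i * bufsize;
}
#endif
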
static void
netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (nmd->active == 1 &&
		(nmd->flags & NETMAP_MEM_FINALIZED)) {
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
		/* unmap ptnetmap-memdev memory */
		if (ptnmd->ptn_dev) {
			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
		}
		ptnmd->nm_addr = NULL;
		ptnmd->nm_paddr = 0;
	}
}

static ssize_t
netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
}

static void
netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		D("deleting %p", nmd);
	if (nmd->active > 0)
		D("bug: deleting mem allocator with active=%d!", nmd->active);
	if (netmap_verbose)
		D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	nm_os_free(nmd);
}

static struct netmap_if *
netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp = NULL;

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		D("Error: interface %p is not in passthrough", na->ifp);
		goto out;
	}

	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
			ptif->nifp_offset);
out:
	return nifp;
}

static void
netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	struct mem_pt_if *ptif;

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		D("Error: interface %p is not in passthrough", na->ifp);
	}
}

static int
netmap_mem_pt_guest_rings_create(struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp;
	int i, error = -1;

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		D("Error: interface %p is not in passthrough", na->ifp);
		goto out;
	}


	/* point each kring to the corresponding backend ring */
	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
	for (i = 0; i <= na->num_tx_rings; i++) {
		struct netmap_kring *kring = na->tx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp + nifp->ring_ofs[i]);
	}
	for (i = 0; i <= na->num_rx_rings; i++) {
		struct netmap_kring *kring = na->rx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp +
			 nifp->ring_ofs[i + na->num_tx_rings + 1]);
	}

	error = 0;
out:
	return error;
}
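
/*
 * The loops above rely on the ring_ofs[] layout of struct netmap_if:
 * the first num_tx_rings+1 slots describe the TX rings (including the
 * host TX ring), followed by the num_rx_rings+1 RX slots.  A minimal
 * sketch of that indexing, with a hypothetical helper name, is kept
 * below for illustration only.
 */
#if 0
static struct netmap_ring *
example_ring_from_nifp(struct netmap_if *nifp, u_int num_tx_rings,
		       enum txrx t, u_int i)
{
	u_int slot = (t == NR_TX) ? i : i + num_tx_rings + 1;

	return (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[slot]);
}
#endif
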
static void
netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na)
{
#if 0
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];

			kring->ring = NULL;
		}
	}
#endif
}

static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
	.nmd_get_info = netmap_mem_pt_guest_get_info,
	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
	.nmd_config = netmap_mem_pt_guest_config,
	.nmd_finalize = netmap_mem_pt_guest_finalize,
	.nmd_deref = netmap_mem_pt_guest_deref,
	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
	.nmd_delete = netmap_mem_pt_guest_delete,
	.nmd_if_new = netmap_mem_pt_guest_if_new,
	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
};

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
{
	struct netmap_mem_d *mem = NULL;
	struct netmap_mem_d *scan = netmap_last_mem_d;

	do {
		/* find ptnetmap allocator through host ID */
		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
			((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
			mem = scan;
			mem->refcount++;
			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
			break;
		}
		scan = scan->next;
	} while (scan != netmap_last_mem_d);

	return mem;
}

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_create(nm_memid_t mem_id)
{
	struct netmap_mem_ptg *ptnmd;
	int err = 0;

	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
	if (ptnmd == NULL) {
		err = ENOMEM;
		goto error;
	}

	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
	ptnmd->host_mem_id = mem_id;
	ptnmd->pt_ifs = NULL;

	/* Assign new id in the guest (we have the lock) */
	err = nm_mem_assign_id_locked(&ptnmd->up);
	if (err)
		goto error;

	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
	ptnmd->up.flags |= NETMAP_MEM_IO;

	NMA_LOCK_INIT(&ptnmd->up);

	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);


	return &ptnmd->up;
error:
	netmap_mem_pt_guest_delete(&ptnmd->up);
	return NULL;
}

/*
 * find host id in guest allocators and create guest allocator
 * if it is not there
 */
static struct netmap_mem_d *
netmap_mem_pt_guest_get(nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_mem_pt_guest_find_memid(mem_id);
	if (nmd == NULL) {
		nmd = netmap_mem_pt_guest_create(mem_id);
	}
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return nmd;
}

/*
 * The guest allocator can be created by ptnetmap_memdev (during the device
 * attach) or by the ptnet device, during netmap_attach.
 *
 * The order is not important (it differs between Linux and FreeBSD):
 * whichever comes first creates the allocator, and the other one simply
 * attaches to it.
 */

/* Called when ptnetmap_memdev is attaching, to attach a new allocator in
 * the guest */
struct netmap_mem_d *
netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;
	struct netmap_mem_ptg *ptnmd;

	nmd = netmap_mem_pt_guest_get(mem_id);

	/* assign this device to the guest allocator */
	if (nmd) {
		ptnmd = (struct netmap_mem_ptg *)nmd;
		ptnmd->ptn_dev = ptn_dev;
	}

	return nmd;
}

/* Called when the ptnet device is attaching */
struct netmap_mem_d *
netmap_mem_pt_guest_new(struct ifnet *ifp,
			unsigned int nifp_offset,
			unsigned int memid)
{
	struct netmap_mem_d *nmd;

	if (ifp == NULL) {
		return NULL;
	}

	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);

	if (nmd) {
		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
	}

	return nmd;
}

#endif /* WITH_PTNETMAP_GUEST */
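
/*
 * Illustration of the two attach orders described above, assuming a
 * hypothetical host memory id 'host_id': whichever of the two paths runs
 * first, both end up on the same guest allocator because
 * netmap_mem_pt_guest_get() keys the lookup on the host id.  This is a
 * sketch only and is not compiled.
 */
#if 0
static void
example_pt_guest_attach_orders(struct ptnetmap_memdev *ptn_dev,
			       struct ifnet *ifp, unsigned int nifp_offset,
			       nm_memid_t host_id)
{
	struct netmap_mem_d *a, *b;

	/* memdev first, ptnet interface second ... */
	a = netmap_mem_pt_guest_attach(ptn_dev, host_id);
	b = netmap_mem_pt_guest_new(ifp, nifp_offset, host_id);
	/* ... or the reverse order: a and b refer to the same allocator */
}
#endif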