1 /* 2 * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 */ 25 26 #ifdef linux 27 #include "bsd_glue.h" 28 #endif /* linux */ 29 30 #ifdef __APPLE__ 31 #include "osx_glue.h" 32 #endif /* __APPLE__ */ 33 34 #ifdef __FreeBSD__ 35 #include <sys/cdefs.h> /* prerequisite */ 36 __FBSDID("$FreeBSD$"); 37 38 #include <sys/types.h> 39 #include <sys/malloc.h> 40 #include <sys/proc.h> 41 #include <vm/vm.h> /* vtophys */ 42 #include <vm/pmap.h> /* vtophys */ 43 #include <sys/socket.h> /* sockaddrs */ 44 #include <sys/selinfo.h> 45 #include <sys/sysctl.h> 46 #include <net/if.h> 47 #include <net/if_var.h> 48 #include <net/vnet.h> 49 #include <machine/bus.h> /* bus_dmamap_* */ 50 51 #endif /* __FreeBSD__ */ 52 53 #include <net/netmap.h> 54 #include <dev/netmap/netmap_kern.h> 55 #include "netmap_mem2.h" 56 57 #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ 58 59 #define NETMAP_POOL_MAX_NAMSZ 32 60 61 62 enum { 63 NETMAP_IF_POOL = 0, 64 NETMAP_RING_POOL, 65 NETMAP_BUF_POOL, 66 NETMAP_POOLS_NR 67 }; 68 69 70 struct netmap_obj_params { 71 u_int size; 72 u_int num; 73 }; 74 75 struct netmap_obj_pool { 76 char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ 77 78 /* ---------------------------------------------------*/ 79 /* these are only meaningful if the pool is finalized */ 80 /* (see 'finalized' field in netmap_mem_d) */ 81 u_int objtotal; /* actual total number of objects. */ 82 u_int memtotal; /* actual total memory space */ 83 u_int numclusters; /* actual number of clusters */ 84 85 u_int objfree; /* number of free objects. 
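	 * (decremented by netmap_obj_malloc(), incremented back by
	 * netmap_obj_free())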
 */

	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T		NM_MTX_T


struct netmap_mem_ops {
	void (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int  (*nmd_get_info)(struct netmap_mem_d *, u_int *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *);
	void (*nmd_deref)(struct netmap_mem_d *);
	ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *);
	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_adapter *);
};

typedef uint16_t nm_memid_t;

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	u_int nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	struct netmap_mem_ops *ops;
};

#define NMD_DEFCB(t0, name) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd) \
{ \
	return nmd->ops->nmd_##name(nmd); \
}

#define NMD_DEFCB1(t0, name, t1) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1) \
{ \
	return nmd->ops->nmd_##name(nmd, a1); \
}

#define NMD_DEFCB3(t0, name, t1, t2, t3) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1, t2 a2, t3 a3) \
{ \
	return nmd->ops->nmd_##name(nmd, a1, a2, a3); \
}

#define NMD_DEFNACB(t0, name) \
t0 \
netmap_mem_##name(struct netmap_adapter *na) \
{ \
	return na->nm_mem->ops->nmd_##name(na); \
}

#define NMD_DEFNACB1(t0, name, t1) \
t0 \
netmap_mem_##name(struct netmap_adapter *na, t1 a1) \
{ \
	return na->nm_mem->ops->nmd_##name(na, a1); \
}

NMD_DEFCB1(void, get_lut, struct netmap_lut *);
NMD_DEFCB3(int, get_info, u_int *, u_int *, uint16_t *);
NMD_DEFCB1(vm_paddr_t, ofstophys, vm_ooffset_t);
static int netmap_mem_config(struct netmap_mem_d *);
NMD_DEFCB(int, config);
NMD_DEFCB1(ssize_t, if_offset, const void *);
NMD_DEFCB(void, delete);

NMD_DEFNACB(struct netmap_if *, if_new);
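/*
 * For reference, each NMD_DEF*CB* instantiation above and below expands
 * to a one-line wrapper that dispatches through the allocator's ops
 * table.  A sketch of what NMD_DEFCB1(void, get_lut, struct netmap_lut *)
 * generates (illustrative only, kept out of the build):
 */
#if 0
void
netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut * a1)
{
	return nmd->ops->nmd_get_lut(nmd, a1);
}
#endif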
NMD_DEFNACB1(void, if_delete, struct netmap_if *);
NMD_DEFNACB(int, rings_create);
NMD_DEFNACB(void, rings_delete);

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);

#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)

#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	printf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif

#ifdef NM_DEBUG_MEM_PUTGET
void __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
#else
void netmap_mem_get(struct netmap_mem_d *nmd)
#endif
{
	NMA_LOCK(nmd);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NMA_UNLOCK(nmd);
}

#ifdef NM_DEBUG_MEM_PUTGET
void __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
#else
void netmap_mem_put(struct netmap_mem_d *nmd)
#endif
{
	int last;
	NMA_LOCK(nmd);
	last = (--nmd->refcount == 0);
	NM_DBG_REFC(nmd, func, line);
	NMA_UNLOCK(nmd);
	if (last)
		netmap_mem_delete(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
		return ENOMEM;
	} else {
		nmd->ops->nmd_finalize(nmd);
	}

	if (!nmd->lasterr && na->pdev)
		netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);

	return nmd->lasterr;
}

void
netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	NMA_LOCK(nmd);
	netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
	NMA_UNLOCK(nmd);
	return nmd->ops->nmd_deref(nmd);
}


/* accessor functions */
static void
netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
}

struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num = NETMAP_BUF_MAX_NUM,
	},
};

struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num = 1,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num = 4098,
	},
};


/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Each virtual (VALE) port has its own allocator.
 */
extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator.
*/ 317 .pools = { 318 [NETMAP_IF_POOL] = { 319 .name = "netmap_if", 320 .objminsize = sizeof(struct netmap_if), 321 .objmaxsize = 4096, 322 .nummin = 10, /* don't be stingy */ 323 .nummax = 10000, /* XXX very large */ 324 }, 325 [NETMAP_RING_POOL] = { 326 .name = "netmap_ring", 327 .objminsize = sizeof(struct netmap_ring), 328 .objmaxsize = 32*PAGE_SIZE, 329 .nummin = 2, 330 .nummax = 1024, 331 }, 332 [NETMAP_BUF_POOL] = { 333 .name = "netmap_buf", 334 .objminsize = 64, 335 .objmaxsize = 65536, 336 .nummin = 4, 337 .nummax = 1000000, /* one million! */ 338 }, 339 }, 340 341 .nm_id = 1, 342 .nm_grp = -1, 343 344 .prev = &nm_mem, 345 .next = &nm_mem, 346 347 .ops = &netmap_mem_global_ops 348 }; 349 350 351 struct netmap_mem_d *netmap_last_mem_d = &nm_mem; 352 353 /* blueprint for the private memory allocators */ 354 extern struct netmap_mem_ops netmap_mem_private_ops; /* forward */ 355 const struct netmap_mem_d nm_blueprint = { 356 .pools = { 357 [NETMAP_IF_POOL] = { 358 .name = "%s_if", 359 .objminsize = sizeof(struct netmap_if), 360 .objmaxsize = 4096, 361 .nummin = 1, 362 .nummax = 100, 363 }, 364 [NETMAP_RING_POOL] = { 365 .name = "%s_ring", 366 .objminsize = sizeof(struct netmap_ring), 367 .objmaxsize = 32*PAGE_SIZE, 368 .nummin = 2, 369 .nummax = 1024, 370 }, 371 [NETMAP_BUF_POOL] = { 372 .name = "%s_buf", 373 .objminsize = 64, 374 .objmaxsize = 65536, 375 .nummin = 4, 376 .nummax = 1000000, /* one million! */ 377 }, 378 }, 379 380 .flags = NETMAP_MEM_PRIVATE, 381 382 .ops = &netmap_mem_private_ops 383 }; 384 385 /* memory allocator related sysctls */ 386 387 #define STRINGIFY(x) #x 388 389 390 #define DECLARE_SYSCTLS(id, name) \ 391 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ 392 CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ 393 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ 394 CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ 395 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ 396 CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ 397 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ 398 CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ 399 SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ 400 CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ 401 "Default size of private netmap " STRINGIFY(name) "s"); \ 402 SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ 403 CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ 404 "Default number of private netmap " STRINGIFY(name) "s") 405 406 SYSCTL_DECL(_dev_netmap); 407 DECLARE_SYSCTLS(NETMAP_IF_POOL, if); 408 DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); 409 DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); 410 411 static int 412 nm_mem_assign_id(struct netmap_mem_d *nmd) 413 { 414 nm_memid_t id; 415 struct netmap_mem_d *scan = netmap_last_mem_d; 416 int error = ENOMEM; 417 418 NMA_LOCK(&nm_mem); 419 420 do { 421 /* we rely on unsigned wrap around */ 422 id = scan->nm_id + 1; 423 if (id == 0) /* reserve 0 as error value */ 424 id = 1; 425 scan = scan->next; 426 if (id != scan->nm_id) { 427 nmd->nm_id = id; 428 nmd->prev = scan->prev; 429 nmd->next = scan; 430 scan->prev->next = nmd; 431 scan->prev = nmd; 432 netmap_last_mem_d = nmd; 433 error = 0; 434 break; 435 } 436 } while (scan != netmap_last_mem_d); 437 438 NMA_UNLOCK(&nm_mem); 439 return error; 440 } 441 442 static void 443 nm_mem_release_id(struct netmap_mem_d *nmd) 444 { 445 NMA_LOCK(&nm_mem); 446 447 
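	/*
	 * Unlink nmd from the circular list of allocators (the reverse
	 * of the insertion done by nm_mem_assign_id()); nm_mem itself is
	 * the permanent head of that list.
	 */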
nmd->prev->next = nmd->next; 448 nmd->next->prev = nmd->prev; 449 450 if (netmap_last_mem_d == nmd) 451 netmap_last_mem_d = nmd->prev; 452 453 nmd->prev = nmd->next = NULL; 454 455 NMA_UNLOCK(&nm_mem); 456 } 457 458 static int 459 nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev) 460 { 461 int err = 0, id; 462 id = nm_iommu_group_id(dev); 463 if (netmap_verbose) 464 D("iommu_group %d", id); 465 466 NMA_LOCK(nmd); 467 468 if (nmd->nm_grp < 0) 469 nmd->nm_grp = id; 470 471 if (nmd->nm_grp != id) 472 nmd->lasterr = err = ENOMEM; 473 474 NMA_UNLOCK(nmd); 475 return err; 476 } 477 478 /* 479 * First, find the allocator that contains the requested offset, 480 * then locate the cluster through a lookup table. 481 */ 482 static vm_paddr_t 483 netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) 484 { 485 int i; 486 vm_ooffset_t o = offset; 487 vm_paddr_t pa; 488 struct netmap_obj_pool *p; 489 490 NMA_LOCK(nmd); 491 p = nmd->pools; 492 493 for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { 494 if (offset >= p[i].memtotal) 495 continue; 496 // now lookup the cluster's address 497 pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) + 498 offset % p[i]._objsize; 499 NMA_UNLOCK(nmd); 500 return pa; 501 } 502 /* this is only in case of errors */ 503 D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, 504 p[NETMAP_IF_POOL].memtotal, 505 p[NETMAP_IF_POOL].memtotal 506 + p[NETMAP_RING_POOL].memtotal, 507 p[NETMAP_IF_POOL].memtotal 508 + p[NETMAP_RING_POOL].memtotal 509 + p[NETMAP_BUF_POOL].memtotal); 510 NMA_UNLOCK(nmd); 511 return 0; // XXX bad address 512 } 513 514 static int 515 netmap_mem2_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags, 516 nm_memid_t *id) 517 { 518 int error = 0; 519 NMA_LOCK(nmd); 520 error = netmap_mem_config(nmd); 521 if (error) 522 goto out; 523 if (size) { 524 if (nmd->flags & NETMAP_MEM_FINALIZED) { 525 *size = nmd->nm_totalsize; 526 } else { 527 int i; 528 *size = 0; 529 for (i = 0; i < NETMAP_POOLS_NR; i++) { 530 struct netmap_obj_pool *p = nmd->pools + i; 531 *size += (p->_numclusters * p->_clustsize); 532 } 533 } 534 } 535 if (memflags) 536 *memflags = nmd->flags; 537 if (id) 538 *id = nmd->nm_id; 539 out: 540 NMA_UNLOCK(nmd); 541 return error; 542 } 543 544 /* 545 * we store objects by kernel address, need to find the offset 546 * within the pool to export the value to userspace. 
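 * (the exported offset is what userspace adds to the base of the
 * mmap()ed region to reach the same object)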
547 * Algorithm: scan until we find the cluster, then add the 548 * actual offset in the cluster 549 */ 550 static ssize_t 551 netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) 552 { 553 int i, k = p->_clustentries, n = p->objtotal; 554 ssize_t ofs = 0; 555 556 for (i = 0; i < n; i += k, ofs += p->_clustsize) { 557 const char *base = p->lut[i].vaddr; 558 ssize_t relofs = (const char *) vaddr - base; 559 560 if (relofs < 0 || relofs >= p->_clustsize) 561 continue; 562 563 ofs = ofs + relofs; 564 ND("%s: return offset %d (cluster %d) for pointer %p", 565 p->name, ofs, i, vaddr); 566 return ofs; 567 } 568 D("address %p is not contained inside any cluster (%s)", 569 vaddr, p->name); 570 return 0; /* An error occurred */ 571 } 572 573 /* Helper functions which convert virtual addresses to offsets */ 574 #define netmap_if_offset(n, v) \ 575 netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) 576 577 #define netmap_ring_offset(n, v) \ 578 ((n)->pools[NETMAP_IF_POOL].memtotal + \ 579 netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) 580 581 #define netmap_buf_offset(n, v) \ 582 ((n)->pools[NETMAP_IF_POOL].memtotal + \ 583 (n)->pools[NETMAP_RING_POOL].memtotal + \ 584 netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v))) 585 586 587 static ssize_t 588 netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) 589 { 590 ssize_t v; 591 NMA_LOCK(nmd); 592 v = netmap_if_offset(nmd, addr); 593 NMA_UNLOCK(nmd); 594 return v; 595 } 596 597 /* 598 * report the index, and use start position as a hint, 599 * otherwise buffer allocation becomes terribly expensive. 600 */ 601 static void * 602 netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) 603 { 604 uint32_t i = 0; /* index in the bitmap */ 605 uint32_t mask, j; /* slot counter */ 606 void *vaddr = NULL; 607 608 if (len > p->_objsize) { 609 D("%s request size %d too large", p->name, len); 610 // XXX cannot reduce the size 611 return NULL; 612 } 613 614 if (p->objfree == 0) { 615 D("no more %s objects", p->name); 616 return NULL; 617 } 618 if (start) 619 i = *start; 620 621 /* termination is guaranteed by p->free, but better check bounds on i */ 622 while (vaddr == NULL && i < p->bitmap_slots) { 623 uint32_t cur = p->bitmap[i]; 624 if (cur == 0) { /* bitmask is fully used */ 625 i++; 626 continue; 627 } 628 /* locate a slot */ 629 for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) 630 ; 631 632 p->bitmap[i] &= ~mask; /* mark object as in use */ 633 p->objfree--; 634 635 vaddr = p->lut[i * 32 + j].vaddr; 636 if (index) 637 *index = i * 32 + j; 638 } 639 ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr); 640 641 if (start) 642 *start = i; 643 return vaddr; 644 } 645 646 647 /* 648 * free by index, not by address. 649 * XXX should we also cleanup the content ? 650 */ 651 static int 652 netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) 653 { 654 uint32_t *ptr, mask; 655 656 if (j >= p->objtotal) { 657 D("invalid index %u, max %u", j, p->objtotal); 658 return 1; 659 } 660 ptr = &p->bitmap[j / 32]; 661 mask = (1 << (j % 32)); 662 if (*ptr & mask) { 663 D("ouch, double free on buffer %d", j); 664 return 1; 665 } else { 666 *ptr |= mask; 667 p->objfree++; 668 return 0; 669 } 670 } 671 672 /* 673 * free by address. 
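 * (scan the clusters to recover the object index, then free by index).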
This is slow but is only used for a few 674 * objects (rings, nifp) 675 */ 676 static void 677 netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) 678 { 679 u_int i, j, n = p->numclusters; 680 681 for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { 682 void *base = p->lut[i * p->_clustentries].vaddr; 683 ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; 684 685 /* Given address, is out of the scope of the current cluster.*/ 686 if (vaddr < base || relofs >= p->_clustsize) 687 continue; 688 689 j = j + relofs / p->_objsize; 690 /* KASSERT(j != 0, ("Cannot free object 0")); */ 691 netmap_obj_free(p, j); 692 return; 693 } 694 D("address %p is not contained inside any cluster (%s)", 695 vaddr, p->name); 696 } 697 698 #define netmap_mem_bufsize(n) \ 699 ((n)->pools[NETMAP_BUF_POOL]._objsize) 700 701 #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) 702 #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) 703 #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) 704 #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) 705 #define netmap_buf_malloc(n, _pos, _index) \ 706 netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index) 707 708 709 #if 0 // XXX unused 710 /* Return the index associated to the given packet buffer */ 711 #define netmap_buf_index(n, v) \ 712 (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) 713 #endif 714 715 /* 716 * allocate extra buffers in a linked list. 717 * returns the actual number. 718 */ 719 uint32_t 720 netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n) 721 { 722 struct netmap_mem_d *nmd = na->nm_mem; 723 uint32_t i, pos = 0; /* opaque, scan position in the bitmap */ 724 725 NMA_LOCK(nmd); 726 727 *head = 0; /* default, 'null' index ie empty list */ 728 for (i = 0 ; i < n; i++) { 729 uint32_t cur = *head; /* save current head */ 730 uint32_t *p = netmap_buf_malloc(nmd, &pos, head); 731 if (p == NULL) { 732 D("no more buffers after %d of %d", i, n); 733 *head = cur; /* restore */ 734 break; 735 } 736 RD(5, "allocate buffer %d -> %d", *head, cur); 737 *p = cur; /* link to previous head */ 738 } 739 740 NMA_UNLOCK(nmd); 741 742 return i; 743 } 744 745 static void 746 netmap_extra_free(struct netmap_adapter *na, uint32_t head) 747 { 748 struct lut_entry *lut = na->na_lut.lut; 749 struct netmap_mem_d *nmd = na->nm_mem; 750 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 751 uint32_t i, cur, *buf; 752 753 D("freeing the extra list"); 754 for (i = 0; head >=2 && head < p->objtotal; i++) { 755 cur = head; 756 buf = lut[head].vaddr; 757 head = *buf; 758 *buf = 0; 759 if (netmap_obj_free(p, cur)) 760 break; 761 } 762 if (head != 0) 763 D("breaking with head %d", head); 764 D("freed %d buffers", i); 765 } 766 767 768 /* Return nonzero on error */ 769 static int 770 netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 771 { 772 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 773 u_int i = 0; /* slot counter */ 774 uint32_t pos = 0; /* slot in p->bitmap */ 775 uint32_t index = 0; /* buffer index */ 776 777 for (i = 0; i < n; i++) { 778 void *vaddr = netmap_buf_malloc(nmd, &pos, &index); 779 if (vaddr == NULL) { 780 D("no more buffers after %d of %d", i, n); 781 goto cleanup; 782 } 783 slot[i].buf_idx = index; 784 slot[i].len = p->_objsize; 785 slot[i].flags = 0; 786 } 787 788 ND("allocated 
%d buffers, %d available, first at %d", n, p->objfree, pos); 789 return (0); 790 791 cleanup: 792 while (i > 0) { 793 i--; 794 netmap_obj_free(p, slot[i].buf_idx); 795 } 796 bzero(slot, n * sizeof(slot[0])); 797 return (ENOMEM); 798 } 799 800 static void 801 netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index) 802 { 803 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 804 u_int i; 805 806 for (i = 0; i < n; i++) { 807 slot[i].buf_idx = index; 808 slot[i].len = p->_objsize; 809 slot[i].flags = 0; 810 } 811 } 812 813 814 static void 815 netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) 816 { 817 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 818 819 if (i < 2 || i >= p->objtotal) { 820 D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); 821 return; 822 } 823 netmap_obj_free(p, i); 824 } 825 826 827 static void 828 netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 829 { 830 u_int i; 831 832 for (i = 0; i < n; i++) { 833 if (slot[i].buf_idx > 2) 834 netmap_free_buf(nmd, slot[i].buf_idx); 835 } 836 } 837 838 static void 839 netmap_reset_obj_allocator(struct netmap_obj_pool *p) 840 { 841 842 if (p == NULL) 843 return; 844 if (p->bitmap) 845 free(p->bitmap, M_NETMAP); 846 p->bitmap = NULL; 847 if (p->lut) { 848 u_int i; 849 size_t sz = p->_clustsize; 850 851 /* 852 * Free each cluster allocated in 853 * netmap_finalize_obj_allocator(). The cluster start 854 * addresses are stored at multiples of p->_clusterentries 855 * in the lut. 856 */ 857 for (i = 0; i < p->objtotal; i += p->_clustentries) { 858 if (p->lut[i].vaddr) 859 contigfree(p->lut[i].vaddr, sz, M_NETMAP); 860 } 861 bzero(p->lut, sizeof(struct lut_entry) * p->objtotal); 862 #ifdef linux 863 vfree(p->lut); 864 #else 865 free(p->lut, M_NETMAP); 866 #endif 867 } 868 p->lut = NULL; 869 p->objtotal = 0; 870 p->memtotal = 0; 871 p->numclusters = 0; 872 p->objfree = 0; 873 } 874 875 /* 876 * Free all resources related to an allocator. 877 */ 878 static void 879 netmap_destroy_obj_allocator(struct netmap_obj_pool *p) 880 { 881 if (p == NULL) 882 return; 883 netmap_reset_obj_allocator(p); 884 } 885 886 /* 887 * We receive a request for objtotal objects, of size objsize each. 888 * Internally we may round up both numbers, as we allocate objects 889 * in small clusters multiple of the page size. 890 * We need to keep track of objtotal and clustentries, 891 * as they are needed when freeing memory. 892 * 893 * XXX note -- userspace needs the buffers to be contiguous, 894 * so we cannot afford gaps at the end of a cluster. 
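 *
 * For example, with 4 KB pages the default 2048-byte buffers pack two per
 * page, so the search in netmap_config_obj_allocator() settles on a
 * one-page cluster; an object size that does not divide the page evenly
 * gets a larger cluster, the smallest objsize multiple that ends exactly
 * on a page boundary.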
895 */ 896 897 898 /* call with NMA_LOCK held */ 899 static int 900 netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) 901 { 902 int i; 903 u_int clustsize; /* the cluster size, multiple of page size */ 904 u_int clustentries; /* how many objects per entry */ 905 906 /* we store the current request, so we can 907 * detect configuration changes later */ 908 p->r_objtotal = objtotal; 909 p->r_objsize = objsize; 910 911 #define MAX_CLUSTSIZE (1<<22) // 4 MB 912 #define LINE_ROUND NM_CACHE_ALIGN // 64 913 if (objsize >= MAX_CLUSTSIZE) { 914 /* we could do it but there is no point */ 915 D("unsupported allocation for %d bytes", objsize); 916 return EINVAL; 917 } 918 /* make sure objsize is a multiple of LINE_ROUND */ 919 i = (objsize & (LINE_ROUND - 1)); 920 if (i) { 921 D("XXX aligning object by %d bytes", LINE_ROUND - i); 922 objsize += LINE_ROUND - i; 923 } 924 if (objsize < p->objminsize || objsize > p->objmaxsize) { 925 D("requested objsize %d out of range [%d, %d]", 926 objsize, p->objminsize, p->objmaxsize); 927 return EINVAL; 928 } 929 if (objtotal < p->nummin || objtotal > p->nummax) { 930 D("requested objtotal %d out of range [%d, %d]", 931 objtotal, p->nummin, p->nummax); 932 return EINVAL; 933 } 934 /* 935 * Compute number of objects using a brute-force approach: 936 * given a max cluster size, 937 * we try to fill it with objects keeping track of the 938 * wasted space to the next page boundary. 939 */ 940 for (clustentries = 0, i = 1;; i++) { 941 u_int delta, used = i * objsize; 942 if (used > MAX_CLUSTSIZE) 943 break; 944 delta = used % PAGE_SIZE; 945 if (delta == 0) { // exact solution 946 clustentries = i; 947 break; 948 } 949 } 950 /* exact solution not found */ 951 if (clustentries == 0) { 952 D("unsupported allocation for %d bytes", objsize); 953 return EINVAL; 954 } 955 /* compute clustsize */ 956 clustsize = clustentries * objsize; 957 if (netmap_verbose) 958 D("objsize %d clustsize %d objects %d", 959 objsize, clustsize, clustentries); 960 961 /* 962 * The number of clusters is n = ceil(objtotal/clustentries) 963 * objtotal' = n * clustentries 964 */ 965 p->_clustentries = clustentries; 966 p->_clustsize = clustsize; 967 p->_numclusters = (objtotal + clustentries - 1) / clustentries; 968 969 /* actual values (may be larger than requested) */ 970 p->_objsize = objsize; 971 p->_objtotal = p->_numclusters * clustentries; 972 973 return 0; 974 } 975 976 977 /* call with NMA_LOCK held */ 978 static int 979 netmap_finalize_obj_allocator(struct netmap_obj_pool *p) 980 { 981 int i; /* must be signed */ 982 size_t n; 983 984 /* optimistically assume we have enough memory */ 985 p->numclusters = p->_numclusters; 986 p->objtotal = p->_objtotal; 987 988 n = sizeof(struct lut_entry) * p->objtotal; 989 #ifdef linux 990 p->lut = vmalloc(n); 991 #else 992 p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO); 993 #endif 994 if (p->lut == NULL) { 995 D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name); 996 goto clean; 997 } 998 999 /* Allocate the bitmap */ 1000 n = (p->objtotal + 31) / 32; 1001 p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO); 1002 if (p->bitmap == NULL) { 1003 D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, 1004 p->name); 1005 goto clean; 1006 } 1007 p->bitmap_slots = n; 1008 1009 /* 1010 * Allocate clusters, init pointers and bitmap 1011 */ 1012 1013 n = p->_clustsize; 1014 for (i = 0; i < (int)p->objtotal;) { 1015 int lim = i + p->_clustentries; 1016 char *clust; 1017 1018 clust 
= contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, 1019 (size_t)0, -1UL, PAGE_SIZE, 0); 1020 if (clust == NULL) { 1021 /* 1022 * If we get here, there is a severe memory shortage, 1023 * so halve the allocated memory to reclaim some. 1024 */ 1025 D("Unable to create cluster at %d for '%s' allocator", 1026 i, p->name); 1027 if (i < 2) /* nothing to halve */ 1028 goto out; 1029 lim = i / 2; 1030 for (i--; i >= lim; i--) { 1031 p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) ); 1032 if (i % p->_clustentries == 0 && p->lut[i].vaddr) 1033 contigfree(p->lut[i].vaddr, 1034 n, M_NETMAP); 1035 p->lut[i].vaddr = NULL; 1036 } 1037 out: 1038 p->objtotal = i; 1039 /* we may have stopped in the middle of a cluster */ 1040 p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; 1041 break; 1042 } 1043 /* 1044 * Set bitmap and lut state for all buffers in the current 1045 * cluster. 1046 * 1047 * [i, lim) is the set of buffer indexes that cover the 1048 * current cluster. 1049 * 1050 * 'clust' is really the address of the current buffer in 1051 * the current cluster as we index through it with a stride 1052 * of p->_objsize. 1053 */ 1054 for (; i < lim; i++, clust += p->_objsize) { 1055 p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) ); 1056 p->lut[i].vaddr = clust; 1057 p->lut[i].paddr = vtophys(clust); 1058 } 1059 } 1060 p->objfree = p->objtotal; 1061 p->memtotal = p->numclusters * p->_clustsize; 1062 if (p->objfree == 0) 1063 goto clean; 1064 if (netmap_verbose) 1065 D("Pre-allocated %d clusters (%d/%dKB) for '%s'", 1066 p->numclusters, p->_clustsize >> 10, 1067 p->memtotal >> 10, p->name); 1068 1069 return 0; 1070 1071 clean: 1072 netmap_reset_obj_allocator(p); 1073 return ENOMEM; 1074 } 1075 1076 /* call with lock held */ 1077 static int 1078 netmap_memory_config_changed(struct netmap_mem_d *nmd) 1079 { 1080 int i; 1081 1082 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1083 if (nmd->pools[i].r_objsize != netmap_params[i].size || 1084 nmd->pools[i].r_objtotal != netmap_params[i].num) 1085 return 1; 1086 } 1087 return 0; 1088 } 1089 1090 static void 1091 netmap_mem_reset_all(struct netmap_mem_d *nmd) 1092 { 1093 int i; 1094 1095 if (netmap_verbose) 1096 D("resetting %p", nmd); 1097 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1098 netmap_reset_obj_allocator(&nmd->pools[i]); 1099 } 1100 nmd->flags &= ~NETMAP_MEM_FINALIZED; 1101 } 1102 1103 static int 1104 netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na) 1105 { 1106 int i, lim = p->_objtotal; 1107 1108 if (na->pdev == NULL) 1109 return 0; 1110 1111 #ifdef __FreeBSD__ 1112 (void)i; 1113 (void)lim; 1114 D("unsupported on FreeBSD"); 1115 #else /* linux */ 1116 for (i = 2; i < lim; i++) { 1117 netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr); 1118 } 1119 #endif /* linux */ 1120 1121 return 0; 1122 } 1123 1124 static int 1125 netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na) 1126 { 1127 #ifdef __FreeBSD__ 1128 D("unsupported on FreeBSD"); 1129 #else /* linux */ 1130 int i, lim = p->_objtotal; 1131 1132 if (na->pdev == NULL) 1133 return 0; 1134 1135 for (i = 2; i < lim; i++) { 1136 netmap_load_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr, 1137 p->lut[i].vaddr); 1138 } 1139 #endif /* linux */ 1140 1141 return 0; 1142 } 1143 1144 static int 1145 netmap_mem_finalize_all(struct netmap_mem_d *nmd) 1146 { 1147 int i; 1148 if (nmd->flags & NETMAP_MEM_FINALIZED) 1149 return 0; 1150 nmd->lasterr = 0; 1151 nmd->nm_totalsize = 0; 1152 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1153 nmd->lasterr = 
netmap_finalize_obj_allocator(&nmd->pools[i]); 1154 if (nmd->lasterr) 1155 goto error; 1156 nmd->nm_totalsize += nmd->pools[i].memtotal; 1157 } 1158 /* buffers 0 and 1 are reserved */ 1159 nmd->pools[NETMAP_BUF_POOL].objfree -= 2; 1160 nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3; 1161 nmd->flags |= NETMAP_MEM_FINALIZED; 1162 1163 if (netmap_verbose) 1164 D("interfaces %d KB, rings %d KB, buffers %d MB", 1165 nmd->pools[NETMAP_IF_POOL].memtotal >> 10, 1166 nmd->pools[NETMAP_RING_POOL].memtotal >> 10, 1167 nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); 1168 1169 if (netmap_verbose) 1170 D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); 1171 1172 1173 return 0; 1174 error: 1175 netmap_mem_reset_all(nmd); 1176 return nmd->lasterr; 1177 } 1178 1179 1180 1181 static void 1182 netmap_mem_private_delete(struct netmap_mem_d *nmd) 1183 { 1184 if (nmd == NULL) 1185 return; 1186 if (netmap_verbose) 1187 D("deleting %p", nmd); 1188 if (nmd->active > 0) 1189 D("bug: deleting mem allocator with active=%d!", nmd->active); 1190 nm_mem_release_id(nmd); 1191 if (netmap_verbose) 1192 D("done deleting %p", nmd); 1193 NMA_LOCK_DESTROY(nmd); 1194 free(nmd, M_DEVBUF); 1195 } 1196 1197 static int 1198 netmap_mem_private_config(struct netmap_mem_d *nmd) 1199 { 1200 /* nothing to do, we are configured on creation 1201 * and configuration never changes thereafter 1202 */ 1203 return 0; 1204 } 1205 1206 static int 1207 netmap_mem_private_finalize(struct netmap_mem_d *nmd) 1208 { 1209 int err; 1210 NMA_LOCK(nmd); 1211 nmd->active++; 1212 err = netmap_mem_finalize_all(nmd); 1213 NMA_UNLOCK(nmd); 1214 return err; 1215 1216 } 1217 1218 static void 1219 netmap_mem_private_deref(struct netmap_mem_d *nmd) 1220 { 1221 NMA_LOCK(nmd); 1222 if (--nmd->active <= 0) 1223 netmap_mem_reset_all(nmd); 1224 NMA_UNLOCK(nmd); 1225 } 1226 1227 1228 /* 1229 * allocator for private memory 1230 */ 1231 struct netmap_mem_d * 1232 netmap_mem_private_new(const char *name, u_int txr, u_int txd, 1233 u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr) 1234 { 1235 struct netmap_mem_d *d = NULL; 1236 struct netmap_obj_params p[NETMAP_POOLS_NR]; 1237 int i, err; 1238 u_int v, maxd; 1239 1240 d = malloc(sizeof(struct netmap_mem_d), 1241 M_DEVBUF, M_NOWAIT | M_ZERO); 1242 if (d == NULL) { 1243 err = ENOMEM; 1244 goto error; 1245 } 1246 1247 *d = nm_blueprint; 1248 1249 err = nm_mem_assign_id(d); 1250 if (err) 1251 goto error; 1252 1253 /* account for the fake host rings */ 1254 txr++; 1255 rxr++; 1256 1257 /* copy the min values */ 1258 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1259 p[i] = netmap_min_priv_params[i]; 1260 } 1261 1262 /* possibly increase them to fit user request */ 1263 v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr); 1264 if (p[NETMAP_IF_POOL].size < v) 1265 p[NETMAP_IF_POOL].size = v; 1266 v = 2 + 4 * npipes; 1267 if (p[NETMAP_IF_POOL].num < v) 1268 p[NETMAP_IF_POOL].num = v; 1269 maxd = (txd > rxd) ? txd : rxd; 1270 v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd; 1271 if (p[NETMAP_RING_POOL].size < v) 1272 p[NETMAP_RING_POOL].size = v; 1273 /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake) 1274 * and two rx rings (again, 1 normal and 1 fake host) 1275 */ 1276 v = txr + rxr + 8 * npipes; 1277 if (p[NETMAP_RING_POOL].num < v) 1278 p[NETMAP_RING_POOL].num = v; 1279 /* for each pipe we only need the buffers for the 4 "real" rings. 1280 * On the other end, the pipe ring dimension may be different from 1281 * the parent port ring dimension. 
As a compromise, we allocate twice the 1282 * space actually needed if the pipe rings were the same size as the parent rings 1283 */ 1284 v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs; 1285 /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */ 1286 if (p[NETMAP_BUF_POOL].num < v) 1287 p[NETMAP_BUF_POOL].num = v; 1288 1289 if (netmap_verbose) 1290 D("req if %d*%d ring %d*%d buf %d*%d", 1291 p[NETMAP_IF_POOL].num, 1292 p[NETMAP_IF_POOL].size, 1293 p[NETMAP_RING_POOL].num, 1294 p[NETMAP_RING_POOL].size, 1295 p[NETMAP_BUF_POOL].num, 1296 p[NETMAP_BUF_POOL].size); 1297 1298 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1299 snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, 1300 nm_blueprint.pools[i].name, 1301 name); 1302 err = netmap_config_obj_allocator(&d->pools[i], 1303 p[i].num, p[i].size); 1304 if (err) 1305 goto error; 1306 } 1307 1308 d->flags &= ~NETMAP_MEM_FINALIZED; 1309 1310 NMA_LOCK_INIT(d); 1311 1312 return d; 1313 error: 1314 netmap_mem_private_delete(d); 1315 if (perr) 1316 *perr = err; 1317 return NULL; 1318 } 1319 1320 1321 /* call with lock held */ 1322 static int 1323 netmap_mem_global_config(struct netmap_mem_d *nmd) 1324 { 1325 int i; 1326 1327 if (nmd->active) 1328 /* already in use, we cannot change the configuration */ 1329 goto out; 1330 1331 if (!netmap_memory_config_changed(nmd)) 1332 goto out; 1333 1334 ND("reconfiguring"); 1335 1336 if (nmd->flags & NETMAP_MEM_FINALIZED) { 1337 /* reset previous allocation */ 1338 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1339 netmap_reset_obj_allocator(&nmd->pools[i]); 1340 } 1341 nmd->flags &= ~NETMAP_MEM_FINALIZED; 1342 } 1343 1344 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1345 nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], 1346 netmap_params[i].num, netmap_params[i].size); 1347 if (nmd->lasterr) 1348 goto out; 1349 } 1350 1351 out: 1352 1353 return nmd->lasterr; 1354 } 1355 1356 static int 1357 netmap_mem_global_finalize(struct netmap_mem_d *nmd) 1358 { 1359 int err; 1360 1361 /* update configuration if changed */ 1362 if (netmap_mem_global_config(nmd)) 1363 goto out; 1364 1365 nmd->active++; 1366 1367 if (nmd->flags & NETMAP_MEM_FINALIZED) { 1368 /* may happen if config is not changed */ 1369 ND("nothing to do"); 1370 goto out; 1371 } 1372 1373 if (netmap_mem_finalize_all(nmd)) 1374 goto out; 1375 1376 nmd->lasterr = 0; 1377 1378 out: 1379 if (nmd->lasterr) 1380 nmd->active--; 1381 err = nmd->lasterr; 1382 1383 return err; 1384 1385 } 1386 1387 static void 1388 netmap_mem_global_delete(struct netmap_mem_d *nmd) 1389 { 1390 int i; 1391 1392 for (i = 0; i < NETMAP_POOLS_NR; i++) { 1393 netmap_destroy_obj_allocator(&nm_mem.pools[i]); 1394 } 1395 1396 NMA_LOCK_DESTROY(&nm_mem); 1397 } 1398 1399 int 1400 netmap_mem_init(void) 1401 { 1402 NMA_LOCK_INIT(&nm_mem); 1403 netmap_mem_get(&nm_mem); 1404 return (0); 1405 } 1406 1407 void 1408 netmap_mem_fini(void) 1409 { 1410 netmap_mem_put(&nm_mem); 1411 } 1412 1413 static void 1414 netmap_free_rings(struct netmap_adapter *na) 1415 { 1416 enum txrx t; 1417 1418 for_rx_tx(t) { 1419 u_int i; 1420 for (i = 0; i < netmap_real_rings(na, t); i++) { 1421 struct netmap_kring *kring = &NMR(na, t)[i]; 1422 struct netmap_ring *ring = kring->ring; 1423 1424 if (ring == NULL) 1425 continue; 1426 netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots); 1427 netmap_ring_free(na->nm_mem, ring); 1428 kring->ring = NULL; 1429 } 1430 } 1431 } 1432 1433 /* call with NMA_LOCK held * 1434 * 1435 * Allocate netmap rings and buffers for this card 1436 * The rings are 
contiguous, but have variable size. 1437 * The kring array must follow the layout described 1438 * in netmap_krings_create(). 1439 */ 1440 static int 1441 netmap_mem2_rings_create(struct netmap_adapter *na) 1442 { 1443 enum txrx t; 1444 1445 NMA_LOCK(na->nm_mem); 1446 1447 for_rx_tx(t) { 1448 u_int i; 1449 1450 for (i = 0; i <= nma_get_nrings(na, t); i++) { 1451 struct netmap_kring *kring = &NMR(na, t)[i]; 1452 struct netmap_ring *ring = kring->ring; 1453 u_int len, ndesc; 1454 1455 if (ring) { 1456 ND("%s already created", kring->name); 1457 continue; /* already created by somebody else */ 1458 } 1459 ndesc = kring->nkr_num_slots; 1460 len = sizeof(struct netmap_ring) + 1461 ndesc * sizeof(struct netmap_slot); 1462 ring = netmap_ring_malloc(na->nm_mem, len); 1463 if (ring == NULL) { 1464 D("Cannot allocate %s_ring", nm_txrx2str(t)); 1465 goto cleanup; 1466 } 1467 ND("txring at %p", ring); 1468 kring->ring = ring; 1469 *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc; 1470 *(int64_t *)(uintptr_t)&ring->buf_ofs = 1471 (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + 1472 na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - 1473 netmap_ring_offset(na->nm_mem, ring); 1474 1475 /* copy values from kring */ 1476 ring->head = kring->rhead; 1477 ring->cur = kring->rcur; 1478 ring->tail = kring->rtail; 1479 *(uint16_t *)(uintptr_t)&ring->nr_buf_size = 1480 netmap_mem_bufsize(na->nm_mem); 1481 ND("%s h %d c %d t %d", kring->name, 1482 ring->head, ring->cur, ring->tail); 1483 ND("initializing slots for %s_ring", nm_txrx2str(txrx)); 1484 if (i != nma_get_nrings(na, t) || (na->na_flags & NAF_HOST_RINGS)) { 1485 /* this is a real ring */ 1486 if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) { 1487 D("Cannot allocate buffers for %s_ring", nm_txrx2str(t)); 1488 goto cleanup; 1489 } 1490 } else { 1491 /* this is a fake ring, set all indices to 0 */ 1492 netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0); 1493 } 1494 /* ring info */ 1495 *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id; 1496 *(uint16_t *)(uintptr_t)&ring->dir = kring->tx; 1497 } 1498 } 1499 1500 NMA_UNLOCK(na->nm_mem); 1501 1502 return 0; 1503 1504 cleanup: 1505 netmap_free_rings(na); 1506 1507 NMA_UNLOCK(na->nm_mem); 1508 1509 return ENOMEM; 1510 } 1511 1512 static void 1513 netmap_mem2_rings_delete(struct netmap_adapter *na) 1514 { 1515 /* last instance, release bufs and rings */ 1516 NMA_LOCK(na->nm_mem); 1517 1518 netmap_free_rings(na); 1519 1520 NMA_UNLOCK(na->nm_mem); 1521 } 1522 1523 1524 /* call with NMA_LOCK held */ 1525 /* 1526 * Allocate the per-fd structure netmap_if. 1527 * 1528 * We assume that the configuration stored in na 1529 * (number of tx/rx rings and descs) does not change while 1530 * the interface is in netmap mode. 1531 */ 1532 static struct netmap_if * 1533 netmap_mem2_if_new(struct netmap_adapter *na) 1534 { 1535 struct netmap_if *nifp; 1536 ssize_t base; /* handy for relative offsets between rings and nifp */ 1537 u_int i, len, n[NR_TXRX], ntot; 1538 enum txrx t; 1539 1540 ntot = 0; 1541 for_rx_tx(t) { 1542 /* account for the (eventually fake) host rings */ 1543 n[t] = nma_get_nrings(na, t) + 1; 1544 ntot += n[t]; 1545 } 1546 /* 1547 * the descriptor is followed inline by an array of offsets 1548 * to the tx and rx rings in the shared memory region. 
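	 *
	 * Userspace then reaches ring j as
	 *	(struct netmap_ring *)((char *)nifp + nifp->ring_ofs[j]);
	 * which is what the NETMAP_TXRING()/NETMAP_RXRING() macros in
	 * net/netmap_user.h do.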
1549 */ 1550 1551 NMA_LOCK(na->nm_mem); 1552 1553 len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t)); 1554 nifp = netmap_if_malloc(na->nm_mem, len); 1555 if (nifp == NULL) { 1556 NMA_UNLOCK(na->nm_mem); 1557 return NULL; 1558 } 1559 1560 /* initialize base fields -- override const */ 1561 *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; 1562 *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; 1563 strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ); 1564 1565 /* 1566 * fill the slots for the rx and tx rings. They contain the offset 1567 * between the ring and nifp, so the information is usable in 1568 * userspace to reach the ring from the nifp. 1569 */ 1570 base = netmap_if_offset(na->nm_mem, nifp); 1571 for (i = 0; i < n[NR_TX]; i++) { 1572 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = 1573 netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base; 1574 } 1575 for (i = 0; i < n[NR_RX]; i++) { 1576 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = 1577 netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base; 1578 } 1579 1580 NMA_UNLOCK(na->nm_mem); 1581 1582 return (nifp); 1583 } 1584 1585 static void 1586 netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) 1587 { 1588 if (nifp == NULL) 1589 /* nothing to do */ 1590 return; 1591 NMA_LOCK(na->nm_mem); 1592 if (nifp->ni_bufs_head) 1593 netmap_extra_free(na, nifp->ni_bufs_head); 1594 netmap_if_free(na->nm_mem, nifp); 1595 1596 NMA_UNLOCK(na->nm_mem); 1597 } 1598 1599 static void 1600 netmap_mem_global_deref(struct netmap_mem_d *nmd) 1601 { 1602 1603 nmd->active--; 1604 if (!nmd->active) 1605 nmd->nm_grp = -1; 1606 if (netmap_verbose) 1607 D("active = %d", nmd->active); 1608 1609 } 1610 1611 struct netmap_mem_ops netmap_mem_global_ops = { 1612 .nmd_get_lut = netmap_mem2_get_lut, 1613 .nmd_get_info = netmap_mem2_get_info, 1614 .nmd_ofstophys = netmap_mem2_ofstophys, 1615 .nmd_config = netmap_mem_global_config, 1616 .nmd_finalize = netmap_mem_global_finalize, 1617 .nmd_deref = netmap_mem_global_deref, 1618 .nmd_delete = netmap_mem_global_delete, 1619 .nmd_if_offset = netmap_mem2_if_offset, 1620 .nmd_if_new = netmap_mem2_if_new, 1621 .nmd_if_delete = netmap_mem2_if_delete, 1622 .nmd_rings_create = netmap_mem2_rings_create, 1623 .nmd_rings_delete = netmap_mem2_rings_delete 1624 }; 1625 struct netmap_mem_ops netmap_mem_private_ops = { 1626 .nmd_get_lut = netmap_mem2_get_lut, 1627 .nmd_get_info = netmap_mem2_get_info, 1628 .nmd_ofstophys = netmap_mem2_ofstophys, 1629 .nmd_config = netmap_mem_private_config, 1630 .nmd_finalize = netmap_mem_private_finalize, 1631 .nmd_deref = netmap_mem_private_deref, 1632 .nmd_if_offset = netmap_mem2_if_offset, 1633 .nmd_delete = netmap_mem_private_delete, 1634 .nmd_if_new = netmap_mem2_if_new, 1635 .nmd_if_delete = netmap_mem2_if_delete, 1636 .nmd_rings_create = netmap_mem2_rings_create, 1637 .nmd_rings_delete = netmap_mem2_rings_delete 1638 }; 1639