/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_mem2.c 11881 2012-10-18 23:24:15Z luigi $
 *
 * (New) memory allocator for netmap
 */

/*
 * This allocator creates three memory regions:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * All regions need to be a multiple of the page size, as we export them
 * to userspace through mmap. Only the latter needs to be dma-able,
 * but for convenience we use the same type of allocator for all of them.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * Objects are aligned to the cache line (64 bytes), rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are not
 * too worried about performance.
 *
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with the desired values, the sysctls also export absolute
 * minimum and maximum values that cannot be overridden.
 *
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring
 *	variable size too, 8 bytes per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
 *
 * struct netmap_buffer
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace avoiding copies.
 *	Must contain a full frame (e.g. 1518 bytes, or more for VLANs,
 *	jumbo frames etc.), plus be nicely aligned; also, some NICs
 *	restrict the size to a multiple of 1K or so. Default to 2K.
 */
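/*
 * Worked example of the exported layout (illustration only, not used by
 * the code): within a pool, object i starts at
 *	(i / clustentries) * _clustsize + (i % clustentries) * _objsize
 * bytes from the beginning of that pool (this is exactly what
 * netmap_obj_offset() below computes), and the pools follow each other
 * in the order if, ring, buf. So the userspace offset of buffer i is
 * the _memtotal of the two preceding pools plus the expression above.
 */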
#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM	20000	/* 40MB */
#endif

#ifdef linux
#define NMA_LOCK_T		struct semaphore
#define NMA_LOCK_INIT()		sema_init(&nm_mem.nm_mtx, 1)
#define NMA_LOCK_DESTROY()
#define NMA_LOCK()		down(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		up(&nm_mem.nm_mtx)
#else /* !linux */
#define NMA_LOCK_T		struct mtx
#define NMA_LOCK_INIT()		mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY()	mtx_destroy(&nm_mem.nm_mtx)
#define NMA_LOCK()		mtx_lock(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		mtx_unlock(&nm_mem.nm_mtx)
#endif /* linux */

enum {
    NETMAP_IF_POOL = 0,
    NETMAP_RING_POOL,
    NETMAP_BUF_POOL,
    NETMAP_POOLS_NR
};


struct netmap_obj_params {
    u_int size;
    u_int num;
};


struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
    [NETMAP_IF_POOL] = {
        .size = 1024,
        .num  = 100,
    },
    [NETMAP_RING_POOL] = {
        .size = 9*PAGE_SIZE,
        .num  = 200,
    },
    [NETMAP_BUF_POOL] = {
        .size = 2048,
        .num  = NETMAP_BUF_MAX_NUM,
    },
};


struct netmap_obj_pool {
    char name[16];		/* name of the allocator */
    u_int objtotal;		/* actual total number of objects. */
    u_int objfree;		/* number of free objects. */
    u_int clustentries;	/* actual objects per cluster */

    /* limits */
    u_int objminsize;	/* minimum object size */
    u_int objmaxsize;	/* maximum object size */
    u_int nummin;		/* minimum number of objects */
    u_int nummax;		/* maximum number of objects */

    /* the total memory space is _numclusters*_clustsize */
    u_int _numclusters;	/* how many clusters */
    u_int _clustsize;	/* cluster size */
    u_int _objsize;		/* actual object size */

    u_int _memtotal;	/* _numclusters*_clustsize */
    struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
    uint32_t *bitmap;	/* one bit per buffer, 1 means free */
    uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
};
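/*
 * Bookkeeping summary (descriptive note on the fields above): for a pool
 * p, p->lut[i].vaddr and p->lut[i].paddr hold the kernel virtual and
 * physical address of object i, and bit (i % 32) of p->bitmap[i / 32]
 * is 1 while the object is free. Objects 0 and 1 of each pool are
 * reserved (netmap_finalize_obj_allocator() clears their bits),
 * presumably so that index 0 never names a usable object.
 */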
struct netmap_mem_d {
    NMA_LOCK_T nm_mtx;  /* protect the allocator */
    u_int nm_totalsize; /* shorthand */

    int finalized;	/* !=0 iff preallocation done */
    int lasterr;	/* last error for curr config */
    int refcount;	/* existing priv structures */
    /* the three allocators */
    struct netmap_obj_pool pools[NETMAP_POOLS_NR];
};


static struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
    .pools = {
        [NETMAP_IF_POOL] = {
            .name	= "netmap_if",
            .objminsize = sizeof(struct netmap_if),
            .objmaxsize = 4096,
            .nummin     = 10,	/* don't be stingy */
            .nummax	    = 10000,	/* XXX very large */
        },
        [NETMAP_RING_POOL] = {
            .name	= "netmap_ring",
            .objminsize = sizeof(struct netmap_ring),
            .objmaxsize = 32*PAGE_SIZE,
            .nummin     = 2,
            .nummax	    = 1024,
        },
        [NETMAP_BUF_POOL] = {
            .name	= "netmap_buf",
            .objminsize = 64,
            .objmaxsize = 65536,
            .nummin     = 4,
            .nummax	    = 1000000, /* one million! */
        },
    },
};

struct lut_entry *netmap_buffer_lut;	/* exported */

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
    /* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_size", &netmap_params[id].size); */ \
    SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
        CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
    SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
        CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
    /* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_num", &netmap_params[id].num); */ \
    SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
        CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
    SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
        CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);

/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * the pre-allocated memory is not contiguous, but fragmented across
 * different clusters and smaller memory allocators. Consequently, first
 * of all we need to find which allocator owns the provided offset, then
 * we need to find the physical address associated with the target page
 * (this is done using the look-up table).
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
    int i;
    vm_offset_t o = offset;
    struct netmap_obj_pool *p = nm_mem.pools;

    for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) {
        if (offset >= p[i]._memtotal)
            continue;
        // XXX now scan the clusters
        return p[i].lut[offset / p[i]._objsize].paddr +
            offset % p[i]._objsize;
    }
    /* this is only in case of errors */
    D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
        p[NETMAP_IF_POOL]._memtotal,
        p[NETMAP_IF_POOL]._memtotal
            + p[NETMAP_RING_POOL]._memtotal,
        p[NETMAP_IF_POOL]._memtotal
            + p[NETMAP_RING_POOL]._memtotal
            + p[NETMAP_BUF_POOL]._memtotal);
    return 0;	// XXX bad address
}
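/*
 * Worked example for the routine above (illustration only): call I and R
 * the _memtotal of the if and ring pools. A userspace offset o with
 * I + R <= o falls into the buffer pool; the loop reduces it to
 * o' = o - I - R, finds the physical address of object o' / _objsize in
 * that pool's lookup table, and adds o' % _objsize to reach the exact
 * byte within the object.
 */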
/*
 * We store objects by kernel address; we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
    int i, k = p->clustentries, n = p->objtotal;
    ssize_t ofs = 0;

    for (i = 0; i < n; i += k, ofs += p->_clustsize) {
        const char *base = p->lut[i].vaddr;
        ssize_t relofs = (const char *) vaddr - base;

        if (relofs < 0 || relofs > p->_clustsize)
            continue;

        ofs = ofs + relofs;
        ND("%s: return offset %d (cluster %d) for pointer %p",
            p->name, ofs, i, vaddr);
        return ofs;
    }
    D("address %p is not contained inside any cluster (%s)",
        vaddr, p->name);
    return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
    netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(v)					\
    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +			\
	netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(v)					\
    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +			\
	nm_mem.pools[NETMAP_RING_POOL]._memtotal +		\
	netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)))


/*
 * Report the index, and use the start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index)
{
    uint32_t i = 0;	/* index in the bitmap */
    uint32_t mask, j;	/* slot counter */
    void *vaddr = NULL;

    if (len > p->_objsize) {
        D("%s request size %d too large", p->name, len);
        // XXX cannot reduce the size
        return NULL;
    }

    if (p->objfree == 0) {
        D("%s allocator: run out of memory", p->name);
        return NULL;
    }
    if (start)
        i = *start;

    /* termination is guaranteed by p->objfree, but better check bounds on i */
    while (vaddr == NULL && i < p->bitmap_slots)  {
        uint32_t cur = p->bitmap[i];
        if (cur == 0) { /* bitmask is fully used */
            i++;
            continue;
        }
        /* locate a slot */
        for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
            ;

        p->bitmap[i] &= ~mask; /* mark object as in use */
        p->objfree--;

        vaddr = p->lut[i * 32 + j].vaddr;
        if (index)
            *index = i * 32 + j;
    }
    ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
        p->name, i, j, vaddr);

    if (start)
        *start = i;
    return vaddr;
}


/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
    if (j >= p->objtotal) {
        D("invalid index %u, max %u", j, p->objtotal);
        return;
    }
    p->bitmap[j / 32] |= (1 << (j % 32));
    p->objfree++;
    return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
    int i, j, n = p->_memtotal / p->_clustsize;

    for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
        void *base = p->lut[i * p->clustentries].vaddr;
        ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

        /* The given address is out of the scope of the current cluster. */
        if (vaddr < base || relofs > p->_clustsize)
            continue;

        j = j + relofs / p->_objsize;
        KASSERT(j != 0, ("Cannot free object 0"));
        netmap_obj_free(p, j);
        return;
    }
    D("address %p is not contained inside any cluster (%s)",
        vaddr, p->name);
}
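/*
 * Allocation sketch (descriptive note on the routines above): the bitmap
 * is scanned one 32-bit word at a time; a word equal to 0 means that all
 * 32 objects it covers are busy. Within a non-zero word i, the lowest
 * set bit j is claimed and the object handed out is lut[i*32 + j].
 * netmap_new_bufs() below passes a persistent 'start' hint, so a batch
 * of allocations does not rescan the (mostly full) head of the bitmap
 * on every call.
 */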
#define netmap_if_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(_pos, _index)			\
    netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, _index)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
    (netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize)


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_if *nifp,
                struct netmap_slot *slot, u_int n)
{
    struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
    int i = 0;		/* slot counter */
    uint32_t pos = 0;	/* slot in p->bitmap */
    uint32_t index = 0;	/* buffer index */

    (void)nifp;	/* UNUSED */
    for (i = 0; i < n; i++) {
        void *vaddr = netmap_buf_malloc(&pos, &index);
        if (vaddr == NULL) {
            D("unable to locate empty packet buffer");
            goto cleanup;
        }
        slot[i].buf_idx = index;
        slot[i].len = p->_objsize;
        /* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
         * in the NIC ring. This is a hack that hides missing
         * initializations in the drivers, and should go away.
         */
        slot[i].flags = NS_BUF_CHANGED;
    }

    ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
    return (0);

cleanup:
    while (i > 0) {
        i--;
        netmap_obj_free(p, slot[i].buf_idx);
    }
    bzero(slot, n * sizeof(slot[0]));
    return (ENOMEM);
}


static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
    struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];

    if (i < 2 || i >= p->objtotal) {
        D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
        return;
    }
    netmap_obj_free(p, i);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{
    if (p == NULL)
        return;
    if (p->bitmap)
        free(p->bitmap, M_NETMAP);
    p->bitmap = NULL;
    if (p->lut) {
        int i;
        for (i = 0; i < p->objtotal; i += p->clustentries) {
            if (p->lut[i].vaddr)
                contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
        }
        bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
        vfree(p->lut);
#else
        free(p->lut, M_NETMAP);
#endif
    }
    p->lut = NULL;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
    if (p == NULL)
        return;
    netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
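/*
 * Sizing example (illustration only, assuming PAGE_SIZE = 4096): for the
 * default 2048-byte buffers the loop below stops at i = 2, since 2 * 2048
 * is an exact multiple of the page size, so clustentries = 2 and
 * clustsize = 4096 (one page per cluster). For 1024-byte netmap_if
 * objects it stops at i = 4 (one page holding four objects); a request
 * for 100 such objects is then rounded to n = ceil(100 / 4) = 25
 * clusters, i.e. objtotal' = 100.
 */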
/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
    int i, n;
    u_int clustsize;	/* the cluster size, multiple of page size */
    u_int clustentries;	/* how many objects per entry */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
    if (objsize >= MAX_CLUSTSIZE) {
        /* we could do it but there is no point */
        D("unsupported allocation for %d bytes", objsize);
        goto error;
    }
    /* make sure objsize is a multiple of LINE_ROUND */
    i = (objsize & (LINE_ROUND - 1));
    if (i) {
        D("XXX aligning object by %d bytes", LINE_ROUND - i);
        objsize += LINE_ROUND - i;
    }
    if (objsize < p->objminsize || objsize > p->objmaxsize) {
        D("requested objsize %d out of range [%d, %d]",
            objsize, p->objminsize, p->objmaxsize);
        goto error;
    }
    if (objtotal < p->nummin || objtotal > p->nummax) {
        D("requested objtotal %d out of range [%d, %d]",
            objtotal, p->nummin, p->nummax);
        goto error;
    }
    /*
     * Compute number of objects using a brute-force approach:
     * given a max cluster size,
     * we try to fill it with objects keeping track of the
     * wasted space to the next page boundary.
     */
    for (clustentries = 0, i = 1;; i++) {
        u_int delta, used = i * objsize;
        if (used > MAX_CLUSTSIZE)
            break;
        delta = used % PAGE_SIZE;
        if (delta == 0) { // exact solution
            clustentries = i;
            break;
        }
        if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
            clustentries = i;
    }
    // D("XXX --- ouch, delta %d (bad for buffers)", delta);
    /* compute clustsize and round to the next page */
    clustsize = clustentries * objsize;
    i = (clustsize & (PAGE_SIZE - 1));
    if (i)
        clustsize += PAGE_SIZE - i;
    if (netmap_verbose)
        D("objsize %d clustsize %d objects %d",
            objsize, clustsize, clustentries);

    /*
     * The number of clusters is n = ceil(objtotal/clustentries)
     * objtotal' = n * clustentries
     */
    p->clustentries = clustentries;
    p->_clustsize = clustsize;
    n = (objtotal + clustentries - 1) / clustentries;
    p->_numclusters = n;
    p->objtotal = n * clustentries;
    p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
    p->_memtotal = p->_numclusters * p->_clustsize;
    p->_objsize = objsize;

    return 0;

error:
    p->_objsize = objsize;
    p->objtotal = objtotal;

    return EINVAL;
}
/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
    int i, n;

    n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
    p->lut = vmalloc(n);
#else
    p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
    if (p->lut == NULL) {
        D("Unable to create lookup table (%d bytes) for '%s'", n, p->name);
        goto clean;
    }

    /* Allocate the bitmap */
    n = (p->objtotal + 31) / 32;
    p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
    if (p->bitmap == NULL) {
        D("Unable to create bitmap (%d entries) for allocator '%s'", n,
            p->name);
        goto clean;
    }
    p->bitmap_slots = n;

    /*
     * Allocate clusters, init pointers and bitmap
     */
    for (i = 0; i < p->objtotal;) {
        int lim = i + p->clustentries;
        char *clust;

        clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO,
            0, -1UL, PAGE_SIZE, 0);
        if (clust == NULL) {
            /*
             * If we get here, there is a severe memory shortage,
             * so halve the allocated memory to reclaim some.
             * XXX check boundaries
             */
            D("Unable to create cluster at %d for '%s' allocator",
                i, p->name);
            lim = i / 2;
            for (i--; i >= lim; i--) {
                p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
                if (i % p->clustentries == 0 && p->lut[i].vaddr)
                    contigfree(p->lut[i].vaddr,
                        p->_clustsize, M_NETMAP);
            }
            p->objtotal = i;
            p->objfree = p->objtotal - 2;
            p->_numclusters = i / p->clustentries;
            p->_memtotal = p->_numclusters * p->_clustsize;
            break;
        }
        for (; i < lim; i++, clust += p->_objsize) {
            p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
            p->lut[i].vaddr = clust;
            p->lut[i].paddr = vtophys(clust);
        }
    }
    p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
    if (netmap_verbose)
        D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
            p->_numclusters, p->_clustsize >> 10,
            p->_memtotal >> 10, p->name);

    return 0;

clean:
    netmap_reset_obj_allocator(p);
    return ENOMEM;
}

/* call with lock held */
static int
netmap_memory_config_changed(void)
{
    int i;

    for (i = 0; i < NETMAP_POOLS_NR; i++) {
        if (nm_mem.pools[i]._objsize != netmap_params[i].size ||
            nm_mem.pools[i].objtotal != netmap_params[i].num)
            return 1;
    }
    return 0;
}


/* call with lock held */
static int
netmap_memory_config(void)
{
    int i;

    if (!netmap_memory_config_changed())
        goto out;

    D("reconfiguring");

    if (nm_mem.finalized) {
        /* reset previous allocation */
        for (i = 0; i < NETMAP_POOLS_NR; i++) {
            netmap_reset_obj_allocator(&nm_mem.pools[i]);
        }
        nm_mem.finalized = 0;
    }

    for (i = 0; i < NETMAP_POOLS_NR; i++) {
        nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i],
            netmap_params[i].num, netmap_params[i].size);
        if (nm_mem.lasterr)
            goto out;
    }

    D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
        nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10,
        nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10,
        nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20);

out:

    return nm_mem.lasterr;
}
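/*
 * Usage sketch (illustration only; the actual callers live elsewhere in
 * the netmap code and are assumed here): netmap_memory_init() runs once
 * at module load; netmap_memory_finalize() is invoked, under NMA_LOCK,
 * whenever a netmap file descriptor needs the shared region, and the
 * first such call performs the preallocation; netmap_memory_deref()
 * drops the reference when that descriptor goes away; and
 * netmap_memory_fini() releases everything at module unload.
 */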
/* call with lock held */
static int
netmap_memory_finalize(void)
{
    int i;
    u_int totalsize = 0;

    nm_mem.refcount++;
    if (nm_mem.refcount > 1) {
        ND("busy (refcount %d)", nm_mem.refcount);
        goto out;
    }

    /* update configuration if changed */
    if (netmap_memory_config())
        goto out;

    if (nm_mem.finalized) {
        /* may happen if config is not changed */
        ND("nothing to do");
        goto out;
    }

    for (i = 0; i < NETMAP_POOLS_NR; i++) {
        nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]);
        if (nm_mem.lasterr)
            goto cleanup;
        totalsize += nm_mem.pools[i]._memtotal;
    }
    nm_mem.nm_totalsize = totalsize;

    /* backward compatibility */
    netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize;
    netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal;

    netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut;
    netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr;

    nm_mem.finalized = 1;
    nm_mem.lasterr = 0;

    /* make sysctl values match actual values in the pools */
    for (i = 0; i < NETMAP_POOLS_NR; i++) {
        netmap_params[i].size = nm_mem.pools[i]._objsize;
        netmap_params[i].num  = nm_mem.pools[i].objtotal;
    }

out:
    if (nm_mem.lasterr)
        nm_mem.refcount--;

    return nm_mem.lasterr;

cleanup:
    for (i = 0; i < NETMAP_POOLS_NR; i++) {
        netmap_reset_obj_allocator(&nm_mem.pools[i]);
    }
    nm_mem.refcount--;

    return nm_mem.lasterr;
}

static int
netmap_memory_init(void)
{
    NMA_LOCK_INIT();
    return (0);
}

static void
netmap_memory_fini(void)
{
    int i;

    for (i = 0; i < NETMAP_POOLS_NR; i++) {
        netmap_destroy_obj_allocator(&nm_mem.pools[i]);
    }
    NMA_LOCK_DESTROY();
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
    int i;
    if (!na->tx_rings)
        return;
    for (i = 0; i < na->num_tx_rings + 1; i++) {
        netmap_ring_free(na->tx_rings[i].ring);
        na->tx_rings[i].ring = NULL;
    }
    for (i = 0; i < na->num_rx_rings + 1; i++) {
        netmap_ring_free(na->rx_rings[i].ring);
        na->rx_rings[i].ring = NULL;
    }
    free(na->tx_rings, M_DEVBUF);
    na->tx_rings = na->rx_rings = NULL;
}



/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 * If this is the first instance, also allocate the krings, rings etc.
 */
static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
    struct netmap_if *nifp;
    struct netmap_ring *ring;
    ssize_t base; /* handy for relative offsets between rings and nifp */
    u_int i, len, ndesc, ntx, nrx;
    struct netmap_kring *kring;

    if (netmap_update_config(na)) {
        /* configuration mismatch, report and fail */
        return NULL;
    }
    ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
    nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
    /*
     * the descriptor is followed inline by an array of offsets
     * to the tx and rx rings in the shared memory region.
     */
    len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
    nifp = netmap_if_malloc(len);
    if (nifp == NULL) {
        return NULL;
    }

    /* initialize base fields -- override const */
    *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
    *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
    strncpy(nifp->ni_name, ifname, IFNAMSIZ);

    (na->refcount)++;	/* XXX atomic ? we are under lock */
    if (na->refcount > 1) { /* already setup, we are done */
        goto final;
    }

    len = (ntx + nrx) * sizeof(struct netmap_kring);
    na->tx_rings = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (na->tx_rings == NULL) {
        D("Cannot allocate krings for %s", ifname);
        goto cleanup;
    }
    na->rx_rings = na->tx_rings + ntx;

    /*
     * First instance, allocate netmap rings and buffers for this card.
     * The rings are contiguous, but have variable size.
     */
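    /*
     * Offset arithmetic used below (illustration only): the offsets
     * written into the shared structures are relative, so userspace can
     * use them no matter where the block is mmap()ed:
     *	nifp->ring_ofs[i] = offset(ring_i) - offset(nifp)
     *	ring->buf_ofs     = offset(buffer pool start) - offset(ring)
     * where offset() is the position of the object in the exported block
     * (if pool, then ring pool, then buffer pool). Buffer b of a ring is
     * thus reached as (char *)ring + ring->buf_ofs + b * ring->nr_buf_size,
     * which is what the userspace NETMAP_BUF() macro computes.
     */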
    for (i = 0; i < ntx; i++) { /* Transmit rings */
        kring = &na->tx_rings[i];
        ndesc = na->num_tx_desc;
        bzero(kring, sizeof(*kring));
        len = sizeof(struct netmap_ring) +
            ndesc * sizeof(struct netmap_slot);
        ring = netmap_ring_malloc(len);
        if (ring == NULL) {
            D("Cannot allocate tx_ring[%d] for %s", i, ifname);
            goto cleanup;
        }
        ND("txring[%d] at %p ofs %d", i, ring);
        kring->na = na;
        kring->ring = ring;
        *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
        *(ssize_t *)(uintptr_t)&ring->buf_ofs =
            (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
            nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
            netmap_ring_offset(ring);

        /*
         * IMPORTANT:
         * Always keep one slot empty, so we can detect new
         * transmissions comparing cur and nr_hwcur (they are
         * the same only if there are no new transmissions).
         */
        ring->avail = kring->nr_hwavail = ndesc - 1;
        ring->cur = kring->nr_hwcur = 0;
        *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
        ND("initializing slots for txring[%d]", i);
        if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
            D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
            goto cleanup;
        }
    }

    for (i = 0; i < nrx; i++) { /* Receive rings */
        kring = &na->rx_rings[i];
        ndesc = na->num_rx_desc;
        bzero(kring, sizeof(*kring));
        len = sizeof(struct netmap_ring) +
            ndesc * sizeof(struct netmap_slot);
        ring = netmap_ring_malloc(len);
        if (ring == NULL) {
            D("Cannot allocate rx_ring[%d] for %s", i, ifname);
            goto cleanup;
        }
        ND("rxring[%d] at %p ofs %d", i, ring);

        kring->na = na;
        kring->ring = ring;
        *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
        *(ssize_t *)(uintptr_t)&ring->buf_ofs =
            (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
            nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
            netmap_ring_offset(ring);

        ring->cur = kring->nr_hwcur = 0;
        ring->avail = kring->nr_hwavail = 0; /* empty */
        *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
        ND("initializing slots for rxring[%d]", i);
        if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
            D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
            goto cleanup;
        }
    }
#ifdef linux
    // XXX initialize the selrecord structs.
    for (i = 0; i < ntx; i++)
        init_waitqueue_head(&na->tx_rings[i].si);
    for (i = 0; i < nrx; i++)
        init_waitqueue_head(&na->rx_rings[i].si);
    init_waitqueue_head(&na->tx_si);
    init_waitqueue_head(&na->rx_si);
#endif
final:
    /*
     * fill the slots for the rx and tx rings. They contain the offset
     * between the ring and nifp, so the information is usable in
     * userspace to reach the ring from the nifp.
     */
    base = netmap_if_offset(nifp);
    for (i = 0; i < ntx; i++) {
        *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
            netmap_ring_offset(na->tx_rings[i].ring) - base;
    }
    for (i = 0; i < nrx; i++) {
        *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
            netmap_ring_offset(na->rx_rings[i].ring) - base;
    }
    return (nifp);
cleanup:
    netmap_free_rings(na);
    netmap_if_free(nifp);
    (na->refcount)--;
    return NULL;
}

/* call with NMA_LOCK held */
static void
netmap_memory_deref(void)
{
    nm_mem.refcount--;
    if (netmap_verbose)
        D("refcount = %d", nm_mem.refcount);
}