/*
 * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * (New) memory allocator for netmap
 */

/*
 * This allocator creates three memory pools:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * that contain netmap objects. Each pool is made of a number of clusters,
 * each a multiple of the page size and containing an integral number of
 * objects. The clusters are contiguous in user space but not in the kernel.
 * Only nm_buf_pool needs to be dma-able, but for convenience we use the
 * same type of allocator for all of them.
 *
 * Once mapped, the three pools are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * Objects are aligned to the cache line (64 bytes), rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are not
 * too worried about performance.
 *
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with the desired values, the sysctls also export the
 * absolute minimum and maximum values, which cannot be overridden.
 *
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring
 *	variable size, 8 bytes per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
 *
 * struct netmap_buffer
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace avoiding copies.
 *	Must contain a full frame (e.g. 1518 bytes, or more for VLANs,
 *	jumbo frames, etc.), plus be nicely aligned; some NICs also
 *	restrict the size to a multiple of 1K or so. Defaults to 2K.
 */
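
/*
 * Example (illustrative sketch only): the userspace view of the layout
 * described above. Everything is addressed with offsets relative to the
 * start of the mapped region, so a process can recover every object
 * starting from the netmap_if offset returned by NIOCREGIF. The snippet
 * assumes the usual helpers from <net/netmap_user.h> (NETMAP_IF,
 * NETMAP_TXRING, NETMAP_BUF); error handling is omitted.
 *
 *	struct nmreq req = { .nr_version = NETMAP_API };
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	int fd = open("/dev/netmap", O_RDWR);
 *	ioctl(fd, NIOCREGIF, &req);
 *	char *mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txr = NETMAP_TXRING(nifp, 0);
 *	char *buf = NETMAP_BUF(txr, txr->slot[txr->cur].buf_idx);
 */
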
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */

#ifdef linux
// XXX a mtx would suffice here 20130415 lr
// #define NMA_LOCK_T		safe_spinlock_t
#define NMA_LOCK_T		struct semaphore
#define NMA_LOCK_INIT()		sema_init(&nm_mem.nm_mtx, 1)
#define NMA_LOCK_DESTROY()
#define NMA_LOCK()		down(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		up(&nm_mem.nm_mtx)
#else /* !linux */
#define NMA_LOCK_T		struct mtx
#define NMA_LOCK_INIT()		mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY()	mtx_destroy(&nm_mem.nm_mtx)
#define NMA_LOCK()		mtx_lock(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		mtx_unlock(&nm_mem.nm_mtx)
#endif /* linux */

enum {
	NETMAP_IF_POOL = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};


struct netmap_obj_params {
	u_int size;
	u_int num;
};


struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};


struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects. */
	u_int objfree;		/* number of free objects. */
	u_int clustentries;	/* actual objects per cluster */

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
};


struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	u_int nm_totalsize;	/* shorthand */

	int finalized;		/* !=0 iff preallocation done */
	int lasterr;		/* last error for curr config */
	int refcount;		/* existing priv structures */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];
};

/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
static struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
};

// XXX logically belongs to nm_mem
struct lut_entry *netmap_buffer_lut;	/* exported */

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
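
/*
 * Example (sketch): for each pool the macro above creates a writable
 * "requested" knob and a read-only "current" one under dev.netmap.
 * To ask for more/larger buffers before the first netmap file descriptor
 * is opened, one could do, for instance:
 *
 *	sysctl dev.netmap.buf_num=163840
 *	sysctl dev.netmap.buf_size=4096
 *	sysctl dev.netmap.buf_curr_num	# reports the actual allocation
 *
 * The *_curr_* values may differ from the requested ones if memory is
 * scarce or the requests fall outside the per-pool min/max limits.
 */
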

/*
 * Convert a userspace offset to a physical address.
 * XXX only called in the FreeBSD's netmap_mmap()
 * because in linux we map everything at once.
 *
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	int i;
	vm_offset_t o = offset;
	struct netmap_obj_pool *p = nm_mem.pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) {
		if (offset >= p[i]._memtotal)
			continue;
		// now look up the cluster's address
		return p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL]._memtotal,
		p[NETMAP_IF_POOL]._memtotal
			+ p[NETMAP_RING_POOL]._memtotal,
		p[NETMAP_IF_POOL]._memtotal
			+ p[NETMAP_RING_POOL]._memtotal
			+ p[NETMAP_BUF_POOL]._memtotal);
	return 0;	// XXX bad address
}

/*
 * We store objects by kernel address; we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset within the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
	netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(v)					\
	(nm_mem.pools[NETMAP_IF_POOL]._memtotal +		\
	netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(v)					\
	(nm_mem.pools[NETMAP_IF_POOL]._memtotal +		\
	nm_mem.pools[NETMAP_RING_POOL]._memtotal +		\
	netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)))


/*
 * Allocate an object: report the index, and use the start position
 * as a hint, otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: run out of memory", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}

/*
 * free by index, not by address. This is slow, but is only used
 * for a small number of objects (rings, nifp)
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* the given address is outside the scope of the current cluster */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(_pos, _index)			\
	netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, _index)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
	(netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize)


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_if *nifp,
		struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
	int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	(void)nifp;	/* UNUSED */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(&pos, &index);
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		/* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
		 * in the NIC ring. This is a hack that hides missing
		 * initializations in the drivers, and should go away.
		 */
		// slot[i].flags = NS_BUF_CHANGED;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}


static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		int i;
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
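
/*
 * Worked example (illustrative only, assuming PAGE_SIZE = 4096):
 * with the default 2048-byte buffers the sizing loop below finds an
 * exact fit at i = 2 (2 * 2048 = 4096), so clustentries = 2 and
 * clustsize = PAGE_SIZE; the default 163840 buffers then need 81920
 * clusters, i.e. _memtotal = 320 MB. For a hypothetical 1920-byte
 * object the first exact fit is i = 32 (32 * 1920 = 61440 = 15 pages),
 * still below MAX_CLUSTSIZE, so no space is wasted inside a cluster.
 */
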

/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		goto error;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		goto error;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		goto error;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_memtotal = p->_numclusters * p->_clustsize;
	p->_objsize = objsize;

	return 0;

error:
	p->_objsize = objsize;
	p->objtotal = objtotal;

	return EINVAL;
}


/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i, n;

	n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
	p->lut = vmalloc(n);
#else
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + p->clustentries;
		char *clust;

		clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 * XXX check boundaries
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
				if (i % p->clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / p->clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->_numclusters, p->_clustsize >> 10,
		    p->_memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_memory_config_changed(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nm_mem.pools[i]._objsize != netmap_params[i].size ||
		    nm_mem.pools[i].objtotal != netmap_params[i].num)
			return 1;
	}
	return 0;
}


/* call with lock held */
static int
netmap_memory_config(void)
{
	int i;

	if (!netmap_memory_config_changed())
		goto out;

	D("reconfiguring");

	if (nm_mem.finalized) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nm_mem.pools[i]);
		}
		nm_mem.finalized = 0;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i],
				netmap_params[i].num, netmap_params[i].size);
		if (nm_mem.lasterr)
			goto out;
	}

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10,
	    nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10,
	    nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20);

out:

	return nm_mem.lasterr;
}

/* call with lock held */
static int
netmap_memory_finalize(void)
{
	int i;
	u_int totalsize = 0;

	nm_mem.refcount++;
	if (nm_mem.refcount > 1) {
		ND("busy (refcount %d)", nm_mem.refcount);
		goto out;
	}

	/* update configuration if changed */
	if (netmap_memory_config())
		goto out;

	if (nm_mem.finalized) {
		/* may happen if config is not changed */
		ND("nothing to do");
		goto out;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]);
		if (nm_mem.lasterr)
			goto cleanup;
		totalsize += nm_mem.pools[i]._memtotal;
	}
	nm_mem.nm_totalsize = totalsize;

	/* backward compatibility */
	netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr;

	nm_mem.finalized = 1;
	nm_mem.lasterr = 0;

	/* make sysctl values match actual values in the pools */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_params[i].size = nm_mem.pools[i]._objsize;
		netmap_params[i].num  = nm_mem.pools[i].objtotal;
	}

out:
	if (nm_mem.lasterr)
		nm_mem.refcount--;

	return nm_mem.lasterr;

cleanup:
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nm_mem.pools[i]);
	}
	nm_mem.refcount--;

	return nm_mem.lasterr;
}

static int
netmap_memory_init(void)
{
	NMA_LOCK_INIT();
	return (0);
}

static void
netmap_memory_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY();
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;

	if (!na->tx_rings)
		return;
	for (i = 0; i < na->num_tx_rings + 1; i++) {
		netmap_ring_free(na->tx_rings[i].ring);
		na->tx_rings[i].ring = NULL;
	}
	for (i = 0; i < na->num_rx_rings + 1; i++) {
		netmap_ring_free(na->rx_rings[i].ring);
		na->rx_rings[i].ring = NULL;
	}
	free(na->tx_rings, M_DEVBUF);
	na->tx_rings = na->rx_rings = NULL;
}



/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 * If this is the first instance, also allocate the krings, rings etc.
 */
static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc, ntx, nrx;
	struct netmap_kring *kring;

	if (netmap_update_config(na)) {
		/* configuration mismatch, report and fail */
		return NULL;
	}
	ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL) {
		return NULL;
	}

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) { /* already setup, we are done */
		goto final;
	}

	len = (ntx + nrx) * sizeof(struct netmap_kring);
	na->tx_rings = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (na->tx_rings == NULL) {
		D("Cannot allocate krings for %s", ifname);
		goto cleanup;
	}
	na->rx_rings = na->tx_rings + ntx;

	/*
	 * First instance, allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p ofs %d", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
			nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
			netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p ofs %d", i, ring);

		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
			nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}
#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);
#endif
final:
	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);
cleanup:
	netmap_free_rings(na);
	netmap_if_free(nifp);
	(na->refcount)--;
	return NULL;
}

/* call with NMA_LOCK held */
static void
netmap_memory_deref(void)
{
	nm_mem.refcount--;
	if (netmap_verbose)
		D("refcount = %d", nm_mem.refcount);
}
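
/*
 * Sketch (for reference only): the ring_ofs[] and buf_ofs values
 * initialized above are consumed in userspace roughly as follows;
 * this mirrors the NETMAP_TXRING()/NETMAP_RXRING()/NETMAP_BUF()
 * helpers of <net/netmap_user.h>, shown expanded for clarity:
 *
 *	struct netmap_ring *txr = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i]);		// i-th tx ring
 *	struct netmap_ring *rxr = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i + nifp->ni_tx_rings + 1]);
 *	char *buf = (char *)txr + txr->buf_ofs +
 *	    txr->slot[txr->cur].buf_idx * txr->nr_buf_size;
 *
 * i.e. everything is reachable from nifp through relative offsets only,
 * which is what allows the same mapping to appear at different virtual
 * addresses in different processes.
 */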