/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_mem2.c 10830 2012-03-22 18:06:01Z luigi $
 *
 * New memory allocator for netmap
 */

/*
 * The new version allocates three regions:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * All regions need to be page-sized as we export them to
 * userspace through mmap. Only the latter needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *     nm_if      nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * Allocator for a pool of memory objects of the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * To be polite with the cache, objects are aligned to
 * the cache line, or 64 bytes. Sizes are rounded to a multiple of 64.
 * For each object we have one entry in the bitmap to signal its state.
 * Allocation scans the bitmap, but since this is done only on attach,
 * we are not too worried about performance.
 */

/*
 * MEMORY SIZES:
 *
 * (all the parameters below will become tunables)
 *
 * struct netmap_if is variable size but small.
 * Assuming each NIC has 8+2 rings (4+1 tx, 4+1 rx), the netmap_if
 * uses 120 bytes on a 64-bit machine.
 * We allocate NETMAP_IF_MAX_SIZE (1024) which should work even for
 * cards with 48 ring pairs.
 * The total number of 'struct netmap_if' could be slightly larger
 * than the total number of rings on all interfaces on the system.
 */
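
/*
 * Illustration only (not part of the allocator): the layout above is what a
 * userspace client sees after mmap()ing the netmap device.  A minimal sketch
 * of how the exported offsets are consumed, assuming the usual netmap(4)
 * userspace API (struct nmreq, NIOCREGIF, the NETMAP_* macros) from the
 * public headers; variable names here are hypothetical:
 *
 *	struct nmreq req;			// filled in by NIOCREGIF
 *	char *mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);			// whole block, nm_if_pool first
 *	struct netmap_if *nifp =
 *	    (struct netmap_if *)(mem + req.nr_offset);
 *	// each ring is reached through an offset relative to nifp
 *	struct netmap_ring *txr =
 *	    (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[0]);
 *	// and each buffer through an offset relative to its ring
 *	struct netmap_slot *slot = &txr->slot[txr->cur];
 *	char *buf = (char *)txr + txr->buf_ofs +
 *	    slot->buf_idx * txr->nr_buf_size;
 *
 * The NETMAP_IF()/NETMAP_TXRING()/NETMAP_BUF() macros in the userspace
 * headers wrap essentially this arithmetic.
 */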
#define NETMAP_IF_MAX_SIZE	1024
#define NETMAP_IF_MAX_NUM	512

/*
 * netmap rings are up to 2..4k descriptors, 8 bytes each,
 * plus some glue at the beginning (32 bytes).
 * We set the default ring size to 9 pages (36K) and enable
 * a few hundred of them.
 */
#define NETMAP_RING_MAX_SIZE	(9*PAGE_SIZE)
#define NETMAP_RING_MAX_NUM	200	/* approx 8MB */

/*
 * Buffers: the more the better. Buffer size is NETMAP_BUF_SIZE,
 * 2k or slightly less, aligned to 64 bytes.
 * A large 10G interface can have 2k*18 = 36k buffers per interface,
 * or about 72MB of memory. Up to us to use more.
 */
#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM	100000	/* 200MB */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM	20000	/* 40MB */
#endif


struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects. */
	u_int objfree;		/* number of free objects. */
	u_int clustentries;	/* actual objects per cluster */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
};

struct netmap_mem_d {
	NM_LOCK_T nm_mtx;	/* protect the allocator ? */
	u_int nm_totalsize;	/* shorthand */

	/* pointers to the three allocators */
	struct netmap_obj_pool *nm_if_pool;
	struct netmap_obj_pool *nm_ring_pool;
	struct netmap_obj_pool *nm_buf_pool;
};

struct lut_entry *netmap_buffer_lut;	/* exported */


/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * the pre-allocated memory is not contiguous, but fragmented across
 * different clusters and smaller memory allocators. Consequently, first
 * we need to find which allocator owns the provided offset, then we find
 * the physical address associated with the target page (this is done
 * using the look-up table).
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	const struct netmap_obj_pool *p[] = {
		nm_mem->nm_if_pool,
		nm_mem->nm_ring_pool,
		nm_mem->nm_buf_pool };
	int i;
	vm_offset_t o = offset;


	for (i = 0; i < 3; offset -= p[i]->_memtotal, i++) {
		if (offset >= p[i]->_memtotal)
			continue;
		// XXX now scan the clusters
		return p[i]->lut[offset / p[i]->_objsize].paddr +
			offset % p[i]->_objsize;
	}
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[0]->_memtotal, p[0]->_memtotal + p[1]->_memtotal,
		p[0]->_memtotal + p[1]->_memtotal + p[2]->_memtotal);
	return 0;	// XXX bad address
}
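
/*
 * Worked example of the translation above (numbers are hypothetical, for
 * illustration only): with an if pool of 512KB and a ring pool of 8MB, a
 * userspace offset of 512K + 8M + 5000 falls past the first two pools, so
 * after the two subtractions the residual offset into nm_buf_pool is 5000.
 * With 2KB objects this is index 5000 / 2048 = 2 and intra-object offset
 * 5000 % 2048 = 904, hence the returned address is lut[2].paddr + 904.
 */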
/*
 * We store objects by kernel address; we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs > p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
	netmap_obj_offset(nm_mem->nm_if_pool, (v))

#define netmap_ring_offset(v)					\
	(nm_mem->nm_if_pool->_memtotal +			\
	netmap_obj_offset(nm_mem->nm_ring_pool, (v)))

#define netmap_buf_offset(v)					\
	(nm_mem->nm_if_pool->_memtotal +			\
	nm_mem->nm_ring_pool->_memtotal +			\
	netmap_obj_offset(nm_mem->nm_buf_pool, (v)))


static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: run out of memory", p->name);
		return NULL;
	}

	/* termination is guaranteed by p->objfree */
	while (vaddr == NULL) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) {	/* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask;	/* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	return vaddr;
}


/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is out of the scope of the current cluster. */
		if (vaddr < base || relofs > p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	ND("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(nm_mem->nm_if_pool, len)
#define netmap_if_free(v)	netmap_obj_free_va(nm_mem->nm_if_pool, (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(nm_mem->nm_ring_pool, len)
#define netmap_buf_malloc()			\
	netmap_obj_malloc(nm_mem->nm_buf_pool, NETMAP_BUF_SIZE)
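
/*
 * Note on the bitmap encoding used above (illustrative sketch only):
 * object j is tracked by bit (j % 32) of word (j / 32), with 1 = free.
 * A hypothetical helper that tests whether an object is free would be:
 *
 *	static inline int
 *	netmap_obj_isfree(const struct netmap_obj_pool *p, uint32_t j)
 *	{
 *		return (p->bitmap[j / 32] >> (j % 32)) & 1;
 *	}
 *
 * netmap_obj_malloc() scans for a non-zero word and clears the lowest
 * set bit; netmap_obj_free() sets the bit again.  Objects 0 and 1 are
 * kept permanently busy (see netmap_new_obj_allocator() below), so index
 * 0 can double as an error value.
 */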
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
	(netmap_obj_offset(nm_mem->nm_buf_pool, (v)) / nm_mem->nm_buf_pool->_objsize)


static void
netmap_new_bufs(struct netmap_if *nifp __unused,
		struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	uint32_t i = 0;	/* slot counter */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc();
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}

		slot[i].buf_idx = netmap_buf_index(vaddr);
		KASSERT(slot[i].buf_idx != 0,
		    ("Assigning buf_idx=0 to just created slot"));
		slot[i].len = p->_objsize;
		slot[i].flags = NS_BUF_CHANGED; // XXX GAETANO hack
	}

	ND("allocated %d buffers, %d available", n, p->objfree);
	return;

cleanup:
	while (i > 0) {	/* i is unsigned, so don't loop on i-- >= 0 */
		i--;
		netmap_obj_free(nm_mem->nm_buf_pool, slot[i].buf_idx);
	}
}


static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(nm_mem->nm_buf_pool, i);
}


/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	if (p->lut) {
		int i;
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);
	}
	bzero(p, sizeof(*p));
	free(p, M_NETMAP);
}
/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
static struct netmap_obj_pool *
netmap_new_obj_allocator(const char *name, u_int objtotal, u_int objsize)
{
	struct netmap_obj_pool *p;
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return NULL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/*
	 * Compute the number of objects per cluster with a brute-force
	 * approach: given a max cluster size, we try to fill it with
	 * objects, keeping track of the wasted space to the next page
	 * boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
		objsize, clustsize, clustentries);

	p = malloc(sizeof(struct netmap_obj_pool), M_NETMAP,
	    M_WAITOK | M_ZERO);
	if (p == NULL) {
		D("Unable to create '%s' allocator", name);
		return NULL;
	}
	/*
	 * Allocate and initialize the lookup table.
	 *
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	strncpy(p->name, name, sizeof(p->name));
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_objsize = objsize;
	p->_memtotal = p->_numclusters * p->_clustsize;

	p->lut = malloc(sizeof(struct lut_entry) * p->objtotal,
	    M_NETMAP, M_WAITOK | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table for '%s' allocator", name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + clustentries;
		char *clust;

		clust = contigmalloc(clustsize, M_NETMAP, M_WAITOK | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, name);
			lim = i / 2;
			for (; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
				if (i % clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
					    p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += objsize) {
			p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3;	/* objs 0 and 1 are always busy */
	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
	    p->_numclusters, p->_clustsize >> 10,
	    p->_memtotal >> 10, name);

	return p;

clean:
	netmap_destroy_obj_allocator(p);
	return NULL;
}
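
/*
 * Example of the cluster sizing above (hypothetical sizes, for illustration
 * only): with PAGE_SIZE = 4096 and objsize = 2048 (the default buffer size)
 * the first exact fit is i = 2, so buffer clusters are a single page holding
 * two buffers.  With a hypothetical objsize of 1088 bytes (a multiple of 64
 * that does not divide the page) the loop keeps the candidate with the
 * largest `delta' until it reaches the exact solution i = 64:
 * 64 * 1088 = 69632 = 17 pages, i.e. 64 objects per 17-page cluster with no
 * wasted space.
 */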
static int
netmap_memory_init(void)
{
	struct netmap_obj_pool *p;

	nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
	    M_WAITOK | M_ZERO);
	if (nm_mem == NULL)
		goto clean;

	p = netmap_new_obj_allocator("netmap_if",
	    NETMAP_IF_MAX_NUM, NETMAP_IF_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_if_pool = p;

	p = netmap_new_obj_allocator("netmap_ring",
	    NETMAP_RING_MAX_NUM, NETMAP_RING_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_ring_pool = p;

	p = netmap_new_obj_allocator("netmap_buf",
	    NETMAP_BUF_MAX_NUM, NETMAP_BUF_SIZE);
	if (p == NULL)
		goto clean;
	netmap_total_buffers = p->objtotal;
	netmap_buffer_lut = p->lut;
	nm_mem->nm_buf_pool = p;
	netmap_buffer_base = p->lut[0].vaddr;

	mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL,
	    MTX_DEF);
	nm_mem->nm_totalsize =
	    nm_mem->nm_if_pool->_memtotal +
	    nm_mem->nm_ring_pool->_memtotal +
	    nm_mem->nm_buf_pool->_memtotal;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem->nm_if_pool->_memtotal >> 10,
	    nm_mem->nm_ring_pool->_memtotal >> 10,
	    nm_mem->nm_buf_pool->_memtotal >> 20);
	return 0;

clean:
	if (nm_mem) {
		netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
		netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
		free(nm_mem, M_NETMAP);
	}
	return ENOMEM;
}
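
/*
 * Back-of-the-envelope footprint with the default (pre-tunable) limits,
 * assuming 4KB pages and ~2KB buffers: the if pool is 512 * 1024 = 512KB,
 * the ring pool about 200 * 36KB = 7.2MB (the "approx 8MB" noted at the
 * define), and the buffer pool about 100000 * 2KB = 200MB (or
 * 20000 * 2KB = 40MB with CONSERVATIVE).  These are the figures reported
 * by the D() message above, modulo the rounding performed by
 * netmap_new_obj_allocator().
 */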

static void
netmap_memory_fini(void)
{
	if (!nm_mem)
		return;
	netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_buf_pool);
	mtx_destroy(&nm_mem->nm_mtx);
	free(nm_mem, M_NETMAP);
}



static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc;
	u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	struct netmap_kring *kring;

	NMA_LOCK();
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL) {
		NMA_UNLOCK();
		return NULL;
	}

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) { /* already setup, we are done */
		NMA_UNLOCK();
		goto final;
	}

	/*
	 * First instance: allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
		    ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p ofs %d", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
		    ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p ofs %d", i, ring);

		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}
	NMA_UNLOCK();
#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->rx_si);
	init_waitqueue_head(&na->tx_si);
#endif
final:
	/*
	 * Fill the ring_ofs slots for the tx and rx rings. Each entry
	 * contains the offset between the ring and nifp, so the
	 * information is usable in userspace to reach the ring from
	 * the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
		    netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
		    netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);
cleanup:
	// XXX missing
	NMA_UNLOCK();
	return NULL;
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;
	for (i = 0; i < na->num_tx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->tx_rings[i].ring);
	for (i = 0; i < na->num_rx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->rx_rings[i].ring);
}
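
/*
 * Usage sketch (illustration only, variable names hypothetical): given the
 * offsets written by netmap_if_new(), userspace reaches the i-th tx ring
 * and the i-th rx ring of an interface as
 *
 *	u_int ntx = nifp->ni_tx_rings + 1;	// hardware rings + stack ring
 *	struct netmap_ring *txr =
 *	    (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i]);
 *	struct netmap_ring *rxr =
 *	    (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i + ntx]);
 *
 * On the tx side, a sender fills up to ring->avail slots starting at
 * ring->cur and then advances cur; since one slot is always kept empty
 * (see the IMPORTANT comment in netmap_if_new()), cur != nr_hwcur is a
 * reliable indication that new slots have been queued for transmission.
 */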