Lines Matching +full:txrx +full:-

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2012-2014 Matteo Landi
5 * Copyright (C) 2012-2016 Luigi Rizzo
6 * Copyright (C) 2012-2016 Giuseppe Lettieri
98 /* ---------------------------------------------------*/
113 /* ---------------------------------------------------*/
134 #define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx)
135 #define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx)
136 #define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx)
137 #define NMA_SPINLOCK(n) NM_MTX_SPINLOCK((n)->nm_mtx)
138 #define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx)
197 rv = nmd->ops->nmd_get_lut(nmd, lut); in netmap_mem_get_lut()
210 rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid); in netmap_mem_get_info()
223 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we in netmap_mem_ofstophys()
229 pa = nmd->ops->nmd_ofstophys(nmd, off); in netmap_mem_ofstophys()
238 if (nmd->active) { in netmap_mem_config()
245 return nmd->ops->nmd_config(nmd); in netmap_mem_config()
254 rv = nmd->ops->nmd_if_offset(nmd, off); in netmap_mem_if_offset()
263 nmd->ops->nmd_delete(nmd); in netmap_mem_delete()
270 struct netmap_mem_d *nmd = na->nm_mem; in netmap_mem_if_new()
273 nifp = nmd->ops->nmd_if_new(nmd, na, priv); in netmap_mem_if_new()
282 struct netmap_mem_d *nmd = na->nm_mem; in netmap_mem_if_delete()
285 nmd->ops->nmd_if_delete(nmd, na, nif); in netmap_mem_if_delete()
293 struct netmap_mem_d *nmd = na->nm_mem; in netmap_mem_rings_create()
296 rv = nmd->ops->nmd_rings_create(nmd, na); in netmap_mem_rings_create()
305 struct netmap_mem_d *nmd = na->nm_mem; in netmap_mem_rings_delete()
308 nmd->ops->nmd_rings_delete(nmd, na); in netmap_mem_rings_delete()
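
Editor's note: the nmd_* wrappers matched above (197-308) share one shape: take the allocator lock via the NMA_* macros at 134-138 where needed, dispatch through the per-allocator ops table, unlock. A minimal userspace sketch of that pattern, with pthread_mutex_t standing in for NM_MTX and every name hypothetical:

#include <pthread.h>

struct mem_ops;                          /* per-allocator method table */

struct mem_d {
    pthread_mutex_t mtx;                 /* stand-in for nm_mtx */
    const struct mem_ops *ops;
};

struct mem_ops {
    int (*get_info)(struct mem_d *, unsigned long *size);
};

/* public entry point: lock, dispatch, unlock */
static int
mem_get_info(struct mem_d *d, unsigned long *size)
{
    int rv;

    pthread_mutex_lock(&d->mtx);         /* NMA_LOCK(d) */
    rv = d->ops->get_info(d, size);      /* d->ops->nmd_get_info(...) */
    pthread_mutex_unlock(&d->mtx);       /* NMA_UNLOCK(d) */
    return rv;
}

static int
get_info_impl(struct mem_d *d, unsigned long *size)
{
    (void)d;
    *size = 1UL << 20;                   /* made-up figure */
    return 0;
}

int
main(void)
{
    static const struct mem_ops ops = { .get_info = get_info_impl };
    struct mem_d d = { PTHREAD_MUTEX_INITIALIZER, &ops };
    unsigned long sz;

    return mem_get_info(&d, &sz);
}
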
320 return nmd->nm_id; in netmap_mem_get_id()
325 nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
338 nmd->refcount++; in __netmap_mem_get()
349 last = (--nmd->refcount == 0); in __netmap_mem_put()
362 if (nm_mem_check_group(nmd, na->pdev) < 0) { in netmap_mem_finalize()
371 nmd->active++; in netmap_mem_finalize()
373 nmd->lasterr = nmd->ops->nmd_finalize(nmd, na); in netmap_mem_finalize()
375 if (!nmd->lasterr && !(nmd->flags & NETMAP_MEM_NOMAP)) { in netmap_mem_finalize()
376 nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); in netmap_mem_finalize()
380 lasterr = nmd->lasterr; in netmap_mem_finalize()
401 if (p->bitmap == NULL) { in netmap_init_obj_allocator_bitmap()
403 n = (p->objtotal + 31) / 32; in netmap_init_obj_allocator_bitmap()
404 p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n); in netmap_init_obj_allocator_bitmap()
405 if (p->bitmap == NULL) { in netmap_init_obj_allocator_bitmap()
407 p->name); in netmap_init_obj_allocator_bitmap()
410 p->bitmap_slots = n; in netmap_init_obj_allocator_bitmap()
412 memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0])); in netmap_init_obj_allocator_bitmap()
415 p->objfree = 0; in netmap_init_obj_allocator_bitmap()
421 for (j = 0; j < p->objtotal; j++) { in netmap_init_obj_allocator_bitmap()
422 if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) { in netmap_init_obj_allocator_bitmap()
424 nm_prinf("skipping %s %d", p->name, j); in netmap_init_obj_allocator_bitmap()
427 p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) ); in netmap_init_obj_allocator_bitmap()
428 p->objfree++; in netmap_init_obj_allocator_bitmap()
432 nm_prinf("%s free %u", p->name, p->objfree); in netmap_init_obj_allocator_bitmap()
433 if (p->objfree == 0) { in netmap_init_obj_allocator_bitmap()
435 nm_prerr("%s: no objects available", p->name); in netmap_init_obj_allocator_bitmap()
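
Editor's note: the bitmap arithmetic used here and in the allocation paths below packs 32 object flags per uint32_t word, so object j lives in word j >> 5 at bit j & 31, and (objtotal + 31) / 32 rounds the word count up. A standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* mark every object in [0, objtotal) as free, 32 flags per word,
 * mirroring netmap_init_obj_allocator_bitmap() above */
int
main(void)
{
    uint32_t objtotal = 100;
    uint32_t n = (objtotal + 31) / 32;       /* 4 words for 100 objects */
    uint32_t *bitmap = calloc(n, sizeof(*bitmap));

    if (bitmap == NULL)
        return 1;
    for (uint32_t j = 0; j < objtotal; j++)
        bitmap[j >> 5] |= 1U << (j & 31U);   /* word j/32, bit j%32 */
    printf("object 70 -> word %d bit %d\n", 70 >> 5, 70 & 31); /* 2, 6 */
    free(bitmap);
    return 0;
}
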
448 struct netmap_obj_pool *p = &nmd->pools[i]; in netmap_mem_init_bitmaps()
458 if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) { in netmap_mem_init_bitmaps()
459 nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name); in netmap_mem_init_bitmaps()
463 nmd->pools[NETMAP_BUF_POOL].objfree -= 2; in netmap_mem_init_bitmaps()
464 if (nmd->pools[NETMAP_BUF_POOL].bitmap) { in netmap_mem_init_bitmaps()
468 * Removed shared-info --> is the bug still there? */ in netmap_mem_init_bitmaps()
469 nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U; in netmap_mem_init_bitmaps()
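
Editor's note on the store at 469: ~3U is 0xfffffffc, so this single write marks buffers 2..31 of the first word as free while leaving bits 0 and 1 clear; buffer indices 0 and 1 stay permanently allocated as reserved buffers, matching the objfree -= 2 at 463.
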
479 if (na->active_fds <= 0 && !(nmd->flags & NETMAP_MEM_NOMAP)) in netmap_mem_deref()
480 netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); in netmap_mem_deref()
481 if (nmd->active == 1) { in netmap_mem_deref()
490 nmd->ops->nmd_deref(nmd, na); in netmap_mem_deref()
492 nmd->active--; in netmap_mem_deref()
494 nmd->lasterr = 0; in netmap_mem_deref()
506 lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; in netmap_mem2_get_lut()
508 lut->plut = lut->lut; in netmap_mem2_get_lut()
510 lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; in netmap_mem2_get_lut()
511 lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; in netmap_mem2_get_lut()
579 .nm_grp = -1,
580 .nm_numa_domain = -1,
619 .nm_grp = -1,
620 .nm_numa_domain = -1,
657 "Use NUMA-local memory for memory pools when possible");
669 id = scan->nm_id + 1; in nm_mem_assign_id_locked()
672 scan = scan->next; in nm_mem_assign_id_locked()
673 if (id != scan->nm_id) { in nm_mem_assign_id_locked()
674 nmd->nm_id = id; in nm_mem_assign_id_locked()
675 nmd->nm_grp = grp_id; in nm_mem_assign_id_locked()
676 nmd->nm_numa_domain = domain; in nm_mem_assign_id_locked()
677 nmd->prev = scan->prev; in nm_mem_assign_id_locked()
678 nmd->next = scan; in nm_mem_assign_id_locked()
679 scan->prev->next = nmd; in nm_mem_assign_id_locked()
680 scan->prev = nmd; in nm_mem_assign_id_locked()
682 nmd->refcount = 1; in nm_mem_assign_id_locked()
699 ret = nm_mem_assign_id_locked(nmd, grp_id, -1); in nm_mem_assign_id()
709 nmd->prev->next = nmd->next; in nm_mem_release_id()
710 nmd->next->prev = nmd->prev; in nm_mem_release_id()
713 netmap_last_mem_d = nmd->prev; in nm_mem_release_id()
715 nmd->prev = nmd->next = NULL; in nm_mem_release_id()
726 if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) { in netmap_mem_find()
727 nmd->refcount++; in netmap_mem_find()
732 nmd = nmd->next; in netmap_mem_find()
744 * A VALE port can use a particular allocator through the vale-ctl -m option in nm_mem_check_group()
754 if (nmd->nm_grp != id) { in nm_mem_check_group()
757 nmd->nm_grp, id); in nm_mem_check_group()
758 nmd->lasterr = err = ENOMEM; in nm_mem_check_group()
819 p = nmd->pools; in netmap_mem2_ofstophys()
821 for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { in netmap_mem2_ofstophys()
859 * 1 - allocate a Memory Descriptor List as wide as the sum of the memory needed by all the pools
861 * 2 - cycle all the objects in every pool and for every object do
863 * 2a - cycle all the objects in every pool, get the list of the physical address descriptors
865 * 2b - calculate the offset in the array of pages descriptors in the main MDL
867 * 2c - copy the descriptors of the object in the main MDL
869 * 3 - return the resulting MDL that needs to be mapped in userland
896 struct netmap_obj_pool *p = &nmd->pools[i]; in win32_build_user_vm_map()
897 int clsz = p->_clustsize; in win32_build_user_vm_map()
898 int clobjs = p->_clustentries; /* objects per cluster */ in win32_build_user_vm_map()
903 tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL); in win32_build_user_vm_map()
912 for (j = 0; j < p->numclusters; j++, ofs += clsz) { in win32_build_user_vm_map()
914 MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz); in win32_build_user_vm_map()
917 mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */ in win32_build_user_vm_map()
928 * helper function for OS-specific mmap routines (currently only Windows).
940 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { in netmap_mem2_get_pool_info()
944 *clustsize = nmd->pools[pool]._clustsize; in netmap_mem2_get_pool_info()
945 *numclusters = nmd->pools[pool].numclusters; in netmap_mem2_get_pool_info()
958 if (nmd->flags & NETMAP_MEM_FINALIZED) { in netmap_mem2_get_info()
959 *size = nmd->nm_totalsize; in netmap_mem2_get_info()
964 struct netmap_obj_pool *p = nmd->pools + i; in netmap_mem2_get_info()
965 *size += ((size_t)p->_numclusters * (size_t)p->_clustsize); in netmap_mem2_get_info()
970 *memflags = nmd->flags; in netmap_mem2_get_info()
972 *id = nmd->nm_id; in netmap_mem2_get_info()
986 int i, k = p->_clustentries, n = p->objtotal; in netmap_obj_offset()
989 for (i = 0; i < n; i += k, ofs += p->_clustsize) { in netmap_obj_offset()
990 const char *base = p->lut[i].vaddr; in netmap_obj_offset()
991 ssize_t relofs = (const char *) vaddr - base; in netmap_obj_offset()
993 if (relofs < 0 || relofs >= p->_clustsize) in netmap_obj_offset()
998 p->name, ofs, i, vaddr); in netmap_obj_offset()
1002 vaddr, p->name); in netmap_obj_offset()
1008 netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
1011 ((n)->pools[NETMAP_IF_POOL].memtotal + \
1012 netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
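
Editor's note: the macros above compose pool-relative offsets into offsets inside the whole mapped region; the pools sit back to back (if, then ring, then buf), so each later pool adds the memtotal of every pool before it. A toy computation with made-up sizes:

#include <stdio.h>

int
main(void)
{
    /* hypothetical pool sizes, not netmap defaults */
    size_t if_memtotal   = 4096UL * 25;   /* NETMAP_IF_POOL */
    size_t ring_memtotal = 4096UL * 200;  /* NETMAP_RING_POOL */
    size_t obj_ofs       = 2048UL * 10;   /* offset inside NETMAP_BUF_POOL */

    /* a buffer's region offset skips the two pools that precede it,
     * the same composition netmap_ring_offset() uses for rings */
    printf("buffer region offset: %zu\n",
        if_memtotal + ring_memtotal + obj_ofs);
    return 0;
}
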
1031 if (len > p->_objsize) { in netmap_obj_malloc()
1032 nm_prerr("%s request size %d too large", p->name, len); in netmap_obj_malloc()
1036 if (p->objfree == 0) { in netmap_obj_malloc()
1037 nm_prerr("no more %s objects", p->name); in netmap_obj_malloc()
1043 /* termination is guaranteed by p->objfree, but better check bounds on i */ in netmap_obj_malloc()
1044 while (vaddr == NULL && i < p->bitmap_slots) { in netmap_obj_malloc()
1045 uint32_t cur = p->bitmap[i]; in netmap_obj_malloc()
1054 p->bitmap[i] &= ~mask; /* mark object as in use */ in netmap_obj_malloc()
1055 p->objfree--; in netmap_obj_malloc()
1057 vaddr = p->lut[i * 32 + j].vaddr; in netmap_obj_malloc()
1061 nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr); in netmap_obj_malloc()
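
Editor's note: the loop above walks bitmap words, skips all-zero (fully busy) words, picks a set bit, clears it to mark the object in use, and maps the (word, bit) pair back to a lut index. A compact userspace re-creation, using the GCC/Clang __builtin_ffs idiom where the kernel code walks a shifting mask:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t bitmap[2] = { 0x0, 0x30 };    /* only objects 36, 37 free */
    uint32_t idx = 0;
    int found = 0;

    for (uint32_t i = 0; i < 2 && !found; i++) {
        uint32_t cur = bitmap[i];

        if (cur == 0)                      /* whole word in use: skip */
            continue;
        uint32_t j = __builtin_ffs(cur) - 1;  /* lowest free bit */
        bitmap[i] &= ~(1U << j);           /* mark object as in use */
        idx = i * 32 + j;                  /* index into p->lut */
        found = 1;
    }
    printf(found ? "allocated object %u\n" : "pool empty (%u)\n", idx);
    return 0;
}
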
1078 if (j >= p->objtotal) { in netmap_obj_free()
1079 nm_prerr("invalid index %u, max %u", j, p->objtotal); in netmap_obj_free()
1082 ptr = &p->bitmap[j / 32]; in netmap_obj_free()
1089 p->objfree++; in netmap_obj_free()
1101 u_int i, j, n = p->numclusters; in netmap_obj_free_va()
1103 for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { in netmap_obj_free_va()
1104 void *base = p->lut[i * p->_clustentries].vaddr; in netmap_obj_free_va()
1105 ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; in netmap_obj_free_va()
1108 if (base == NULL || vaddr < base || relofs >= p->_clustsize) in netmap_obj_free_va()
1111 j = j + relofs / p->_objsize; in netmap_obj_free_va()
1117 vaddr, p->name); in netmap_obj_free_va()
1123 return nmd->pools[NETMAP_BUF_POOL]._objsize; in netmap_mem_bufsize()
1126 #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
1127 #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
1128 #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
1129 #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
1131 netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)
1137 (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
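
Editor's note: since all buffers in a pool share one size, the buffer-index macro at 1137 is a plain division; with 2048-byte buffers, pool offset 20480 maps to buffer index 10.
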
1147 struct netmap_mem_d *nmd = na->nm_mem; in netmap_extra_alloc()
1161 nm_prdis(5, "allocate buffer %d -> %d", *head, cur); in netmap_extra_alloc()
1173 struct lut_entry *lut = na->na_lut.lut; in netmap_extra_free()
1174 struct netmap_mem_d *nmd = na->nm_mem; in netmap_extra_free()
1175 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; in netmap_extra_free()
1179 for (i = 0; head >= 2 && head < p->objtotal; i++) { in netmap_extra_free()
1198 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; in netmap_new_bufs()
1200 uint32_t pos = 0; /* slot in p->bitmap */ in netmap_new_bufs()
1210 slot[i].len = p->_objsize; in netmap_new_bufs()
1215 nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos); in netmap_new_bufs()
1220 i--; in netmap_new_bufs()
1230 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; in netmap_mem_set_ring()
1235 slot[i].len = p->_objsize; in netmap_mem_set_ring()
1244 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; in netmap_free_buf()
1246 if (i < 2 || i >= p->objtotal) { in netmap_free_buf()
1247 nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); in netmap_free_buf()
1264 p->name, p->objfree); in netmap_free_bufs()
1273 if (p->bitmap) in netmap_reset_obj_allocator()
1274 nm_os_free(p->bitmap); in netmap_reset_obj_allocator()
1275 p->bitmap = NULL; in netmap_reset_obj_allocator()
1276 if (p->invalid_bitmap) in netmap_reset_obj_allocator()
1277 nm_os_free(p->invalid_bitmap); in netmap_reset_obj_allocator()
1278 p->invalid_bitmap = NULL; in netmap_reset_obj_allocator()
1279 if (!p->alloc_done) { in netmap_reset_obj_allocator()
1285 if (p->lut) { in netmap_reset_obj_allocator()
1291 * addresses are stored at multiples of p->_clusterentries in netmap_reset_obj_allocator()
1294 for (i = 0; i < p->objtotal; i += p->_clustentries) { in netmap_reset_obj_allocator()
1295 free(p->lut[i].vaddr, M_NETMAP); in netmap_reset_obj_allocator()
1297 nm_free_lut(p->lut, p->objtotal); in netmap_reset_obj_allocator()
1299 p->lut = NULL; in netmap_reset_obj_allocator()
1300 p->objtotal = 0; in netmap_reset_obj_allocator()
1301 p->memtotal = 0; in netmap_reset_obj_allocator()
1302 p->numclusters = 0; in netmap_reset_obj_allocator()
1303 p->objfree = 0; in netmap_reset_obj_allocator()
1304 p->alloc_done = 0; in netmap_reset_obj_allocator()
1325 * XXX note -- userspace needs the buffers to be contiguous,
1340 p->r_objtotal = objtotal; in netmap_config_obj_allocator()
1341 p->r_objsize = objsize; in netmap_config_obj_allocator()
1351 i = (objsize & (LINE_ROUND - 1)); in netmap_config_obj_allocator()
1353 nm_prinf("aligning object by %d bytes", LINE_ROUND - i); in netmap_config_obj_allocator()
1354 objsize += LINE_ROUND - i; in netmap_config_obj_allocator()
1356 if (objsize < p->objminsize || objsize > p->objmaxsize) { in netmap_config_obj_allocator()
1358 objsize, p->objminsize, p->objmaxsize); in netmap_config_obj_allocator()
1361 if (objtotal < p->nummin || objtotal > p->nummax) { in netmap_config_obj_allocator()
1363 objtotal, p->nummin, p->nummax); in netmap_config_obj_allocator()
1367 * Compute number of objects using a brute-force approach: in netmap_config_obj_allocator()
1397 p->_clustentries = clustentries; in netmap_config_obj_allocator()
1398 p->_clustsize = clustsize; in netmap_config_obj_allocator()
1399 p->_numclusters = (objtotal + clustentries - 1) / clustentries; in netmap_config_obj_allocator()
1402 p->_objsize = objsize; in netmap_config_obj_allocator()
1403 p->_objtotal = p->_numclusters * clustentries; in netmap_config_obj_allocator()
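
Editor's note: the "brute-force approach" in the comment above searches for a packing of objects into clusters. A simplified sketch of the idea (grow the per-cluster object count until the cluster ends exactly on a page boundary); this is an illustration, not the exact kernel heuristic, and MAX_CLUSTSIZE here is a made-up bound:

#include <stdio.h>

#define PAGE_SIZE     4096u
#define MAX_CLUSTSIZE (1u << 17)   /* hypothetical cap */

int
main(void)
{
    unsigned objsize = 2048, clustentries = 0;

    for (unsigned i = 1; i * objsize <= MAX_CLUSTSIZE; i++) {
        if ((i * objsize) % PAGE_SIZE == 0) {  /* exact page multiple */
            clustentries = i;
            break;
        }
    }
    /* 2048-byte objects: 2 per 4 KiB page, clustsize 4096 */
    printf("clustentries %u, clustsize %u\n",
        clustentries, clustentries * objsize);
    return 0;
}
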
1414 if (p->lut) { in netmap_finalize_obj_allocator()
1426 p->numclusters = p->_numclusters; in netmap_finalize_obj_allocator()
1427 p->objtotal = p->_objtotal; in netmap_finalize_obj_allocator()
1428 p->alloc_done = 1; in netmap_finalize_obj_allocator()
1430 p->lut = nm_alloc_lut(p->objtotal); in netmap_finalize_obj_allocator()
1431 if (p->lut == NULL) { in netmap_finalize_obj_allocator()
1432 nm_prerr("Unable to create lookup table for '%s'", p->name); in netmap_finalize_obj_allocator()
1440 for (i = 0; i < (int)p->objtotal;) { in netmap_finalize_obj_allocator()
1441 int lim = i + p->_clustentries; in netmap_finalize_obj_allocator()
1451 if (nmd->nm_numa_domain == -1) { in netmap_finalize_obj_allocator()
1452 clust = contigmalloc(p->_clustsize, M_NETMAP, in netmap_finalize_obj_allocator()
1453 M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); in netmap_finalize_obj_allocator()
1457 ds = DOMAINSET_PREF(nmd->nm_numa_domain); in netmap_finalize_obj_allocator()
1458 clust = contigmalloc_domainset(p->_clustsize, M_NETMAP, in netmap_finalize_obj_allocator()
1459 ds, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); in netmap_finalize_obj_allocator()
1467 i, p->name); in netmap_finalize_obj_allocator()
1471 for (i--; i >= lim; i--) { in netmap_finalize_obj_allocator()
1472 if (i % p->_clustentries == 0 && p->lut[i].vaddr) in netmap_finalize_obj_allocator()
1473 free(p->lut[i].vaddr, M_NETMAP); in netmap_finalize_obj_allocator()
1474 p->lut[i].vaddr = NULL; in netmap_finalize_obj_allocator()
1477 p->objtotal = i; in netmap_finalize_obj_allocator()
1479 p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; in netmap_finalize_obj_allocator()
1490 * of p->_objsize. in netmap_finalize_obj_allocator()
1492 for (; i < lim; i++, clust += p->_objsize) { in netmap_finalize_obj_allocator()
1493 p->lut[i].vaddr = clust; in netmap_finalize_obj_allocator()
1495 p->lut[i].paddr = vtophys(clust); in netmap_finalize_obj_allocator()
1499 p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize; in netmap_finalize_obj_allocator()
1501 nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'", in netmap_finalize_obj_allocator()
1502 p->numclusters, p->_clustsize >> 10, in netmap_finalize_obj_allocator()
1503 p->memtotal >> 10, p->name); in netmap_finalize_obj_allocator()
1536 netmap_reset_obj_allocator(&nmd->pools[i]); in netmap_mem_reset_all()
1538 nmd->flags &= ~NETMAP_MEM_FINALIZED; in netmap_mem_reset_all()
1544 int i, lim = p->objtotal; in netmap_mem_unmap()
1546 if (na == NULL || na->pdev == NULL) in netmap_mem_unmap()
1549 lut = &na->na_lut; in netmap_mem_unmap()
1565 nm_prdis("unmapping and freeing plut for %s", na->name); in netmap_mem_unmap()
1566 if (lut->plut == NULL || na->pdev == NULL) in netmap_mem_unmap()
1568 for (i = 0; i < lim; i += p->_clustentries) { in netmap_mem_unmap()
1569 if (lut->plut[i].paddr) in netmap_mem_unmap()
1570 netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize); in netmap_mem_unmap()
1572 nm_free_plut(lut->plut); in netmap_mem_unmap()
1573 lut->plut = NULL; in netmap_mem_unmap()
1583 int i, lim = p->objtotal; in netmap_mem_map()
1584 struct netmap_lut *lut = &na->na_lut; in netmap_mem_map()
1586 if (na->pdev == NULL) in netmap_mem_map()
1602 if (lut->plut != NULL) { in netmap_mem_map()
1603 nm_prdis("plut already allocated for %s", na->name); in netmap_mem_map()
1607 nm_prdis("allocating physical lut for %s", na->name); in netmap_mem_map()
1608 lut->plut = nm_alloc_plut(lim); in netmap_mem_map()
1609 if (lut->plut == NULL) { in netmap_mem_map()
1610 nm_prerr("Failed to allocate physical lut for %s", na->name); in netmap_mem_map()
1614 for (i = 0; i < lim; i += p->_clustentries) { in netmap_mem_map()
1615 lut->plut[i].paddr = 0; in netmap_mem_map()
1618 for (i = 0; i < lim; i += p->_clustentries) { in netmap_mem_map()
1621 if (p->lut[i].vaddr == NULL) in netmap_mem_map()
1624 error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, in netmap_mem_map()
1625 p->lut[i].vaddr, p->_clustsize); in netmap_mem_map()
1627 nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name); in netmap_mem_map()
1631 for (j = 1; j < p->_clustentries; j++) { in netmap_mem_map()
1632 lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize; in netmap_mem_map()
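
Editor's note: only the first object of each cluster is DMA-mapped (1624); since a cluster is physically contiguous, the remaining plut entries are derived by adding _objsize (1632). For example, a cluster mapped at physical 0x10000 with 2048-byte objects yields entries 0x10000, 0x10800, 0x11000, and so on.
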
1648 if (nmd->flags & NETMAP_MEM_FINALIZED) in netmap_mem_finalize_all()
1650 nmd->lasterr = 0; in netmap_mem_finalize_all()
1651 nmd->nm_totalsize = 0; in netmap_mem_finalize_all()
1653 nmd->lasterr = netmap_finalize_obj_allocator(nmd, &nmd->pools[i]); in netmap_mem_finalize_all()
1654 if (nmd->lasterr) in netmap_mem_finalize_all()
1656 nmd->nm_totalsize += nmd->pools[i].memtotal; in netmap_mem_finalize_all()
1658 nmd->nm_totalsize = (nmd->nm_totalsize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); in netmap_mem_finalize_all()
1659 nmd->lasterr = netmap_mem_init_bitmaps(nmd); in netmap_mem_finalize_all()
1660 if (nmd->lasterr) in netmap_mem_finalize_all()
1663 nmd->flags |= NETMAP_MEM_FINALIZED; in netmap_mem_finalize_all()
1667 nmd->pools[NETMAP_IF_POOL].memtotal >> 10, in netmap_mem_finalize_all()
1668 nmd->pools[NETMAP_RING_POOL].memtotal >> 10, in netmap_mem_finalize_all()
1669 nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); in netmap_mem_finalize_all()
1672 nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); in netmap_mem_finalize_all()
1678 return nmd->lasterr; in netmap_mem_finalize_all()
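
Editor's note: the masking at 1658 rounds nm_totalsize up to a page multiple; with 4 KiB pages, a 10000-byte total becomes (10000 + 4095) & ~4095 = 12288.
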
1704 d->ops = ops; in _netmap_mem_private_new()
1709 snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id); in _netmap_mem_private_new()
1712 snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, in _netmap_mem_private_new()
1714 d->name); in _netmap_mem_private_new()
1718 nm_prerr("%s: request too large", d->pools[i].name); in _netmap_mem_private_new()
1722 memtotal -= poolsz; in _netmap_mem_private_new()
1724 d->params[i].num = p[i].num; in _netmap_mem_private_new()
1725 d->params[i].size = p[i].size; in _netmap_mem_private_new()
1728 uint64_t sz = d->params[NETMAP_BUF_POOL].size; in _netmap_mem_private_new()
1729 uint64_t n = (memtotal + sz - 1) / sz; in _netmap_mem_private_new()
1734 d->pools[NETMAP_BUF_POOL].name, in _netmap_mem_private_new()
1737 d->params[NETMAP_BUF_POOL].num += n; in _netmap_mem_private_new()
1747 d->flags &= ~NETMAP_MEM_FINALIZED; in _netmap_mem_private_new()
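
Editor's note on the leftover arithmetic at 1728-1737: whatever memory remains after sizing the if and ring pools is converted into extra buffers, rounding up; for example, 1 MiB left over with 2048-byte buffers adds n = (1048576 + 2047) / 2048 = 512 buffers to the pool.
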
1816 d = _netmap_mem_private_new(sizeof(*d), p, -1, &netmap_mem_global_ops, 0, perr); in netmap_mem_private_new()
1821 /* Reference the IOMMU- and NUMA-local allocator: find an existing one or create a new one;
1822 * for non-hw adapters, fall back to the global allocator.
1830 if (na == NULL || na->pdev == NULL) in netmap_mem_get_allocator()
1833 domain = nm_numa_domain(na->pdev); in netmap_mem_get_allocator()
1834 grp_id = nm_iommu_group_id(na->pdev); in netmap_mem_get_allocator()
1839 if (!(nmd->flags & NETMAP_MEM_HIDDEN) && in netmap_mem_get_allocator()
1840 nmd->nm_grp == grp_id && nmd->nm_numa_domain == domain) { in netmap_mem_get_allocator()
1841 nmd->refcount++; in netmap_mem_get_allocator()
1846 nmd = nmd->next; in netmap_mem_get_allocator()
1859 snprintf(nmd->name, sizeof(nmd->name), "%d", nmd->nm_id); in netmap_mem_get_allocator()
1862 snprintf(nmd->pools[i].name, NETMAP_POOL_MAX_NAMSZ, "%s-%s", in netmap_mem_get_allocator()
1863 nm_mem_blueprint.pools[i].name, nmd->name); in netmap_mem_get_allocator()
1884 if (!netmap_mem_params_changed(nmd->params)) in netmap_mem2_config()
1889 if (nmd->flags & NETMAP_MEM_FINALIZED) { in netmap_mem2_config()
1892 netmap_reset_obj_allocator(&nmd->pools[i]); in netmap_mem2_config()
1894 nmd->flags &= ~NETMAP_MEM_FINALIZED; in netmap_mem2_config()
1898 nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], in netmap_mem2_config()
1899 nmd->params[i].num, nmd->params[i].size); in netmap_mem2_config()
1900 if (nmd->lasterr) in netmap_mem2_config()
1906 return nmd->lasterr; in netmap_mem2_config()
1912 if (nmd->flags & NETMAP_MEM_FINALIZED) in netmap_mem2_finalize()
1918 nmd->lasterr = 0; in netmap_mem2_finalize()
1921 return nmd->lasterr; in netmap_mem2_finalize()
1930 netmap_destroy_obj_allocator(&nmd->pools[i]); in netmap_mem2_delete()
1966 return kring->ring == NULL && in netmap_mem_ring_needed()
1967 (kring->users > 0 || in netmap_mem_ring_needed()
1968 (kring->nr_kflags & NKR_NEEDRING)); in netmap_mem_ring_needed()
1974 return kring->ring != NULL && in netmap_mem_ring_todelete()
1975 kring->users == 0 && in netmap_mem_ring_todelete()
1976 !(kring->nr_kflags & NKR_NEEDRING); in netmap_mem_ring_todelete()
1990 enum txrx t; in netmap_mem2_rings_create()
1997 struct netmap_ring *ring = kring->ring; in netmap_mem2_rings_create()
2004 kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING); in netmap_mem2_rings_create()
2008 nm_prinf("creating %s", kring->name); in netmap_mem2_rings_create()
2009 ndesc = kring->nkr_num_slots; in netmap_mem2_rings_create()
2018 kring->ring = ring; in netmap_mem2_rings_create()
2019 *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc; in netmap_mem2_rings_create()
2020 *(int64_t *)(uintptr_t)&ring->buf_ofs = in netmap_mem2_rings_create()
2021 (nmd->pools[NETMAP_IF_POOL].memtotal + in netmap_mem2_rings_create()
2022 nmd->pools[NETMAP_RING_POOL].memtotal) - in netmap_mem2_rings_create()
2026 ring->head = kring->rhead; in netmap_mem2_rings_create()
2027 ring->cur = kring->rcur; in netmap_mem2_rings_create()
2028 ring->tail = kring->rtail; in netmap_mem2_rings_create()
2029 *(uint32_t *)(uintptr_t)&ring->nr_buf_size = in netmap_mem2_rings_create()
2031 nm_prdis("%s h %d c %d t %d", kring->name, in netmap_mem2_rings_create()
2032 ring->head, ring->cur, ring->tail); in netmap_mem2_rings_create()
2034 if (!(kring->nr_kflags & NKR_FAKERING)) { in netmap_mem2_rings_create()
2037 nm_prinf("allocating buffers for %s", kring->name); in netmap_mem2_rings_create()
2038 if (netmap_new_bufs(nmd, ring->slot, ndesc)) { in netmap_mem2_rings_create()
2045 nm_prinf("NOT allocating buffers for %s", kring->name); in netmap_mem2_rings_create()
2046 netmap_mem_set_ring(nmd, ring->slot, ndesc, 0); in netmap_mem2_rings_create()
2049 *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id; in netmap_mem2_rings_create()
2050 *(uint16_t *)(uintptr_t)&ring->dir = kring->tx; in netmap_mem2_rings_create()
2057 /* we cannot actually cleanup here, since we don't own kring->users in netmap_mem2_rings_create()
2058 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement in netmap_mem2_rings_create()
2059 * the first or zero-out the second, then call netmap_free_rings() in netmap_mem2_rings_create()
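
Editor's note: stores like the one at 2019 write through fields that are declared const in the shared ring layout, so applications cannot legally modify them, while the kernel initializes them once by casting the const away through uintptr_t. A sketch of the idiom with a hypothetical struct (heap storage, since the real rings live in a mapped pool; strictly outside ISO C guarantees, but exactly what the code relies on):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_ring {
    const uint32_t num_slots;   /* read-only for normal consumers */
};

int
main(void)
{
    struct toy_ring *r = calloc(1, sizeof(*r));

    if (r == NULL)
        return 1;
    /* initialization path: deliberately strip const, as at 2019 */
    *(uint32_t *)(uintptr_t)&r->num_slots = 512;
    printf("%u\n", (unsigned)r->num_slots);
    free(r);
    return 0;
}
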
2069 enum txrx t; in netmap_mem2_rings_delete()
2075 struct netmap_ring *ring = kring->ring; in netmap_mem2_rings_delete()
2080 kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING); in netmap_mem2_rings_delete()
2084 nm_prinf("deleting ring %s", kring->name); in netmap_mem2_rings_delete()
2085 if (!(kring->nr_kflags & NKR_FAKERING)) { in netmap_mem2_rings_delete()
2086 nm_prdis("freeing bufs for %s", kring->name); in netmap_mem2_rings_delete()
2087 netmap_free_bufs(nmd, ring->slot, kring->nkr_num_slots); in netmap_mem2_rings_delete()
2089 nm_prdis("NOT freeing bufs for %s", kring->name); in netmap_mem2_rings_delete()
2092 kring->ring = NULL; in netmap_mem2_rings_delete()
2099 * Allocate the per-fd structure netmap_if.
2112 enum txrx t; in netmap_mem2_if_new()
2131 /* initialize base fields -- override const */ in netmap_mem2_if_new()
2132 *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; in netmap_mem2_if_new()
2133 *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; in netmap_mem2_if_new()
2134 *(u_int *)(uintptr_t)&nifp->ni_host_tx_rings = in netmap_mem2_if_new()
2135 (na->num_host_tx_rings ? na->num_host_tx_rings : 1); in netmap_mem2_if_new()
2136 *(u_int *)(uintptr_t)&nifp->ni_host_rx_rings = in netmap_mem2_if_new()
2137 (na->num_host_rx_rings ? na->num_host_rx_rings : 1); in netmap_mem2_if_new()
2138 strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name)); in netmap_mem2_if_new()
2151 if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX] in netmap_mem2_if_new()
2152 && i < priv->np_qlast[NR_TX]) { in netmap_mem2_if_new()
2154 na->tx_rings[i]->ring) - base; in netmap_mem2_if_new()
2156 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs; in netmap_mem2_if_new()
2163 if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX] in netmap_mem2_if_new()
2164 && i < priv->np_qlast[NR_RX]) { in netmap_mem2_if_new()
2166 na->rx_rings[i]->ring) - base; in netmap_mem2_if_new()
2168 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs; in netmap_mem2_if_new()
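
Editor's note: the ring_ofs[] entries stored at 2156/2168 are offsets relative to the nifp itself, which is what lets userspace (and the passthrough code at 2808-2809 further below) recover a ring pointer with plain pointer arithmetic. A toy illustration with made-up offsets:

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
    char region[16384];                      /* stand-in for the mmap */
    char *nifp = region + 1024;              /* made-up nifp offset */
    char *ring = region + 9216;              /* made-up ring offset */
    ptrdiff_t ring_ofs = ring - nifp;        /* what ring_ofs[i] stores */

    /* consumer side: nifp + ring_ofs[i] gets the ring back */
    printf("ring_ofs %td, match %d\n", ring_ofs,
        nifp + ring_ofs == ring);
    return 0;
}
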
2181 if (nifp->ni_bufs_head) in netmap_mem2_if_delete()
2182 netmap_extra_free(na, nifp->ni_bufs_head); in netmap_mem2_if_delete()
2191 nm_prinf("active = %d", nmd->active); in netmap_mem2_deref()
2216 ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL, in netmap_mem_pools_info_get()
2217 &req->nr_mem_id); in netmap_mem_pools_info_get()
2223 req->nr_if_pool_offset = 0; in netmap_mem_pools_info_get()
2224 req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal; in netmap_mem_pools_info_get()
2225 req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize; in netmap_mem_pools_info_get()
2227 req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal; in netmap_mem_pools_info_get()
2228 req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal; in netmap_mem_pools_info_get()
2229 req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize; in netmap_mem_pools_info_get()
2231 req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal + in netmap_mem_pools_info_get()
2232 nmd->pools[NETMAP_RING_POOL].memtotal; in netmap_mem_pools_info_get()
2233 req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; in netmap_mem_pools_info_get()
2234 req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; in netmap_mem_pools_info_get()
2254 netmap_mem_ext_list->prev = e; in netmap_mem_ext_register()
2255 e->next = netmap_mem_ext_list; in netmap_mem_ext_register()
2257 e->prev = NULL; in netmap_mem_ext_register()
2265 if (e->prev) in netmap_mem_ext_unregister()
2266 e->prev->next = e->next; in netmap_mem_ext_unregister()
2268 netmap_mem_ext_list = e->next; in netmap_mem_ext_unregister()
2269 if (e->next) in netmap_mem_ext_unregister()
2270 e->next->prev = e->prev; in netmap_mem_ext_unregister()
2271 e->prev = e->next = NULL; in netmap_mem_ext_unregister()
2280 for (e = netmap_mem_ext_list; e; e = e->next) { in netmap_mem_ext_search()
2281 if (nm_os_extmem_isequal(e->os, os)) { in netmap_mem_ext_search()
2282 netmap_mem_get(&e->up); in netmap_mem_ext_search()
2301 struct netmap_obj_pool *p = &d->pools[i]; in netmap_mem_ext_delete()
2303 if (p->lut) { in netmap_mem_ext_delete()
2304 nm_free_lut(p->lut, p->objtotal); in netmap_mem_ext_delete()
2305 p->lut = NULL; in netmap_mem_ext_delete()
2308 if (e->os) in netmap_mem_ext_delete()
2309 nm_os_extmem_delete(e->os); in netmap_mem_ext_delete()
2346 if (pi->nr_if_pool_objtotal == 0) in netmap_mem_ext_create()
2347 pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num; in netmap_mem_ext_create()
2348 if (pi->nr_if_pool_objsize == 0) in netmap_mem_ext_create()
2349 pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size; in netmap_mem_ext_create()
2350 if (pi->nr_ring_pool_objtotal == 0) in netmap_mem_ext_create()
2351 pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num; in netmap_mem_ext_create()
2352 if (pi->nr_ring_pool_objsize == 0) in netmap_mem_ext_create()
2353 pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size; in netmap_mem_ext_create()
2354 if (pi->nr_buf_pool_objtotal == 0) in netmap_mem_ext_create()
2355 pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num; in netmap_mem_ext_create()
2356 if (pi->nr_buf_pool_objsize == 0) in netmap_mem_ext_create()
2357 pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size; in netmap_mem_ext_create()
2360 pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize, in netmap_mem_ext_create()
2361 pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize, in netmap_mem_ext_create()
2362 pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize); in netmap_mem_ext_create()
2373 return &nme->up; in netmap_mem_ext_create()
2381 { pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal }, in netmap_mem_ext_create()
2382 { pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal }, in netmap_mem_ext_create()
2383 { pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }}, in netmap_mem_ext_create()
2384 -1, in netmap_mem_ext_create()
2386 pi->nr_memsize, in netmap_mem_ext_create()
2396 nme->os = os; in netmap_mem_ext_create()
2399 clust = nm_os_extmem_nextpage(nme->os); in netmap_mem_ext_create()
2402 struct netmap_obj_pool *p = &nme->up.pools[i]; in netmap_mem_ext_create()
2403 struct netmap_obj_params *o = &nme->up.params[i]; in netmap_mem_ext_create()
2405 p->_objsize = o->size; in netmap_mem_ext_create()
2406 p->_clustsize = o->size; in netmap_mem_ext_create()
2407 p->_clustentries = 1; in netmap_mem_ext_create()
2409 p->lut = nm_alloc_lut(o->num); in netmap_mem_ext_create()
2410 if (p->lut == NULL) { in netmap_mem_ext_create()
2415 p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t); in netmap_mem_ext_create()
2416 p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots); in netmap_mem_ext_create()
2417 if (p->invalid_bitmap == NULL) { in netmap_mem_ext_create()
2423 p->objtotal = 0; in netmap_mem_ext_create()
2424 p->memtotal = 0; in netmap_mem_ext_create()
2425 p->objfree = 0; in netmap_mem_ext_create()
2429 for (j = 0; j < o->num && nr_pages > 0; j++) { in netmap_mem_ext_create()
2432 p->lut[j].vaddr = clust + off; in netmap_mem_ext_create()
2434 p->lut[j].paddr = vtophys(p->lut[j].vaddr); in netmap_mem_ext_create()
2436 nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr); in netmap_mem_ext_create()
2437 noff = off + p->_objsize; in netmap_mem_ext_create()
2445 noff -= PAGE_SIZE; in netmap_mem_ext_create()
2446 clust = nm_os_extmem_nextpage(nme->os); in netmap_mem_ext_create()
2447 nr_pages--; in netmap_mem_ext_create()
2450 if (noff > 0 && !nm_isset(p->invalid_bitmap, j) && in netmap_mem_ext_create()
2457 p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U); in netmap_mem_ext_create()
2465 p->objtotal = j; in netmap_mem_ext_create()
2466 p->numclusters = p->objtotal; in netmap_mem_ext_create()
2467 p->memtotal = j * (size_t)p->_objsize; in netmap_mem_ext_create()
2468 nm_prdis("%d memtotal %zu", j, p->memtotal); in netmap_mem_ext_create()
2473 return &nme->up; in netmap_mem_ext_create()
2476 netmap_mem_put(&nme->up); in netmap_mem_ext_create()
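
Editor's note on the page-walking loop above: an object that would straddle two host pages that are not virtually contiguous gets its invalid_bitmap bit set (2457) instead of a usable lut entry, and netmap_init_obj_allocator_bitmap() at 422 later skips those indices when building the free bitmap, so the allocator never hands them out.
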
2522 ptif->ifp = ifp; in netmap_mem_pt_guest_ifp_add()
2523 ptif->nifp_offset = nifp_offset; in netmap_mem_pt_guest_ifp_add()
2525 if (ptnmd->pt_ifs) { in netmap_mem_pt_guest_ifp_add()
2526 ptif->next = ptnmd->pt_ifs; in netmap_mem_pt_guest_ifp_add()
2528 ptnmd->pt_ifs = ptif; in netmap_mem_pt_guest_ifp_add()
2533 if_name(ptif->ifp), ptif->nifp_offset); in netmap_mem_pt_guest_ifp_add()
2545 for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { in netmap_mem_pt_guest_ifp_lookup()
2546 if (curr->ifp == ifp) { in netmap_mem_pt_guest_ifp_lookup()
2561 int ret = -1; in netmap_mem_pt_guest_ifp_del()
2565 for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { in netmap_mem_pt_guest_ifp_del()
2566 if (curr->ifp == ifp) { in netmap_mem_pt_guest_ifp_del()
2568 prev->next = curr->next; in netmap_mem_pt_guest_ifp_del()
2570 ptnmd->pt_ifs = curr->next; in netmap_mem_pt_guest_ifp_del()
2573 if_name(curr->ifp), curr->nifp_offset); in netmap_mem_pt_guest_ifp_del()
2591 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { in netmap_mem_pt_guest_get_lut()
2595 *lut = ptnmd->buf_lut; in netmap_mem_pt_guest_get_lut()
2605 error = nmd->ops->nmd_config(nmd); in netmap_mem_pt_guest_get_info()
2610 *size = nmd->nm_totalsize; in netmap_mem_pt_guest_get_info()
2612 *memflags = nmd->flags; in netmap_mem_pt_guest_get_info()
2614 *id = nmd->nm_id; in netmap_mem_pt_guest_get_info()
2626 /* if the offset is valid, just return csb->base_addr + off */ in netmap_mem_pt_guest_ofstophys()
2627 paddr = (vm_paddr_t)(ptnmd->nm_paddr + off); in netmap_mem_pt_guest_ofstophys()
2654 if (nmd->flags & NETMAP_MEM_FINALIZED) in netmap_mem_pt_guest_finalize()
2657 if (ptnmd->ptn_dev == NULL) { in netmap_mem_pt_guest_finalize()
2662 /* Map memory through ptnetmap-memdev BAR. */ in netmap_mem_pt_guest_finalize()
2663 error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr, in netmap_mem_pt_guest_finalize()
2664 &ptnmd->nm_addr, &mem_size); in netmap_mem_pt_guest_finalize()
2670 bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, in netmap_mem_pt_guest_finalize()
2672 nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, in netmap_mem_pt_guest_finalize()
2676 if (ptnmd->buf_lut.lut == NULL) { in netmap_mem_pt_guest_finalize()
2678 ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers); in netmap_mem_pt_guest_finalize()
2679 if (ptnmd->buf_lut.lut == NULL) { in netmap_mem_pt_guest_finalize()
2686 poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, in netmap_mem_pt_guest_finalize()
2688 vaddr = (char *)(ptnmd->nm_addr) + poolofs; in netmap_mem_pt_guest_finalize()
2689 paddr = ptnmd->nm_paddr + poolofs; in netmap_mem_pt_guest_finalize()
2692 ptnmd->buf_lut.lut[i].vaddr = vaddr; in netmap_mem_pt_guest_finalize()
2697 ptnmd->buf_lut.objtotal = nbuffers; in netmap_mem_pt_guest_finalize()
2698 ptnmd->buf_lut.objsize = bufsize; in netmap_mem_pt_guest_finalize()
2699 nmd->nm_totalsize = mem_size; in netmap_mem_pt_guest_finalize()
2704 * replication? maybe nmd->pools[] should not be in netmap_mem_pt_guest_finalize()
2706 nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize; in netmap_mem_pt_guest_finalize()
2707 nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers; in netmap_mem_pt_guest_finalize()
2709 nmd->flags |= NETMAP_MEM_FINALIZED; in netmap_mem_pt_guest_finalize()
2719 if (nmd->active == 1 && in netmap_mem_pt_guest_deref()
2720 (nmd->flags & NETMAP_MEM_FINALIZED)) { in netmap_mem_pt_guest_deref()
2721 nmd->flags &= ~NETMAP_MEM_FINALIZED; in netmap_mem_pt_guest_deref()
2722 /* unmap ptnetmap-memdev memory */ in netmap_mem_pt_guest_deref()
2723 if (ptnmd->ptn_dev) { in netmap_mem_pt_guest_deref()
2724 nm_os_pt_memdev_iounmap(ptnmd->ptn_dev); in netmap_mem_pt_guest_deref()
2726 ptnmd->nm_addr = NULL; in netmap_mem_pt_guest_deref()
2727 ptnmd->nm_paddr = 0; in netmap_mem_pt_guest_deref()
2736 return (const char *)(vaddr) - (char *)(ptnmd->nm_addr); in netmap_mem_pt_guest_if_offset()
2746 if (nmd->active > 0) in netmap_mem_pt_guest_delete()
2747 nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active); in netmap_mem_pt_guest_delete()
2762 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp); in netmap_mem_pt_guest_if_new()
2764 nm_prerr("interface %s is not in passthrough", na->name); in netmap_mem_pt_guest_if_new()
2768 nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) + in netmap_mem_pt_guest_if_new()
2769 ptif->nifp_offset); in netmap_mem_pt_guest_if_new()
2780 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp); in netmap_mem_pt_guest_if_delete()
2782 nm_prerr("interface %s is not in passthrough", na->name); in netmap_mem_pt_guest_if_delete()
2793 int i, error = -1; in netmap_mem_pt_guest_rings_create()
2795 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp); in netmap_mem_pt_guest_rings_create()
2797 nm_prerr("interface %s is not in passthrough", na->name); in netmap_mem_pt_guest_rings_create()
2803 nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset); in netmap_mem_pt_guest_rings_create()
2805 struct netmap_kring *kring = na->tx_rings[i]; in netmap_mem_pt_guest_rings_create()
2806 if (kring->ring) in netmap_mem_pt_guest_rings_create()
2808 kring->ring = (struct netmap_ring *) in netmap_mem_pt_guest_rings_create()
2809 ((char *)nifp + nifp->ring_ofs[i]); in netmap_mem_pt_guest_rings_create()
2812 struct netmap_kring *kring = na->rx_rings[i]; in netmap_mem_pt_guest_rings_create()
2813 if (kring->ring) in netmap_mem_pt_guest_rings_create()
2815 kring->ring = (struct netmap_ring *) in netmap_mem_pt_guest_rings_create()
2817 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]); in netmap_mem_pt_guest_rings_create()
2829 enum txrx t; in netmap_mem_pt_guest_rings_delete()
2836 kring->ring = NULL; in netmap_mem_pt_guest_rings_delete()
2868 if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref && in netmap_mem_pt_guest_find_memid()
2869 ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) { in netmap_mem_pt_guest_find_memid()
2871 mem->refcount++; in netmap_mem_pt_guest_find_memid()
2875 scan = scan->next; in netmap_mem_pt_guest_find_memid()
2894 ptnmd->up.ops = &netmap_mem_pt_guest_ops; in netmap_mem_pt_guest_create()
2895 ptnmd->host_mem_id = mem_id; in netmap_mem_pt_guest_create()
2896 ptnmd->pt_ifs = NULL; in netmap_mem_pt_guest_create()
2899 err = nm_mem_assign_id_locked(&ptnmd->up, -1, -1); in netmap_mem_pt_guest_create()
2903 ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED; in netmap_mem_pt_guest_create()
2904 ptnmd->up.flags |= NETMAP_MEM_IO; in netmap_mem_pt_guest_create()
2906 NMA_LOCK_INIT(&ptnmd->up); in netmap_mem_pt_guest_create()
2908 snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id); in netmap_mem_pt_guest_create()
2911 return &ptnmd->up; in netmap_mem_pt_guest_create()
2913 netmap_mem_pt_guest_delete(&ptnmd->up); in netmap_mem_pt_guest_create()
2957 ptnmd->ptn_dev = ptn_dev; in netmap_mem_pt_guest_attach()