xref: /freebsd/sys/dev/netmap/netmap_mem2.c (revision f3c5273d315a64826d2149ac453ff8c4583ddbe8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2014 Matteo Landi
5  * Copyright (C) 2012-2016 Luigi Rizzo
6  * Copyright (C) 2012-2016 Giuseppe Lettieri
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *   1. Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *   2. Redistributions in binary form must reproduce the above copyright
15  *      notice, this list of conditions and the following disclaimer in the
16  *      documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #ifdef linux
32 #include "bsd_glue.h"
33 #endif /* linux */
34 
35 #ifdef __APPLE__
36 #include "osx_glue.h"
37 #endif /* __APPLE__ */
38 
39 #ifdef __FreeBSD__
40 #include <sys/cdefs.h> /* prerequisite */
41 __FBSDID("$FreeBSD$");
42 
43 #include <sys/types.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>		/* MALLOC_DEFINE */
46 #include <sys/proc.h>
47 #include <vm/vm.h>	/* vtophys */
48 #include <vm/pmap.h>	/* vtophys */
49 #include <sys/socket.h> /* sockaddrs */
50 #include <sys/selinfo.h>
51 #include <sys/sysctl.h>
52 #include <net/if.h>
53 #include <net/if_var.h>
54 #include <net/vnet.h>
55 #include <machine/bus.h>	/* bus_dmamap_* */
56 
57 /* M_NETMAP only used in here */
58 MALLOC_DECLARE(M_NETMAP);
59 MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
60 
61 #endif /* __FreeBSD__ */
62 
63 #ifdef _WIN32
64 #include <win_glue.h>
65 #endif
66 
67 #include <net/netmap.h>
68 #include <dev/netmap/netmap_kern.h>
69 #include <net/netmap_virt.h>
70 #include "netmap_mem2.h"
71 
72 #ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
73 #define NETMAP_BUF_MAX_NUM  8*4096      /* if too big, it takes too much time to allocate */
74 #else
75 #define NETMAP_BUF_MAX_NUM 20*4096*2	/* large machine */
76 #endif
77 
78 #define NETMAP_POOL_MAX_NAMSZ	32
79 
80 
81 enum {
82 	NETMAP_IF_POOL   = 0,
83 	NETMAP_RING_POOL,
84 	NETMAP_BUF_POOL,
85 	NETMAP_POOLS_NR
86 };
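
/*
 * Layout sketch (informal, derived from the offset helpers below): the
 * memory region exported to userspace is addressed as the three pools
 * above laid out back to back, netmap_if objects first, then netmap_ring
 * objects, then packet buffers. netmap_mem2_ofstophys() and the
 * netmap_*_offset() helpers rely on this ordering when translating
 * offsets.
 */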
87 
88 
89 struct netmap_obj_params {
90 	u_int size;
91 	u_int num;
92 
93 	u_int last_size;
94 	u_int last_num;
95 };
96 
97 struct netmap_obj_pool {
98 	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */
99 
100 	/* ---------------------------------------------------*/
101 	/* these are only meaningful if the pool is finalized */
102 	/* (see 'finalized' field in netmap_mem_d)            */
103 	u_int objtotal;         /* actual total number of objects. */
104 	u_int memtotal;		/* actual total memory space */
105 	u_int numclusters;	/* actual number of clusters */
106 
107 	u_int objfree;          /* number of free objects. */
108 
109 	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
110 	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
111 	uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */
112 	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
113 	int	alloc_done;	/* we have allocated the memory */
114 	/* ---------------------------------------------------*/
115 
116 	/* limits */
117 	u_int objminsize;	/* minimum object size */
118 	u_int objmaxsize;	/* maximum object size */
119 	u_int nummin;		/* minimum number of objects */
120 	u_int nummax;		/* maximum number of objects */
121 
122 	/* these are changed only by config */
123 	u_int _objtotal;	/* total number of objects */
124 	u_int _objsize;		/* object size */
125 	u_int _clustsize;       /* cluster size */
126 	u_int _clustentries;    /* objects per cluster */
127 	u_int _numclusters;	/* number of clusters */
128 
129 	/* requested values */
130 	u_int r_objtotal;
131 	u_int r_objsize;
132 };
133 
134 #define NMA_LOCK_T		NM_MTX_T
135 #define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
136 #define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
137 #define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
138 #define NMA_SPINLOCK(n)         NM_MTX_SPINLOCK((n)->nm_mtx)
139 #define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)
140 
141 struct netmap_mem_ops {
142 	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
143 	int  (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
144 			u_int *memflags, uint16_t *id);
145 
146 	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
147 	int (*nmd_config)(struct netmap_mem_d *);
148 	int (*nmd_finalize)(struct netmap_mem_d *);
149 	void (*nmd_deref)(struct netmap_mem_d *);
150 	ssize_t  (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
151 	void (*nmd_delete)(struct netmap_mem_d *);
152 
153 	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *,
154 					 struct netmap_priv_d *);
155 	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
156 	int  (*nmd_rings_create)(struct netmap_adapter *);
157 	void (*nmd_rings_delete)(struct netmap_adapter *);
158 };
159 
160 struct netmap_mem_d {
161 	NMA_LOCK_T nm_mtx;  /* protect the allocator */
162 	u_int nm_totalsize; /* shorthand */
163 
164 	u_int flags;
165 #define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
166 #define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
167 	int lasterr;		/* last error for curr config */
168 	int active;		/* active users */
169 	int refcount;
170 	/* the three allocators */
171 	struct netmap_obj_pool pools[NETMAP_POOLS_NR];
172 
173 	nm_memid_t nm_id;	/* allocator identifier */
174 	int nm_grp;	/* iommu group id */
175 
176 	/* list of all existing allocators, sorted by nm_id */
177 	struct netmap_mem_d *prev, *next;
178 
179 	struct netmap_mem_ops *ops;
180 
181 	struct netmap_obj_params params[NETMAP_POOLS_NR];
182 
183 #define NM_MEM_NAMESZ	16
184 	char name[NM_MEM_NAMESZ];
185 };
186 
187 int
188 netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
189 {
190 	int rv;
191 
192 	NMA_LOCK(nmd);
193 	rv = nmd->ops->nmd_get_lut(nmd, lut);
194 	NMA_UNLOCK(nmd);
195 
196 	return rv;
197 }
198 
199 int
200 netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
201 		u_int *memflags, nm_memid_t *memid)
202 {
203 	int rv;
204 
205 	NMA_LOCK(nmd);
206 	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
207 	NMA_UNLOCK(nmd);
208 
209 	return rv;
210 }
211 
212 vm_paddr_t
213 netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
214 {
215 	vm_paddr_t pa;
216 
217 #if defined(__FreeBSD__)
218 	/* This function is called by netmap_dev_pager_fault(), which holds a
219 	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
220 	 * spin on the trylock. */
221 	NMA_SPINLOCK(nmd);
222 #else
223 	NMA_LOCK(nmd);
224 #endif
225 	pa = nmd->ops->nmd_ofstophys(nmd, off);
226 	NMA_UNLOCK(nmd);
227 
228 	return pa;
229 }
230 
231 static int
232 netmap_mem_config(struct netmap_mem_d *nmd)
233 {
234 	if (nmd->active) {
235 		/* already in use. Not fatal, but we
236 		 * cannot change the configuration
237 		 */
238 		return 0;
239 	}
240 
241 	return nmd->ops->nmd_config(nmd);
242 }
243 
244 ssize_t
245 netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
246 {
247 	ssize_t rv;
248 
249 	NMA_LOCK(nmd);
250 	rv = nmd->ops->nmd_if_offset(nmd, off);
251 	NMA_UNLOCK(nmd);
252 
253 	return rv;
254 }
255 
256 static void
257 netmap_mem_delete(struct netmap_mem_d *nmd)
258 {
259 	nmd->ops->nmd_delete(nmd);
260 }
261 
262 struct netmap_if *
263 netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
264 {
265 	struct netmap_if *nifp;
266 	struct netmap_mem_d *nmd = na->nm_mem;
267 
268 	NMA_LOCK(nmd);
269 	nifp = nmd->ops->nmd_if_new(na, priv);
270 	NMA_UNLOCK(nmd);
271 
272 	return nifp;
273 }
274 
275 void
276 netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
277 {
278 	struct netmap_mem_d *nmd = na->nm_mem;
279 
280 	NMA_LOCK(nmd);
281 	nmd->ops->nmd_if_delete(na, nif);
282 	NMA_UNLOCK(nmd);
283 }
284 
285 int
286 netmap_mem_rings_create(struct netmap_adapter *na)
287 {
288 	int rv;
289 	struct netmap_mem_d *nmd = na->nm_mem;
290 
291 	NMA_LOCK(nmd);
292 	rv = nmd->ops->nmd_rings_create(na);
293 	NMA_UNLOCK(nmd);
294 
295 	return rv;
296 }
297 
298 void
299 netmap_mem_rings_delete(struct netmap_adapter *na)
300 {
301 	struct netmap_mem_d *nmd = na->nm_mem;
302 
303 	NMA_LOCK(nmd);
304 	nmd->ops->nmd_rings_delete(na);
305 	NMA_UNLOCK(nmd);
306 }
307 
308 static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
309 static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
310 static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);
311 static void nm_mem_release_id(struct netmap_mem_d *);
312 
313 nm_memid_t
314 netmap_mem_get_id(struct netmap_mem_d *nmd)
315 {
316 	return nmd->nm_id;
317 }
318 
319 #ifdef NM_DEBUG_MEM_PUTGET
320 #define NM_DBG_REFC(nmd, func, line)	\
321 	nm_prinf("%d mem[%d] -> %d", line, (nmd)->nm_id, (nmd)->refcount);
322 #else
323 #define NM_DBG_REFC(nmd, func, line)
324 #endif
325 
326 /* circular list of all existing allocators */
327 static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
328 NM_MTX_T nm_mem_list_lock;
329 
330 struct netmap_mem_d *
331 __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
332 {
333 	NM_MTX_LOCK(nm_mem_list_lock);
334 	nmd->refcount++;
335 	NM_DBG_REFC(nmd, func, line);
336 	NM_MTX_UNLOCK(nm_mem_list_lock);
337 	return nmd;
338 }
339 
340 void
341 __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
342 {
343 	int last;
344 	NM_MTX_LOCK(nm_mem_list_lock);
345 	last = (--nmd->refcount == 0);
346 	if (last)
347 		nm_mem_release_id(nmd);
348 	NM_DBG_REFC(nmd, func, line);
349 	NM_MTX_UNLOCK(nm_mem_list_lock);
350 	if (last)
351 		netmap_mem_delete(nmd);
352 }
353 
354 int
355 netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
356 {
357 	int lasterr = 0;
358 	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
359 		return ENOMEM;
360 	}
361 
362 	NMA_LOCK(nmd);
363 
364 	if (netmap_mem_config(nmd))
365 		goto out;
366 
367 	nmd->active++;
368 
369 	nmd->lasterr = nmd->ops->nmd_finalize(nmd);
370 
371 	if (!nmd->lasterr && na->pdev) {
372 		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
373 	}
374 
375 out:
376 	lasterr = nmd->lasterr;
377 	NMA_UNLOCK(nmd);
378 
379 	if (lasterr)
380 		netmap_mem_deref(nmd, na);
381 
382 	return lasterr;
383 }
384 
385 static int
386 nm_isset(uint32_t *bitmap, u_int i)
387 {
388 	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
389 }
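
/*
 * For illustration: the free bitmap packs 32 objects per uint32_t, so
 * object i lives in word (i >> 5) at bit position (i & 31). E.g. object
 * 70 is bit 6 of bitmap[2]; nm_isset() returns nonzero when that bit is
 * set, i.e. when the object is free.
 */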
390 
391 
392 static int
393 netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
394 {
395 	u_int n, j;
396 
397 	if (p->bitmap == NULL) {
398 		/* Allocate the bitmap */
399 		n = (p->objtotal + 31) / 32;
400 		p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
401 		if (p->bitmap == NULL) {
402 			nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
403 			    p->name);
404 			return ENOMEM;
405 		}
406 		p->bitmap_slots = n;
407 	} else {
408 		memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
409 	}
410 
411 	p->objfree = 0;
412 	/*
413 	 * Set all the bits in the bitmap that have
414 	 * corresponding buffers to 1 to indicate they are
415 	 * free.
416 	 */
417 	for (j = 0; j < p->objtotal; j++) {
418 		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
419 			if (netmap_debug & NM_DEBUG_MEM)
420 				nm_prinf("skipping %s %d", p->name, j);
421 			continue;
422 		}
423 		p->bitmap[ (j>>5) ] |=  ( 1U << (j & 31U) );
424 		p->objfree++;
425 	}
426 
427 	if (netmap_verbose)
428 		nm_prinf("%s free %u", p->name, p->objfree);
429 	if (p->objfree == 0) {
430 		if (netmap_verbose)
431 			nm_prerr("%s: no objects available", p->name);
432 		return ENOMEM;
433 	}
434 
435 	return 0;
436 }
437 
438 static int
439 netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
440 {
441 	int i, error = 0;
442 
443 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
444 		struct netmap_obj_pool *p = &nmd->pools[i];
445 
446 		error = netmap_init_obj_allocator_bitmap(p);
447 		if (error)
448 			return error;
449 	}
450 
451 	/*
452 	 * buffers 0 and 1 are reserved
453 	 */
454 	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
455 		nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
456 		return ENOMEM;
457 	}
458 
459 	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
460 	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
461 		/* XXX This check is a workaround that prevents a
462 		 * NULL pointer crash which currently happens only
463 		 * with ptnetmap guests.
464 		 * Removed shared-info --> is the bug still there? */
465 		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
466 	}
467 	return 0;
468 }
469 
470 int
471 netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
472 {
473 	int last_user = 0;
474 	NMA_LOCK(nmd);
475 	if (na->active_fds <= 0)
476 		netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
477 	if (nmd->active == 1) {
478 		last_user = 1;
479 		/*
480 		 * Reset the allocator when it falls out of use so that any
481 		 * pool resources leaked by unclean application exits are
482 		 * reclaimed.
483 		 */
484 		netmap_mem_init_bitmaps(nmd);
485 	}
486 	nmd->ops->nmd_deref(nmd);
487 
488 	nmd->active--;
489 	if (last_user) {
490 		nmd->nm_grp = -1;
491 		nmd->lasterr = 0;
492 	}
493 
494 	NMA_UNLOCK(nmd);
495 	return last_user;
496 }
497 
498 
499 /* accessor functions */
500 static int
501 netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
502 {
503 	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
504 #ifdef __FreeBSD__
505 	lut->plut = lut->lut;
506 #endif
507 	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
508 	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
509 
510 	return 0;
511 }
512 
513 static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
514 	[NETMAP_IF_POOL] = {
515 		.size = 1024,
516 		.num  = 2,
517 	},
518 	[NETMAP_RING_POOL] = {
519 		.size = 5*PAGE_SIZE,
520 		.num  = 4,
521 	},
522 	[NETMAP_BUF_POOL] = {
523 		.size = 2048,
524 		.num  = 4098,
525 	},
526 };
527 
528 
529 /*
530  * nm_mem is the memory allocator used for all physical interfaces
531  * running in netmap mode.
532  * Virtual (VALE) ports each have their own allocator.
533  */
534 extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
535 struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
536 	.pools = {
537 		[NETMAP_IF_POOL] = {
538 			.name 	= "netmap_if",
539 			.objminsize = sizeof(struct netmap_if),
540 			.objmaxsize = 4096,
541 			.nummin     = 10,	/* don't be stingy */
542 			.nummax	    = 10000,	/* XXX very large */
543 		},
544 		[NETMAP_RING_POOL] = {
545 			.name 	= "netmap_ring",
546 			.objminsize = sizeof(struct netmap_ring),
547 			.objmaxsize = 32*PAGE_SIZE,
548 			.nummin     = 2,
549 			.nummax	    = 1024,
550 		},
551 		[NETMAP_BUF_POOL] = {
552 			.name	= "netmap_buf",
553 			.objminsize = 64,
554 			.objmaxsize = 65536,
555 			.nummin     = 4,
556 			.nummax	    = 1000000, /* one million! */
557 		},
558 	},
559 
560 	.params = {
561 		[NETMAP_IF_POOL] = {
562 			.size = 1024,
563 			.num  = 100,
564 		},
565 		[NETMAP_RING_POOL] = {
566 			.size = 9*PAGE_SIZE,
567 			.num  = 200,
568 		},
569 		[NETMAP_BUF_POOL] = {
570 			.size = 2048,
571 			.num  = NETMAP_BUF_MAX_NUM,
572 		},
573 	},
574 
575 	.nm_id = 1,
576 	.nm_grp = -1,
577 
578 	.prev = &nm_mem,
579 	.next = &nm_mem,
580 
581 	.ops = &netmap_mem_global_ops,
582 
583 	.name = "1"
584 };
585 
586 
587 /* blueprint for the private memory allocators */
588 /* XXX clang is not happy about using name as a print format */
589 static const struct netmap_mem_d nm_blueprint = {
590 	.pools = {
591 		[NETMAP_IF_POOL] = {
592 			.name 	= "%s_if",
593 			.objminsize = sizeof(struct netmap_if),
594 			.objmaxsize = 4096,
595 			.nummin     = 1,
596 			.nummax	    = 100,
597 		},
598 		[NETMAP_RING_POOL] = {
599 			.name 	= "%s_ring",
600 			.objminsize = sizeof(struct netmap_ring),
601 			.objmaxsize = 32*PAGE_SIZE,
602 			.nummin     = 2,
603 			.nummax	    = 1024,
604 		},
605 		[NETMAP_BUF_POOL] = {
606 			.name	= "%s_buf",
607 			.objminsize = 64,
608 			.objmaxsize = 65536,
609 			.nummin     = 4,
610 			.nummax	    = 1000000, /* one million! */
611 		},
612 	},
613 
614 	.nm_grp = -1,
615 
616 	.flags = NETMAP_MEM_PRIVATE,
617 
618 	.ops = &netmap_mem_global_ops,
619 };
620 
621 /* memory allocator related sysctls */
622 
623 #define STRINGIFY(x) #x
624 
625 
626 #define DECLARE_SYSCTLS(id, name) \
627 	SYSBEGIN(mem2_ ## name); \
628 	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
629 	    CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
630 	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
631 	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
632 	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
633 	    CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
634 	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
635 	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
636 	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
637 	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
638 	    "Default size of private netmap " STRINGIFY(name) "s"); \
639 	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
640 	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
641 	    "Default number of private netmap " STRINGIFY(name) "s");	\
642 	SYSEND
643 
644 SYSCTL_DECL(_dev_netmap);
645 DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
646 DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
647 DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
648 
649 /* call with nm_mem_list_lock held */
650 static int
651 nm_mem_assign_id_locked(struct netmap_mem_d *nmd)
652 {
653 	nm_memid_t id;
654 	struct netmap_mem_d *scan = netmap_last_mem_d;
655 	int error = ENOMEM;
656 
657 	do {
658 		/* we rely on unsigned wrap around */
659 		id = scan->nm_id + 1;
660 		if (id == 0) /* reserve 0 as error value */
661 			id = 1;
662 		scan = scan->next;
663 		if (id != scan->nm_id) {
664 			nmd->nm_id = id;
665 			nmd->prev = scan->prev;
666 			nmd->next = scan;
667 			scan->prev->next = nmd;
668 			scan->prev = nmd;
669 			netmap_last_mem_d = nmd;
670 			nmd->refcount = 1;
671 			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
672 			error = 0;
673 			break;
674 		}
675 	} while (scan != netmap_last_mem_d);
676 
677 	return error;
678 }
679 
680 /* call with nm_mem_list_lock *not* held */
681 static int
682 nm_mem_assign_id(struct netmap_mem_d *nmd)
683 {
684 	int ret;
685 
686 	NM_MTX_LOCK(nm_mem_list_lock);
687 	ret = nm_mem_assign_id_locked(nmd);
688 	NM_MTX_UNLOCK(nm_mem_list_lock);
689 
690 	return ret;
691 }
692 
693 /* call with nm_mem_list_lock held */
694 static void
695 nm_mem_release_id(struct netmap_mem_d *nmd)
696 {
697 	nmd->prev->next = nmd->next;
698 	nmd->next->prev = nmd->prev;
699 
700 	if (netmap_last_mem_d == nmd)
701 		netmap_last_mem_d = nmd->prev;
702 
703 	nmd->prev = nmd->next = NULL;
704 }
705 
706 struct netmap_mem_d *
707 netmap_mem_find(nm_memid_t id)
708 {
709 	struct netmap_mem_d *nmd;
710 
711 	NM_MTX_LOCK(nm_mem_list_lock);
712 	nmd = netmap_last_mem_d;
713 	do {
714 		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
715 			nmd->refcount++;
716 			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
717 			NM_MTX_UNLOCK(nm_mem_list_lock);
718 			return nmd;
719 		}
720 		nmd = nmd->next;
721 	} while (nmd != netmap_last_mem_d);
722 	NM_MTX_UNLOCK(nm_mem_list_lock);
723 	return NULL;
724 }
725 
726 static int
727 nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
728 {
729 	int err = 0, id;
730 	id = nm_iommu_group_id(dev);
731 	if (netmap_debug & NM_DEBUG_MEM)
732 		nm_prinf("iommu_group %d", id);
733 
734 	NMA_LOCK(nmd);
735 
736 	if (nmd->nm_grp < 0)
737 		nmd->nm_grp = id;
738 
739 	if (nmd->nm_grp != id) {
740 		if (netmap_verbose)
741 			nm_prerr("iommu group mismatch: %u vs %u",
742 					nmd->nm_grp, id);
743 		nmd->lasterr = err = ENOMEM;
744 	}
745 
746 	NMA_UNLOCK(nmd);
747 	return err;
748 }
749 
750 static struct lut_entry *
751 nm_alloc_lut(u_int nobj)
752 {
753 	size_t n = sizeof(struct lut_entry) * nobj;
754 	struct lut_entry *lut;
755 #ifdef linux
756 	lut = vmalloc(n);
757 #else
758 	lut = nm_os_malloc(n);
759 #endif
760 	return lut;
761 }
762 
763 static void
764 nm_free_lut(struct lut_entry *lut, u_int objtotal)
765 {
766 	bzero(lut, sizeof(struct lut_entry) * objtotal);
767 #ifdef linux
768 	vfree(lut);
769 #else
770 	nm_os_free(lut);
771 #endif
772 }
773 
774 #if defined(linux) || defined(_WIN32)
775 static struct plut_entry *
776 nm_alloc_plut(u_int nobj)
777 {
778 	size_t n = sizeof(struct plut_entry) * nobj;
779 	struct plut_entry *lut;
780 	lut = vmalloc(n);
781 	return lut;
782 }
783 
784 static void
785 nm_free_plut(struct plut_entry * lut)
786 {
787 	vfree(lut);
788 }
789 #endif /* linux or _WIN32 */
790 
791 
792 /*
793  * First, find the allocator that contains the requested offset,
794  * then locate the cluster through a lookup table.
795  */
796 static vm_paddr_t
797 netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
798 {
799 	int i;
800 	vm_ooffset_t o = offset;
801 	vm_paddr_t pa;
802 	struct netmap_obj_pool *p;
803 
804 	p = nmd->pools;
805 
806 	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
807 		if (offset >= p[i].memtotal)
808 			continue;
809 		// now lookup the cluster's address
810 #ifndef _WIN32
811 		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
812 			offset % p[i]._objsize;
813 #else
814 		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
815 		pa.QuadPart += offset % p[i]._objsize;
816 #endif
817 		return pa;
818 	}
819 	/* this is only in case of errors */
820 	nm_prerr("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
821 		p[NETMAP_IF_POOL].memtotal,
822 		p[NETMAP_IF_POOL].memtotal
823 			+ p[NETMAP_RING_POOL].memtotal,
824 		p[NETMAP_IF_POOL].memtotal
825 			+ p[NETMAP_RING_POOL].memtotal
826 			+ p[NETMAP_BUF_POOL].memtotal);
827 #ifndef _WIN32
828 	return 0; /* bad address */
829 #else
830 	vm_paddr_t res;
831 	res.QuadPart = 0;
832 	return res;
833 #endif
834 }
835 
836 #ifdef _WIN32
837 
838 /*
839  * win32_build_virtual_memory_for_userspace
840  *
841  * This function gathers all the objects that make up the pools and maps
842  * a contiguous virtual memory space for userspace.
843  * It works this way:
844  * 1 - allocate a Memory Descriptor List as wide as the sum
845  *		of the memory needed for the pools
846  * 2 - cycle through all the objects in every pool and for every object:
847  *
848  *		2a - get the list of the physical address descriptors
849  *				for the object
850  *		2b - calculate the offset into the array of page descriptors in the
851  *				main MDL
852  *		2c - copy the descriptors of the object into the main MDL
853  *
854  * 3 - return the resulting MDL, which needs to be mapped in userland
855  *
856  * In this way we obtain a single MDL that describes all the memory for
857  * the objects in the pools
858 */
859 
860 PMDL
861 win32_build_user_vm_map(struct netmap_mem_d* nmd)
862 {
863 	u_int memflags, ofs = 0;
864 	PMDL mainMdl, tempMdl;
865 	uint64_t memsize;
866 	int i, j;
867 
868 	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
869 		nm_prerr("memory not finalised yet");
870 		return NULL;
871 	}
872 
873 	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
874 	if (mainMdl == NULL) {
875 		nm_prerr("failed to allocate mdl");
876 		return NULL;
877 	}
878 
879 	NMA_LOCK(nmd);
880 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
881 		struct netmap_obj_pool *p = &nmd->pools[i];
882 		int clsz = p->_clustsize;
883 		int clobjs = p->_clustentries; /* objects per cluster */
884 		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
885 		PPFN_NUMBER pSrc, pDst;
886 
887 		/* each pool has a different cluster size so we need to reallocate */
888 		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
889 		if (tempMdl == NULL) {
890 			NMA_UNLOCK(nmd);
891 			nm_prerr("fail to allocate tempMdl");
892 			IoFreeMdl(mainMdl);
893 			return NULL;
894 		}
895 		pSrc = MmGetMdlPfnArray(tempMdl);
896 		/* create one entry per cluster, the lut[] has one entry per object */
897 		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
898 			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
899 			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
900 			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
901 			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
902 			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
903 		}
904 		IoFreeMdl(tempMdl);
905 	}
906 	NMA_UNLOCK(nmd);
907 	return mainMdl;
908 }
909 
910 #endif /* _WIN32 */
911 
912 /*
913  * helper function for OS-specific mmap routines (currently only Windows).
914  * Given an nmd and a pool index, returns the cluster size and number of clusters.
915  * Returns 0 if memory is finalized and the pool is valid, otherwise 1.
916  * It should be called under NMA_LOCK(nmd), otherwise the underlying info can change.
917  */
918 
919 int
920 netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters)
921 {
922 	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
923 		return 1; /* invalid arguments */
924 	// NMA_LOCK_ASSERT(nmd);
925 	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
926 		*clustsize = *numclusters = 0;
927 		return 1; /* not ready yet */
928 	}
929 	*clustsize = nmd->pools[pool]._clustsize;
930 	*numclusters = nmd->pools[pool].numclusters;
931 	return 0; /* success */
932 }
933 
934 static int
935 netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
936 			u_int *memflags, nm_memid_t *id)
937 {
938 	int error = 0;
939 	error = netmap_mem_config(nmd);
940 	if (error)
941 		goto out;
942 	if (size) {
943 		if (nmd->flags & NETMAP_MEM_FINALIZED) {
944 			*size = nmd->nm_totalsize;
945 		} else {
946 			int i;
947 			*size = 0;
948 			for (i = 0; i < NETMAP_POOLS_NR; i++) {
949 				struct netmap_obj_pool *p = nmd->pools + i;
950 				*size += (p->_numclusters * p->_clustsize);
951 			}
952 		}
953 	}
954 	if (memflags)
955 		*memflags = nmd->flags;
956 	if (id)
957 		*id = nmd->nm_id;
958 out:
959 	return error;
960 }
961 
962 /*
963  * we store objects by kernel address; we need to find the offset
964  * within the pool to export the value to userspace.
965  * Algorithm: scan until we find the cluster, then add the
966  * actual offset in the cluster
967  */
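/*
 * Small example with illustrative numbers: with a cluster size of 8192
 * bytes, an address that falls 100 bytes into the third cluster yields
 * ofs = 2*8192 + 100 = 16484.
 */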
968 static ssize_t
969 netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
970 {
971 	int i, k = p->_clustentries, n = p->objtotal;
972 	ssize_t ofs = 0;
973 
974 	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
975 		const char *base = p->lut[i].vaddr;
976 		ssize_t relofs = (const char *) vaddr - base;
977 
978 		if (relofs < 0 || relofs >= p->_clustsize)
979 			continue;
980 
981 		ofs = ofs + relofs;
982 		nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
983 		    p->name, ofs, i, vaddr);
984 		return ofs;
985 	}
986 	nm_prerr("address %p is not contained inside any cluster (%s)",
987 	    vaddr, p->name);
988 	return 0; /* An error occurred */
989 }
990 
991 /* Helper functions which convert virtual addresses to offsets */
992 #define netmap_if_offset(n, v)					\
993 	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
994 
995 #define netmap_ring_offset(n, v)				\
996     ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
997 	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
998 
999 static ssize_t
1000 netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
1001 {
1002 	return netmap_if_offset(nmd, addr);
1003 }
1004 
1005 /*
1006  * report the index, and use start position as a hint,
1007  * otherwise buffer allocation becomes terribly expensive.
1008  */
1009 static void *
1010 netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
1011 {
1012 	uint32_t i = 0;			/* index in the bitmap */
1013 	uint32_t mask, j = 0;		/* slot counter */
1014 	void *vaddr = NULL;
1015 
1016 	if (len > p->_objsize) {
1017 		nm_prerr("%s request size %d too large", p->name, len);
1018 		return NULL;
1019 	}
1020 
1021 	if (p->objfree == 0) {
1022 		nm_prerr("no more %s objects", p->name);
1023 		return NULL;
1024 	}
1025 	if (start)
1026 		i = *start;
1027 
1028 	/* termination is guaranteed by p->objfree, but better check bounds on i */
1029 	while (vaddr == NULL && i < p->bitmap_slots)  {
1030 		uint32_t cur = p->bitmap[i];
1031 		if (cur == 0) { /* bitmask is fully used */
1032 			i++;
1033 			continue;
1034 		}
1035 		/* locate a slot */
1036 		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
1037 			;
1038 
1039 		p->bitmap[i] &= ~mask; /* mark object as in use */
1040 		p->objfree--;
1041 
1042 		vaddr = p->lut[i * 32 + j].vaddr;
1043 		if (index)
1044 			*index = i * 32 + j;
1045 	}
1046 	nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);
1047 
1048 	if (start)
1049 		*start = i;
1050 	return vaddr;
1051 }
1052 
1053 
1054 /*
1055  * free by index, not by address.
1056  * XXX should we also cleanup the content ?
1057  */
1058 static int
1059 netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
1060 {
1061 	uint32_t *ptr, mask;
1062 
1063 	if (j >= p->objtotal) {
1064 		nm_prerr("invalid index %u, max %u", j, p->objtotal);
1065 		return 1;
1066 	}
1067 	ptr = &p->bitmap[j / 32];
1068 	mask = (1 << (j % 32));
1069 	if (*ptr & mask) {
1070 		nm_prerr("ouch, double free on buffer %d", j);
1071 		return 1;
1072 	} else {
1073 		*ptr |= mask;
1074 		p->objfree++;
1075 		return 0;
1076 	}
1077 }
1078 
1079 /*
1080  * free by address. This is slow but is only used for a few
1081  * objects (rings, nifp)
1082  */
1083 static void
1084 netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
1085 {
1086 	u_int i, j, n = p->numclusters;
1087 
1088 	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
1089 		void *base = p->lut[i * p->_clustentries].vaddr;
1090 		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
1091 
1092 		/* The given address is out of the scope of the current cluster. */
1093 		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
1094 			continue;
1095 
1096 		j = j + relofs / p->_objsize;
1097 		/* KASSERT(j != 0, ("Cannot free object 0")); */
1098 		netmap_obj_free(p, j);
1099 		return;
1100 	}
1101 	nm_prerr("address %p is not contained inside any cluster (%s)",
1102 	    vaddr, p->name);
1103 }
1104 
1105 unsigned
1106 netmap_mem_bufsize(struct netmap_mem_d *nmd)
1107 {
1108 	return nmd->pools[NETMAP_BUF_POOL]._objsize;
1109 }
1110 
1111 #define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
1112 #define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
1113 #define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
1114 #define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
1115 #define netmap_buf_malloc(n, _pos, _index)			\
1116 	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)
1117 
1118 
1119 #if 0 /* currently unused */
1120 /* Return the index associated to the given packet buffer */
1121 #define netmap_buf_index(n, v)						\
1122     (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
1123 #endif
1124 
1125 /*
1126  * allocate extra buffers in a linked list.
1127  * returns the actual number.
1128  */
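/*
 * Note on the list format (as built below): the list is threaded through
 * the buffers themselves. *head holds the index of the most recently
 * allocated buffer and the first 4 bytes of each buffer store the index
 * of the next one, with 0 acting as the terminator (buffer 0 is
 * reserved, so it can double as a 'null' index).
 */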
1129 uint32_t
1130 netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
1131 {
1132 	struct netmap_mem_d *nmd = na->nm_mem;
1133 	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */
1134 
1135 	NMA_LOCK(nmd);
1136 
1137 	*head = 0;	/* default, 'null' index ie empty list */
1138 	for (i = 0 ; i < n; i++) {
1139 		uint32_t cur = *head;	/* save current head */
1140 		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
1141 		if (p == NULL) {
1142 			nm_prerr("no more buffers after %d of %d", i, n);
1143 			*head = cur; /* restore */
1144 			break;
1145 		}
1146 		nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
1147 		*p = cur; /* link to previous head */
1148 	}
1149 
1150 	NMA_UNLOCK(nmd);
1151 
1152 	return i;
1153 }
1154 
1155 static void
1156 netmap_extra_free(struct netmap_adapter *na, uint32_t head)
1157 {
1158 	struct lut_entry *lut = na->na_lut.lut;
1159 	struct netmap_mem_d *nmd = na->nm_mem;
1160 	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1161 	uint32_t i, cur, *buf;
1162 
1163 	nm_prdis("freeing the extra list");
1164 	for (i = 0; head >=2 && head < p->objtotal; i++) {
1165 		cur = head;
1166 		buf = lut[head].vaddr;
1167 		head = *buf;
1168 		*buf = 0;
1169 		if (netmap_obj_free(p, cur))
1170 			break;
1171 	}
1172 	if (head != 0)
1173 		nm_prerr("breaking with head %d", head);
1174 	if (netmap_debug & NM_DEBUG_MEM)
1175 		nm_prinf("freed %d buffers", i);
1176 }
1177 
1178 
1179 /* Return nonzero on error */
1180 static int
1181 netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
1182 {
1183 	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1184 	u_int i = 0;	/* slot counter */
1185 	uint32_t pos = 0;	/* slot in p->bitmap */
1186 	uint32_t index = 0;	/* buffer index */
1187 
1188 	for (i = 0; i < n; i++) {
1189 		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
1190 		if (vaddr == NULL) {
1191 			nm_prerr("no more buffers after %d of %d", i, n);
1192 			goto cleanup;
1193 		}
1194 		slot[i].buf_idx = index;
1195 		slot[i].len = p->_objsize;
1196 		slot[i].flags = 0;
1197 		slot[i].ptr = 0;
1198 	}
1199 
1200 	nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
1201 	return (0);
1202 
1203 cleanup:
1204 	while (i > 0) {
1205 		i--;
1206 		netmap_obj_free(p, slot[i].buf_idx);
1207 	}
1208 	bzero(slot, n * sizeof(slot[0]));
1209 	return (ENOMEM);
1210 }
1211 
1212 static void
1213 netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
1214 {
1215 	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1216 	u_int i;
1217 
1218 	for (i = 0; i < n; i++) {
1219 		slot[i].buf_idx = index;
1220 		slot[i].len = p->_objsize;
1221 		slot[i].flags = 0;
1222 	}
1223 }
1224 
1225 
1226 static void
1227 netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
1228 {
1229 	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1230 
1231 	if (i < 2 || i >= p->objtotal) {
1232 		nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
1233 		return;
1234 	}
1235 	netmap_obj_free(p, i);
1236 }
1237 
1238 
1239 static void
1240 netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
1241 {
1242 	u_int i;
1243 
1244 	for (i = 0; i < n; i++) {
1245 		if (slot[i].buf_idx > 1)
1246 			netmap_free_buf(nmd, slot[i].buf_idx);
1247 	}
1248 	nm_prdis("%s: released some buffers, available: %u",
1249 			p->name, p->objfree);
1250 }
1251 
1252 static void
1253 netmap_reset_obj_allocator(struct netmap_obj_pool *p)
1254 {
1255 
1256 	if (p == NULL)
1257 		return;
1258 	if (p->bitmap)
1259 		nm_os_free(p->bitmap);
1260 	p->bitmap = NULL;
1261 	if (p->invalid_bitmap)
1262 		nm_os_free(p->invalid_bitmap);
1263 	p->invalid_bitmap = NULL;
1264 	if (!p->alloc_done) {
1265 		/* allocation was done by somebody else.
1266 		 * Let them clean up after themselves.
1267 		 */
1268 		return;
1269 	}
1270 	if (p->lut) {
1271 		u_int i;
1272 
1273 		/*
1274 		 * Free each cluster allocated in
1275 		 * netmap_finalize_obj_allocator().  The cluster start
1276 		 * addresses are stored at multiples of p->_clustentries
1277 		 * in the lut.
1278 		 */
1279 		for (i = 0; i < p->objtotal; i += p->_clustentries) {
1280 			contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
1281 		}
1282 		nm_free_lut(p->lut, p->objtotal);
1283 	}
1284 	p->lut = NULL;
1285 	p->objtotal = 0;
1286 	p->memtotal = 0;
1287 	p->numclusters = 0;
1288 	p->objfree = 0;
1289 	p->alloc_done = 0;
1290 }
1291 
1292 /*
1293  * Free all resources related to an allocator.
1294  */
1295 static void
1296 netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
1297 {
1298 	if (p == NULL)
1299 		return;
1300 	netmap_reset_obj_allocator(p);
1301 }
1302 
1303 /*
1304  * We receive a request for objtotal objects, of size objsize each.
1305  * Internally we may round up both numbers, as we allocate objects
1306  * in small clusters multiple of the page size.
1307  * We need to keep track of objtotal and clustentries,
1308  * as they are needed when freeing memory.
1309  *
1310  * XXX note -- userspace needs the buffers to be contiguous,
1311  *	so we cannot afford gaps at the end of a cluster.
1312  */
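/*
 * Worked example (assuming PAGE_SIZE == 4096): for objsize = 2048 the
 * search in netmap_config_obj_allocator() stops at clustentries = 2,
 * giving clustsize = 4096 (one page, no waste); for objsize = 1536 it
 * stops at clustentries = 8, giving clustsize = 12288 (three pages).
 * The requested objtotal is then rounded up to a whole number of
 * clusters.
 */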
1313 
1314 
1315 /* call with NMA_LOCK held */
1316 static int
1317 netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
1318 {
1319 	int i;
1320 	u_int clustsize;	/* the cluster size, multiple of page size */
1321 	u_int clustentries;	/* how many objects per cluster */
1322 
1323 	/* we store the current request, so we can
1324 	 * detect configuration changes later */
1325 	p->r_objtotal = objtotal;
1326 	p->r_objsize = objsize;
1327 
1328 #define MAX_CLUSTSIZE	(1<<22)		// 4 MB
1329 #define LINE_ROUND	NM_CACHE_ALIGN	// 64
1330 	if (objsize >= MAX_CLUSTSIZE) {
1331 		/* we could do it but there is no point */
1332 		nm_prerr("unsupported allocation for %d bytes", objsize);
1333 		return EINVAL;
1334 	}
1335 	/* make sure objsize is a multiple of LINE_ROUND */
1336 	i = (objsize & (LINE_ROUND - 1));
1337 	if (i) {
1338 		nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
1339 		objsize += LINE_ROUND - i;
1340 	}
1341 	if (objsize < p->objminsize || objsize > p->objmaxsize) {
1342 		nm_prerr("requested objsize %d out of range [%d, %d]",
1343 			objsize, p->objminsize, p->objmaxsize);
1344 		return EINVAL;
1345 	}
1346 	if (objtotal < p->nummin || objtotal > p->nummax) {
1347 		nm_prerr("requested objtotal %d out of range [%d, %d]",
1348 			objtotal, p->nummin, p->nummax);
1349 		return EINVAL;
1350 	}
1351 	/*
1352 	 * Compute number of objects using a brute-force approach:
1353 	 * given a max cluster size,
1354 	 * we try to fill it with objects keeping track of the
1355 	 * wasted space to the next page boundary.
1356 	 */
1357 	for (clustentries = 0, i = 1;; i++) {
1358 		u_int delta, used = i * objsize;
1359 		if (used > MAX_CLUSTSIZE)
1360 			break;
1361 		delta = used % PAGE_SIZE;
1362 		if (delta == 0) { // exact solution
1363 			clustentries = i;
1364 			break;
1365 		}
1366 	}
1367 	/* exact solution not found */
1368 	if (clustentries == 0) {
1369 		nm_prerr("unsupported allocation for %d bytes", objsize);
1370 		return EINVAL;
1371 	}
1372 	/* compute clustsize */
1373 	clustsize = clustentries * objsize;
1374 	if (netmap_debug & NM_DEBUG_MEM)
1375 		nm_prinf("objsize %d clustsize %d objects %d",
1376 			objsize, clustsize, clustentries);
1377 
1378 	/*
1379 	 * The number of clusters is n = ceil(objtotal/clustentries)
1380 	 * objtotal' = n * clustentries
1381 	 */
1382 	p->_clustentries = clustentries;
1383 	p->_clustsize = clustsize;
1384 	p->_numclusters = (objtotal + clustentries - 1) / clustentries;
1385 
1386 	/* actual values (may be larger than requested) */
1387 	p->_objsize = objsize;
1388 	p->_objtotal = p->_numclusters * clustentries;
1389 
1390 	return 0;
1391 }
1392 
1393 /* call with NMA_LOCK held */
1394 static int
1395 netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
1396 {
1397 	int i; /* must be signed */
1398 	size_t n;
1399 
1400 	if (p->lut) {
1401 	/* if the lut is already there, we assume that all the
1402 	 * clusters have already been allocated as well, possibly by somebody
1403 	 * else (e.g., extmem). In that case, the alloc_done flag
1404 		 * will remain at zero, so that we will not attempt to
1405 		 * deallocate the clusters by ourselves in
1406 		 * netmap_reset_obj_allocator.
1407 		 */
1408 		return 0;
1409 	}
1410 
1411 	/* optimistically assume we have enough memory */
1412 	p->numclusters = p->_numclusters;
1413 	p->objtotal = p->_objtotal;
1414 	p->alloc_done = 1;
1415 
1416 	p->lut = nm_alloc_lut(p->objtotal);
1417 	if (p->lut == NULL) {
1418 		nm_prerr("Unable to create lookup table for '%s'", p->name);
1419 		goto clean;
1420 	}
1421 
1422 	/*
1423 	 * Allocate clusters, init pointers
1424 	 */
1425 
1426 	n = p->_clustsize;
1427 	for (i = 0; i < (int)p->objtotal;) {
1428 		int lim = i + p->_clustentries;
1429 		char *clust;
1430 
1431 		/*
1432 		 * XXX Note, we only need contigmalloc() for buffers attached
1433 		 * to native interfaces. In all other cases (nifp, netmap rings
1434 		 * and even buffers for VALE ports or emulated interfaces) we
1435 		 * can live with standard malloc, because the hardware will not
1436 		 * access the pages directly.
1437 		 */
1438 		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
1439 		    (size_t)0, -1UL, PAGE_SIZE, 0);
1440 		if (clust == NULL) {
1441 			/*
1442 			 * If we get here, there is a severe memory shortage,
1443 			 * so halve the allocated memory to reclaim some.
1444 			 */
1445 			nm_prerr("Unable to create cluster at %d for '%s' allocator",
1446 			    i, p->name);
1447 			if (i < 2) /* nothing to halve */
1448 				goto out;
1449 			lim = i / 2;
1450 			for (i--; i >= lim; i--) {
1451 				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
1452 					contigfree(p->lut[i].vaddr,
1453 						n, M_NETMAP);
1454 				p->lut[i].vaddr = NULL;
1455 			}
1456 		out:
1457 			p->objtotal = i;
1458 			/* we may have stopped in the middle of a cluster */
1459 			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
1460 			break;
1461 		}
1462 		/*
1463 		 * Set lut state for all buffers in the current cluster.
1464 		 *
1465 		 * [i, lim) is the set of buffer indexes that cover the
1466 		 * current cluster.
1467 		 *
1468 		 * 'clust' is really the address of the current buffer in
1469 		 * the current cluster as we index through it with a stride
1470 		 * of p->_objsize.
1471 		 */
1472 		for (; i < lim; i++, clust += p->_objsize) {
1473 			p->lut[i].vaddr = clust;
1474 #if !defined(linux) && !defined(_WIN32)
1475 			p->lut[i].paddr = vtophys(clust);
1476 #endif
1477 		}
1478 	}
1479 	p->memtotal = p->numclusters * p->_clustsize;
1480 	if (netmap_verbose)
1481 		nm_prinf("Pre-allocated %d clusters (%d/%dKB) for '%s'",
1482 		    p->numclusters, p->_clustsize >> 10,
1483 		    p->memtotal >> 10, p->name);
1484 
1485 	return 0;
1486 
1487 clean:
1488 	netmap_reset_obj_allocator(p);
1489 	return ENOMEM;
1490 }
1491 
1492 /* call with lock held */
1493 static int
1494 netmap_mem_params_changed(struct netmap_obj_params* p)
1495 {
1496 	int i, rv = 0;
1497 
1498 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1499 		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
1500 			p[i].last_size = p[i].size;
1501 			p[i].last_num = p[i].num;
1502 			rv = 1;
1503 		}
1504 	}
1505 	return rv;
1506 }
1507 
1508 static void
1509 netmap_mem_reset_all(struct netmap_mem_d *nmd)
1510 {
1511 	int i;
1512 
1513 	if (netmap_debug & NM_DEBUG_MEM)
1514 		nm_prinf("resetting %p", nmd);
1515 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1516 		netmap_reset_obj_allocator(&nmd->pools[i]);
1517 	}
1518 	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
1519 }
1520 
1521 static int
1522 netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
1523 {
1524 	int i, lim = p->objtotal;
1525 	struct netmap_lut *lut = &na->na_lut;
1526 
1527 	if (na == NULL || na->pdev == NULL)
1528 		return 0;
1529 
1530 #if defined(__FreeBSD__)
1531 	/* On FreeBSD, mapping and unmapping are performed by the txsync
1532 	 * and rxsync routines, packet by packet. */
1533 	(void)i;
1534 	(void)lim;
1535 	(void)lut;
1536 #elif defined(_WIN32)
1537 	(void)i;
1538 	(void)lim;
1539 	(void)lut;
1540 	nm_prerr("unsupported on Windows");
1541 #else /* linux */
1542 	nm_prdis("unmapping and freeing plut for %s", na->name);
1543 	if (lut->plut == NULL)
1544 		return 0;
1545 	for (i = 0; i < lim; i += p->_clustentries) {
1546 		if (lut->plut[i].paddr)
1547 			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
1548 	}
1549 	nm_free_plut(lut->plut);
1550 	lut->plut = NULL;
1551 #endif /* linux */
1552 
1553 	return 0;
1554 }
1555 
1556 static int
1557 netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
1558 {
1559 	int error = 0;
1560 	int i, lim = p->objtotal;
1561 	struct netmap_lut *lut = &na->na_lut;
1562 
1563 	if (na->pdev == NULL)
1564 		return 0;
1565 
1566 #if defined(__FreeBSD__)
1567 	/* On FreeBSD, mapping and unmapping are performed by the txsync
1568 	 * and rxsync routines, packet by packet. */
1569 	(void)i;
1570 	(void)lim;
1571 	(void)lut;
1572 #elif defined(_WIN32)
1573 	(void)i;
1574 	(void)lim;
1575 	(void)lut;
1576 	nm_prerr("unsupported on Windows");
1577 #else /* linux */
1578 
1579 	if (lut->plut != NULL) {
1580 		nm_prdis("plut already allocated for %s", na->name);
1581 		return 0;
1582 	}
1583 
1584 	nm_prdis("allocating physical lut for %s", na->name);
1585 	lut->plut = nm_alloc_plut(lim);
1586 	if (lut->plut == NULL) {
1587 		nm_prerr("Failed to allocate physical lut for %s", na->name);
1588 		return ENOMEM;
1589 	}
1590 
1591 	for (i = 0; i < lim; i += p->_clustentries) {
1592 		lut->plut[i].paddr = 0;
1593 	}
1594 
1595 	for (i = 0; i < lim; i += p->_clustentries) {
1596 		int j;
1597 
1598 		if (p->lut[i].vaddr == NULL)
1599 			continue;
1600 
1601 		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
1602 				p->lut[i].vaddr, p->_clustsize);
1603 		if (error) {
1604 			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
1605 			break;
1606 		}
1607 
1608 		for (j = 1; j < p->_clustentries; j++) {
1609 			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
1610 		}
1611 	}
1612 
1613 	if (error)
1614 		netmap_mem_unmap(p, na);
1615 
1616 #endif /* linux */
1617 
1618 	return error;
1619 }
1620 
1621 static int
1622 netmap_mem_finalize_all(struct netmap_mem_d *nmd)
1623 {
1624 	int i;
1625 	if (nmd->flags & NETMAP_MEM_FINALIZED)
1626 		return 0;
1627 	nmd->lasterr = 0;
1628 	nmd->nm_totalsize = 0;
1629 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1630 		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
1631 		if (nmd->lasterr)
1632 			goto error;
1633 		nmd->nm_totalsize += nmd->pools[i].memtotal;
1634 	}
1635 	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
1636 	if (nmd->lasterr)
1637 		goto error;
1638 
1639 	nmd->flags |= NETMAP_MEM_FINALIZED;
1640 
1641 	if (netmap_verbose)
1642 		nm_prinf("interfaces %d KB, rings %d KB, buffers %d MB",
1643 		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
1644 		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
1645 		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
1646 
1647 	if (netmap_verbose)
1648 		nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
1649 
1650 
1651 	return 0;
1652 error:
1653 	netmap_mem_reset_all(nmd);
1654 	return nmd->lasterr;
1655 }
1656 
1657 /*
1658  * allocator for private memory
1659  */
1660 static void *
1661 _netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
1662 		struct netmap_mem_ops *ops, int *perr)
1663 {
1664 	struct netmap_mem_d *d = NULL;
1665 	int i, err = 0;
1666 
1667 	d = nm_os_malloc(size);
1668 	if (d == NULL) {
1669 		err = ENOMEM;
1670 		goto error;
1671 	}
1672 
1673 	*d = nm_blueprint;
1674 	d->ops = ops;
1675 
1676 	err = nm_mem_assign_id(d);
1677 	if (err)
1678 		goto error_free;
1679 	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);
1680 
1681 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1682 		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1683 				nm_blueprint.pools[i].name,
1684 				d->name);
1685 		d->params[i].num = p[i].num;
1686 		d->params[i].size = p[i].size;
1687 	}
1688 
1689 	NMA_LOCK_INIT(d);
1690 
1691 	err = netmap_mem_config(d);
1692 	if (err)
1693 		goto error_rel_id;
1694 
1695 	d->flags &= ~NETMAP_MEM_FINALIZED;
1696 
1697 	return d;
1698 
1699 error_rel_id:
1700 	NMA_LOCK_DESTROY(d);
1701 	nm_mem_release_id(d);
1702 error_free:
1703 	nm_os_free(d);
1704 error:
1705 	if (perr)
1706 		*perr = err;
1707 	return NULL;
1708 }
1709 
1710 struct netmap_mem_d *
1711 netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
1712 		u_int extra_bufs, u_int npipes, int *perr)
1713 {
1714 	struct netmap_mem_d *d = NULL;
1715 	struct netmap_obj_params p[NETMAP_POOLS_NR];
1716 	int i;
1717 	u_int v, maxd;
1718 	/* account for the fake host rings */
1719 	txr++;
1720 	rxr++;
1721 
1722 	/* copy the min values */
1723 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1724 		p[i] = netmap_min_priv_params[i];
1725 	}
1726 
1727 	/* possibly increase them to fit user request */
1728 	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
1729 	if (p[NETMAP_IF_POOL].size < v)
1730 		p[NETMAP_IF_POOL].size = v;
1731 	v = 2 + 4 * npipes;
1732 	if (p[NETMAP_IF_POOL].num < v)
1733 		p[NETMAP_IF_POOL].num = v;
1734 	maxd = (txd > rxd) ? txd : rxd;
1735 	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
1736 	if (p[NETMAP_RING_POOL].size < v)
1737 		p[NETMAP_RING_POOL].size = v;
1738 	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
1739 	 * and two rx rings (again, 1 normal and 1 fake host)
1740 	 */
1741 	v = txr + rxr + 8 * npipes;
1742 	if (p[NETMAP_RING_POOL].num < v)
1743 		p[NETMAP_RING_POOL].num = v;
1744 	/* for each pipe we only need the buffers for the 4 "real" rings.
1745 	 * On the other hand, the pipe ring dimension may be different from
1746 	 * the parent port ring dimension. As a compromise, we allocate twice
1747 	 * the space that would be needed if the pipe rings were the same size as the parent rings
1748 	 */
1749 	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
1750 		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
1751 	if (p[NETMAP_BUF_POOL].num < v)
1752 		p[NETMAP_BUF_POOL].num = v;
1753 
1754 	if (netmap_verbose)
1755 		nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
1756 			p[NETMAP_IF_POOL].num,
1757 			p[NETMAP_IF_POOL].size,
1758 			p[NETMAP_RING_POOL].num,
1759 			p[NETMAP_RING_POOL].size,
1760 			p[NETMAP_BUF_POOL].num,
1761 			p[NETMAP_BUF_POOL].size);
1762 
1763 	d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr);
1764 
1765 	return d;
1766 }
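
/*
 * Sizing sketch (illustrative, not normative): for a port with one tx
 * and one rx ring of 1024 slots each, no pipes and no extra buffers,
 * the fake host rings bring the ring count to 2 per direction, so the
 * buffer pool request above is 2*1024 + 2*1024 + 2 = 4098 buffers (the
 * +2 covers the two reserved buffer indices), which matches the default
 * in netmap_min_priv_params.
 */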
1767 
1768 
1769 /* call with lock held */
1770 static int
1771 netmap_mem2_config(struct netmap_mem_d *nmd)
1772 {
1773 	int i;
1774 
1775 	if (!netmap_mem_params_changed(nmd->params))
1776 		goto out;
1777 
1778 	nm_prdis("reconfiguring");
1779 
1780 	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1781 		/* reset previous allocation */
1782 		for (i = 0; i < NETMAP_POOLS_NR; i++) {
1783 			netmap_reset_obj_allocator(&nmd->pools[i]);
1784 		}
1785 		nmd->flags &= ~NETMAP_MEM_FINALIZED;
1786 	}
1787 
1788 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1789 		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1790 				nmd->params[i].num, nmd->params[i].size);
1791 		if (nmd->lasterr)
1792 			goto out;
1793 	}
1794 
1795 out:
1796 
1797 	return nmd->lasterr;
1798 }
1799 
1800 static int
1801 netmap_mem2_finalize(struct netmap_mem_d *nmd)
1802 {
1803 	if (nmd->flags & NETMAP_MEM_FINALIZED)
1804 		goto out;
1805 
1806 	if (netmap_mem_finalize_all(nmd))
1807 		goto out;
1808 
1809 	nmd->lasterr = 0;
1810 
1811 out:
1812 	return nmd->lasterr;
1813 }
1814 
1815 static void
1816 netmap_mem2_delete(struct netmap_mem_d *nmd)
1817 {
1818 	int i;
1819 
1820 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1821 	    netmap_destroy_obj_allocator(&nmd->pools[i]);
1822 	}
1823 
1824 	NMA_LOCK_DESTROY(nmd);
1825 	if (nmd != &nm_mem)
1826 		nm_os_free(nmd);
1827 }
1828 
1829 #ifdef WITH_EXTMEM
1830 /* doubly linked list of all existing external allocators */
1831 static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
1832 NM_MTX_T nm_mem_ext_list_lock;
1833 #endif /* WITH_EXTMEM */
1834 
1835 int
1836 netmap_mem_init(void)
1837 {
1838 	NM_MTX_INIT(nm_mem_list_lock);
1839 	NMA_LOCK_INIT(&nm_mem);
1840 	netmap_mem_get(&nm_mem);
1841 #ifdef WITH_EXTMEM
1842 	NM_MTX_INIT(nm_mem_ext_list_lock);
1843 #endif /* WITH_EXTMEM */
1844 	return (0);
1845 }
1846 
1847 void
1848 netmap_mem_fini(void)
1849 {
1850 	netmap_mem_put(&nm_mem);
1851 }
1852 
1853 static void
1854 netmap_free_rings(struct netmap_adapter *na)
1855 {
1856 	enum txrx t;
1857 
1858 	for_rx_tx(t) {
1859 		u_int i;
1860 		for (i = 0; i < netmap_all_rings(na, t); i++) {
1861 			struct netmap_kring *kring = NMR(na, t)[i];
1862 			struct netmap_ring *ring = kring->ring;
1863 
1864 			if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) {
1865 				if (netmap_debug & NM_DEBUG_MEM)
1866 					nm_prinf("NOT deleting ring %s (ring %p, users %d neekring %d)",
1867 						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
1868 				continue;
1869 			}
1870 			if (netmap_debug & NM_DEBUG_MEM)
1871 				nm_prinf("deleting ring %s", kring->name);
1872 			if (!(kring->nr_kflags & NKR_FAKERING)) {
1873 				nm_prdis("freeing bufs for %s", kring->name);
1874 				netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1875 			} else {
1876 				nm_prdis("NOT freeing bufs for %s", kring->name);
1877 			}
1878 			netmap_ring_free(na->nm_mem, ring);
1879 			kring->ring = NULL;
1880 		}
1881 	}
1882 }
1883 
1884 /* call with NMA_LOCK held *
1885  *
1886  * Allocate netmap rings and buffers for this card
1887  * The rings are contiguous, but have variable size.
1888  * The kring array must follow the layout described
1889  * in netmap_krings_create().
1890  */
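/*
 * Note on ring->buf_ofs as computed below: it is the (signed) distance
 * from the ring to the start of the buffer pool in the exported region,
 * so userspace can reach buffer 'idx' roughly as
 *	(char *)ring + ring->buf_ofs + idx * ring->nr_buf_size
 * (this is the arithmetic that the NETMAP_BUF() helper in the public
 * netmap headers is expected to perform).
 */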
1891 static int
1892 netmap_mem2_rings_create(struct netmap_adapter *na)
1893 {
1894 	enum txrx t;
1895 
1896 	for_rx_tx(t) {
1897 		u_int i;
1898 
1899 		for (i = 0; i < netmap_all_rings(na, t); i++) {
1900 			struct netmap_kring *kring = NMR(na, t)[i];
1901 			struct netmap_ring *ring = kring->ring;
1902 			u_int len, ndesc;
1903 
1904 			if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
1905 				/* unneeded, or already created by somebody else */
1906 				if (netmap_debug & NM_DEBUG_MEM)
1907 					nm_prinf("NOT creating ring %s (ring %p, users %d neekring %d)",
1908 						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
1909 				continue;
1910 			}
1911 			if (netmap_debug & NM_DEBUG_MEM)
1912 				nm_prinf("creating %s", kring->name);
1913 			ndesc = kring->nkr_num_slots;
1914 			len = sizeof(struct netmap_ring) +
1915 				  ndesc * sizeof(struct netmap_slot);
1916 			ring = netmap_ring_malloc(na->nm_mem, len);
1917 			if (ring == NULL) {
1918 				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
1919 				goto cleanup;
1920 			}
1921 			nm_prdis("txring at %p", ring);
1922 			kring->ring = ring;
1923 			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
1924 			*(int64_t *)(uintptr_t)&ring->buf_ofs =
1925 			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
1926 				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
1927 				netmap_ring_offset(na->nm_mem, ring);
1928 
1929 			/* copy values from kring */
1930 			ring->head = kring->rhead;
1931 			ring->cur = kring->rcur;
1932 			ring->tail = kring->rtail;
1933 			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
1934 				netmap_mem_bufsize(na->nm_mem);
1935 			nm_prdis("%s h %d c %d t %d", kring->name,
1936 				ring->head, ring->cur, ring->tail);
1937 			nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
1938 			if (!(kring->nr_kflags & NKR_FAKERING)) {
1939 				/* this is a real ring */
1940 				if (netmap_debug & NM_DEBUG_MEM)
1941 					nm_prinf("allocating buffers for %s", kring->name);
1942 				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1943 					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
1944 					goto cleanup;
1945 				}
1946 			} else {
1947 				/* this is a fake ring, set all indices to 0 */
1948 				if (netmap_debug & NM_DEBUG_MEM)
1949 					nm_prinf("NOT allocating buffers for %s", kring->name);
1950 				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
1951 			}
1952 			/* ring info */
1953 			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
1954 			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
1955 		}
1956 	}
1957 
1958 	return 0;
1959 
1960 cleanup:
1961 	/* we cannot actually cleanup here, since we don't own kring->users
1962 	 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
1963 	 * the first or zero-out the second, then call netmap_free_rings()
1964 	 * to do the cleanup
1965 	 */
1966 
1967 	return ENOMEM;
1968 }
1969 
1970 static void
1971 netmap_mem2_rings_delete(struct netmap_adapter *na)
1972 {
1973 	/* last instance, release bufs and rings */
1974 	netmap_free_rings(na);
1975 }
1976 
1977 
1978 /* call with NMA_LOCK held */
1979 /*
1980  * Allocate the per-fd structure netmap_if.
1981  *
1982  * We assume that the configuration stored in na
1983  * (number of tx/rx rings and descs) does not change while
1984  * the interface is in netmap mode.
1985  */
1986 static struct netmap_if *
1987 netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
1988 {
1989 	struct netmap_if *nifp;
1990 	ssize_t base; /* handy for relative offsets between rings and nifp */
1991 	u_int i, len, n[NR_TXRX], ntot;
1992 	enum txrx t;
1993 
1994 	ntot = 0;
1995 	for_rx_tx(t) {
1996 		/* account for the (possibly fake) host rings */
1997 		n[t] = netmap_all_rings(na, t);
1998 		ntot += n[t];
1999 	}
2000 	/*
2001 	 * the descriptor is followed inline by an array of offsets
2002 	 * to the tx and rx rings in the shared memory region.
2003 	 */
2004 
2005 	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
2006 	nifp = netmap_if_malloc(na->nm_mem, len);
2007 	if (nifp == NULL) {
2008 		NMA_UNLOCK(na->nm_mem);
2009 		return NULL;
2010 	}
2011 
2012 	/* initialize base fields -- override const */
2013 	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
2014 	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
2015 	strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));
2016 
2017 	/*
2018 	 * fill the slots for the rx and tx rings. They contain the offset
2019 	 * between the ring and nifp, so the information is usable in
2020 	 * userspace to reach the ring from the nifp.
2021 	 */
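	/*
	 * Illustrative userspace counterpart (not part of this file):
	 * given the nifp returned after NIOCREGIF, ring i can be reached
	 * roughly as
	 *
	 *	struct netmap_ring *r = (struct netmap_ring *)
	 *		((char *)nifp + nifp->ring_ofs[i]);
	 *
	 * which is essentially what NETMAP_TXRING()/NETMAP_RXRING() in
	 * netmap_user.h expand to.
	 */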
2022 	base = netmap_if_offset(na->nm_mem, nifp);
2023 	for (i = 0; i < n[NR_TX]; i++) {
2024 		/* XXX instead of ofs == 0 maybe use the offset of an error
2025 		 * ring, like we do for buffers? */
2026 		ssize_t ofs = 0;
2027 
2028 		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
2029 				&& i < priv->np_qlast[NR_TX]) {
2030 			ofs = netmap_ring_offset(na->nm_mem,
2031 						 na->tx_rings[i]->ring) - base;
2032 		}
2033 		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
2034 	}
2035 	for (i = 0; i < n[NR_RX]; i++) {
2036 		/* XXX instead of ofs == 0 maybe use the offset of an error
2037 		 * ring, like we do for buffers? */
2038 		ssize_t ofs = 0;
2039 
2040 		if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
2041 				&& i < priv->np_qlast[NR_RX]) {
2042 			ofs = netmap_ring_offset(na->nm_mem,
2043 						 na->rx_rings[i]->ring) - base;
2044 		}
2045 		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
2046 	}
2047 
2048 	return (nifp);
2049 }
2050 
2051 static void
2052 netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
2053 {
2054 	if (nifp == NULL)
2055 		/* nothing to do */
2056 		return;
2057 	if (nifp->ni_bufs_head)
2058 		netmap_extra_free(na, nifp->ni_bufs_head);
2059 	netmap_if_free(na->nm_mem, nifp);
2060 }
2061 
2062 static void
2063 netmap_mem2_deref(struct netmap_mem_d *nmd)
2064 {
2065 
2066 	if (netmap_debug & NM_DEBUG_MEM)
2067 		nm_prinf("active = %d", nmd->active);
2068 
2069 }
2070 
2071 struct netmap_mem_ops netmap_mem_global_ops = {
2072 	.nmd_get_lut = netmap_mem2_get_lut,
2073 	.nmd_get_info = netmap_mem2_get_info,
2074 	.nmd_ofstophys = netmap_mem2_ofstophys,
2075 	.nmd_config = netmap_mem2_config,
2076 	.nmd_finalize = netmap_mem2_finalize,
2077 	.nmd_deref = netmap_mem2_deref,
2078 	.nmd_delete = netmap_mem2_delete,
2079 	.nmd_if_offset = netmap_mem2_if_offset,
2080 	.nmd_if_new = netmap_mem2_if_new,
2081 	.nmd_if_delete = netmap_mem2_if_delete,
2082 	.nmd_rings_create = netmap_mem2_rings_create,
2083 	.nmd_rings_delete = netmap_mem2_rings_delete
2084 };
2085 
2086 int
2087 netmap_mem_pools_info_get(struct nmreq_pools_info *req,
2088 				struct netmap_mem_d *nmd)
2089 {
2090 	int ret;
2091 
2092 	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
2093 					&req->nr_mem_id);
2094 	if (ret) {
2095 		return ret;
2096 	}
2097 
2098 	NMA_LOCK(nmd);
2099 	req->nr_if_pool_offset = 0;
2100 	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
2101 	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;
2102 
2103 	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
2104 	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
2105 	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;
2106 
2107 	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
2108 			     nmd->pools[NETMAP_RING_POOL].memtotal;
2109 	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
2110 	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
2111 	NMA_UNLOCK(nmd);
2112 
2113 	return 0;
2114 }
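
/*
 * A sketch of how the offsets exported above can be used (assuming the
 * shared region has been mmap()ed at 'mem' by the application): the pools
 * follow one another, so for example buffer 'idx' starts at
 *
 *	mem + req->nr_buf_pool_offset + (size_t)idx * req->nr_buf_pool_objsize
 *
 * while the first netmap_if sits at mem + req->nr_if_pool_offset (i.e. 0).
 */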
2115 
2116 #ifdef WITH_EXTMEM
2117 struct netmap_mem_ext {
2118 	struct netmap_mem_d up;
2119 
2120 	struct nm_os_extmem *os;
2121 	struct netmap_mem_ext *next, *prev;
2122 };
2123 
2124 /* takes and releases nm_mem_ext_list_lock */
2125 static void
2126 netmap_mem_ext_register(struct netmap_mem_ext *e)
2127 {
2128 	NM_MTX_LOCK(nm_mem_ext_list_lock);
2129 	if (netmap_mem_ext_list)
2130 		netmap_mem_ext_list->prev = e;
2131 	e->next = netmap_mem_ext_list;
2132 	netmap_mem_ext_list = e;
2133 	e->prev = NULL;
2134 	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2135 }
2136 
2137 /* call with nm_mem_list_lock held */
2138 static void
2139 netmap_mem_ext_unregister(struct netmap_mem_ext *e)
2140 {
2141 	if (e->prev)
2142 		e->prev->next = e->next;
2143 	else
2144 		netmap_mem_ext_list = e->next;
2145 	if (e->next)
2146 		e->next->prev = e->prev;
2147 	e->prev = e->next = NULL;
2148 }
2149 
2150 static struct netmap_mem_ext *
2151 netmap_mem_ext_search(struct nm_os_extmem *os)
2152 {
2153 	struct netmap_mem_ext *e;
2154 
2155 	NM_MTX_LOCK(nm_mem_ext_list_lock);
2156 	for (e = netmap_mem_ext_list; e; e = e->next) {
2157 		if (nm_os_extmem_isequal(e->os, os)) {
2158 			netmap_mem_get(&e->up);
2159 			break;
2160 		}
2161 	}
2162 	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2163 	return e;
2164 }
2165 
2166 
2167 static void
2168 netmap_mem_ext_delete(struct netmap_mem_d *d)
2169 {
2170 	int i;
2171 	struct netmap_mem_ext *e =
2172 		(struct netmap_mem_ext *)d;
2173 
2174 	netmap_mem_ext_unregister(e);
2175 
2176 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
2177 		struct netmap_obj_pool *p = &d->pools[i];
2178 
2179 		if (p->lut) {
2180 			nm_free_lut(p->lut, p->objtotal);
2181 			p->lut = NULL;
2182 		}
2183 	}
2184 	if (e->os)
2185 		nm_os_extmem_delete(e->os);
2186 	netmap_mem2_delete(d);
2187 }
2188 
2189 static int
2190 netmap_mem_ext_config(struct netmap_mem_d *nmd)
2191 {
2192 	return 0;
2193 }
2194 
2195 struct netmap_mem_ops netmap_mem_ext_ops = {
2196 	.nmd_get_lut = netmap_mem2_get_lut,
2197 	.nmd_get_info = netmap_mem2_get_info,
2198 	.nmd_ofstophys = netmap_mem2_ofstophys,
2199 	.nmd_config = netmap_mem_ext_config,
2200 	.nmd_finalize = netmap_mem2_finalize,
2201 	.nmd_deref = netmap_mem2_deref,
2202 	.nmd_delete = netmap_mem_ext_delete,
2203 	.nmd_if_offset = netmap_mem2_if_offset,
2204 	.nmd_if_new = netmap_mem2_if_new,
2205 	.nmd_if_delete = netmap_mem2_if_delete,
2206 	.nmd_rings_create = netmap_mem2_rings_create,
2207 	.nmd_rings_delete = netmap_mem2_rings_delete
2208 };
2209 
2210 struct netmap_mem_d *
2211 netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
2212 {
2213 	int error = 0;
2214 	int i, j;
2215 	struct netmap_mem_ext *nme;
2216 	char *clust;
2217 	size_t off;
2218 	struct nm_os_extmem *os = NULL;
2219 	int nr_pages;
2220 
2221 	// XXX sanity checks
2222 	if (pi->nr_if_pool_objtotal == 0)
2223 		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
2224 	if (pi->nr_if_pool_objsize == 0)
2225 		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
2226 	if (pi->nr_ring_pool_objtotal == 0)
2227 		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
2228 	if (pi->nr_ring_pool_objsize == 0)
2229 		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
2230 	if (pi->nr_buf_pool_objtotal == 0)
2231 		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
2232 	if (pi->nr_buf_pool_objsize == 0)
2233 		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
2234 	if (netmap_debug & NM_DEBUG_MEM)
2235 		nm_prinf("if %d %d ring %d %d buf %d %d",
2236 			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
2237 			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
2238 			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);
2239 
2240 	os = nm_os_extmem_create(usrptr, pi, &error);
2241 	if (os == NULL) {
2242 		nm_prerr("os extmem creation failed");
2243 		goto out;
2244 	}
2245 
2246 	nme = netmap_mem_ext_search(os);
2247 	if (nme) {
2248 		nm_os_extmem_delete(os);
2249 		return &nme->up;
2250 	}
2251 	if (netmap_debug & NM_DEBUG_MEM)
2252 		nm_prinf("not found, creating new");
2253 
2254 	nme = _netmap_mem_private_new(sizeof(*nme),
2255 			(struct netmap_obj_params[]){
2256 				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
2257 				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
2258 				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
2259 			&netmap_mem_ext_ops,
2260 			&error);
2261 	if (nme == NULL)
2262 		goto out_unmap;
2263 
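	/* The netmap_obj_params passed to _netmap_mem_private_new() above are
	 * in NETMAP_IF_POOL, NETMAP_RING_POOL, NETMAP_BUF_POOL order; they are
	 * stored in nme->up.params[] and consumed by index in the loop below.
	 */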
2264 	nr_pages = nm_os_extmem_nr_pages(os);
2265 
2266 	/* from now on pages will be released by nme destructor;
2267 	 * we set os to NULL below to prevent a second release in out_unmap
2268 	 */
2269 	nme->os = os;
2270 	os = NULL; /* pass ownership */
2271 
2272 	clust = nm_os_extmem_nextpage(nme->os);
2273 	off = 0;
2274 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
2275 		struct netmap_obj_pool *p = &nme->up.pools[i];
2276 		struct netmap_obj_params *o = &nme->up.params[i];
2277 
2278 		p->_objsize = o->size;
2279 		p->_clustsize = o->size;
2280 		p->_clustentries = 1;
2281 
2282 		p->lut = nm_alloc_lut(o->num);
2283 		if (p->lut == NULL) {
2284 			error = ENOMEM;
2285 			goto out_delete;
2286 		}
2287 
2288 		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
2289 		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
2290 		if (p->invalid_bitmap == NULL) {
2291 			error = ENOMEM;
2292 			goto out_delete;
2293 		}
2294 
2295 		if (nr_pages == 0) {
2296 			p->objtotal = 0;
2297 			p->memtotal = 0;
2298 			p->objfree = 0;
2299 			continue;
2300 		}
2301 
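		/*
		 * Walk the user supplied pages and lay the o->num objects out
		 * sequentially: 'clust' is the current page, 'off' the offset
		 * of the next object inside it. When an object spills into
		 * the following page(s) we advance 'clust'; if the new page
		 * is not virtually contiguous with the previous one (or we
		 * run out of pages) the straddling object is unusable and its
		 * bit is set in invalid_bitmap (bit j lives in word j >> 5,
		 * mask 1U << (j & 31)).
		 */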
2302 		for (j = 0; j < o->num && nr_pages > 0; j++) {
2303 			size_t noff;
2304 
2305 			p->lut[j].vaddr = clust + off;
2306 #if !defined(linux) && !defined(_WIN32)
2307 			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
2308 #endif
2309 			nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
2310 			noff = off + p->_objsize;
2311 			if (noff < PAGE_SIZE) {
2312 				off = noff;
2313 				continue;
2314 			}
2315 			nm_prdis("too big, recomputing offset...");
2316 			while (noff >= PAGE_SIZE) {
2317 				char *old_clust = clust;
2318 				noff -= PAGE_SIZE;
2319 				clust = nm_os_extmem_nextpage(nme->os);
2320 				nr_pages--;
2321 				nm_prdis("noff %zu page %p nr_pages %d", noff,
2322 						clust, nr_pages);
2323 				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
2324 					(nr_pages == 0 ||
2325 					 old_clust + PAGE_SIZE != clust))
2326 				{
2327 					/* out of space or non-contiguous,
2328 					 * drop this object
2329 					 */
2330 					p->invalid_bitmap[j >> 5] |= 1U << (j & 31U);
2331 					nm_prdis("non contiguous at off %zu, drop", noff);
2332 				}
2333 				if (nr_pages == 0)
2334 					break;
2335 			}
2336 			off = noff;
2337 		}
2338 		p->objtotal = j;
2339 		p->numclusters = p->objtotal;
2340 		p->memtotal = j * p->_objsize;
2341 		nm_prdis("%d memtotal %u", j, p->memtotal);
2342 	}
2343 
2344 	netmap_mem_ext_register(nme);
2345 
2346 	return &nme->up;
2347 
2348 out_delete:
2349 	netmap_mem_put(&nme->up);
2350 out_unmap:
2351 	if (os)
2352 		nm_os_extmem_delete(os);
2353 out:
2354 	if (perror)
2355 		*perror = error;
2356 	return NULL;
2357 
2358 }
2359 #endif /* WITH_EXTMEM */
2360 
2361 
2362 #ifdef WITH_PTNETMAP
2363 struct mem_pt_if {
2364 	struct mem_pt_if *next;
2365 	struct ifnet *ifp;
2366 	unsigned int nifp_offset;
2367 };
2368 
2369 /* Netmap allocator for ptnetmap guests. */
2370 struct netmap_mem_ptg {
2371 	struct netmap_mem_d up;
2372 
2373 	vm_paddr_t nm_paddr;            /* physical address in the guest */
2374 	void *nm_addr;                  /* virtual address in the guest */
2375 	struct netmap_lut buf_lut;      /* lookup table for BUF pool in the guest */
2376 	nm_memid_t host_mem_id;         /* allocator identifier in the host */
2377 	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
2378 	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
2379 };
2380 
2381 /* Link a passthrough interface to a passthrough netmap allocator. */
2382 static int
2383 netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp,
2384 			    unsigned int nifp_offset)
2385 {
2386 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2387 	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));
2388 
2389 	if (!ptif) {
2390 		return ENOMEM;
2391 	}
2392 
2393 	NMA_LOCK(nmd);
2394 
2395 	ptif->ifp = ifp;
2396 	ptif->nifp_offset = nifp_offset;
2397 
2398 	if (ptnmd->pt_ifs) {
2399 		ptif->next = ptnmd->pt_ifs;
2400 	}
2401 	ptnmd->pt_ifs = ptif;
2402 
2403 	NMA_UNLOCK(nmd);
2404 
2405 	nm_prinf("ifp=%s,nifp_offset=%u",
2406 		ptif->ifp->if_xname, ptif->nifp_offset);
2407 
2408 	return 0;
2409 }
2410 
2411 /* Called with NMA_LOCK(nmd) held. */
2412 static struct mem_pt_if *
2413 netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp)
2414 {
2415 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2416 	struct mem_pt_if *curr;
2417 
2418 	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2419 		if (curr->ifp == ifp) {
2420 			return curr;
2421 		}
2422 	}
2423 
2424 	return NULL;
2425 }
2426 
2427 /* Unlink a passthrough interface from a passthrough netmap allocator. */
2428 int
2429 netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
2430 {
2431 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2432 	struct mem_pt_if *prev = NULL;
2433 	struct mem_pt_if *curr;
2434 	int ret = -1;
2435 
2436 	NMA_LOCK(nmd);
2437 
2438 	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2439 		if (curr->ifp == ifp) {
2440 			if (prev) {
2441 				prev->next = curr->next;
2442 			} else {
2443 				ptnmd->pt_ifs = curr->next;
2444 			}
2445 			nm_prinf("removed (ifp=%p,nifp_offset=%u)",
2446 			  curr->ifp, curr->nifp_offset);
2447 			nm_os_free(curr);
2448 			ret = 0;
2449 			break;
2450 		}
2451 		prev = curr;
2452 	}
2453 
2454 	NMA_UNLOCK(nmd);
2455 
2456 	return ret;
2457 }
2458 
2459 static int
2460 netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
2461 {
2462 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2463 
2464 	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
2465 		return EINVAL;
2466 	}
2467 
2468 	*lut = ptnmd->buf_lut;
2469 	return 0;
2470 }
2471 
2472 static int
2473 netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
2474 			     u_int *memflags, uint16_t *id)
2475 {
2476 	int error = 0;
2477 
2478 	error = nmd->ops->nmd_config(nmd);
2479 	if (error)
2480 		goto out;
2481 
2482 	if (size)
2483 		*size = nmd->nm_totalsize;
2484 	if (memflags)
2485 		*memflags = nmd->flags;
2486 	if (id)
2487 		*id = nmd->nm_id;
2488 
2489 out:
2490 
2491 	return error;
2492 }
2493 
2494 static vm_paddr_t
2495 netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
2496 {
2497 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2498 	vm_paddr_t paddr;
2499 	/* if the offset is valid, just return nm_paddr + off */
2500 	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
2501 	nm_prdis("off %lx paddr %lx", off, (unsigned long)paddr);
2502 	return paddr;
2503 }
2504 
2505 static int
2506 netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
2507 {
2508 	/* nothing to do, we are configured on creation
2509 	 * and configuration never changes thereafter
2510 	 */
2511 	return 0;
2512 }
2513 
2514 static int
2515 netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
2516 {
2517 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2518 	uint64_t mem_size;
2519 	uint32_t bufsize;
2520 	uint32_t nbuffers;
2521 	uint32_t poolofs;
2522 	vm_paddr_t paddr;
2523 	char *vaddr;
2524 	int i;
2525 	int error = 0;
2526 
2527 	if (nmd->flags & NETMAP_MEM_FINALIZED)
2528 		goto out;
2529 
2530 	if (ptnmd->ptn_dev == NULL) {
2531 		nm_prerr("ptnetmap memdev not attached");
2532 		error = ENOMEM;
2533 		goto out;
2534 	}
2535 	/* Map memory through ptnetmap-memdev BAR. */
2536 	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
2537 				      &ptnmd->nm_addr, &mem_size);
2538 	if (error)
2539 		goto out;
2540 
2541 	/* Initialize the lut using the information contained in the
2542 	 * ptnetmap memory device. */
2543 	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2544 					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
2545 	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2546 					 PTNET_MDEV_IO_BUF_POOL_OBJNUM);
2547 
2548 	/* allocate the lut */
2549 	if (ptnmd->buf_lut.lut == NULL) {
2550 		nm_prinf("allocating lut");
2551 		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
2552 		if (ptnmd->buf_lut.lut == NULL) {
2553 			nm_prerr("lut allocation failed");
2554 			return ENOMEM;
2555 		}
2556 	}
2557 
2558 	/* we have physically contiguous memory mapped through PCI BAR */
2559 	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2560 					 PTNET_MDEV_IO_BUF_POOL_OFS);
2561 	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
2562 	paddr = ptnmd->nm_paddr + poolofs;
2563 
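	/*
	 * Buffers are laid out back to back inside the BAR, so entry i of
	 * the guest lut simply points at
	 *
	 *	nm_addr + poolofs + (size_t)i * bufsize
	 *
	 * Only the virtual address is stored here; the corresponding
	 * physical address is nm_paddr plus the same offset (see
	 * netmap_mem_pt_guest_ofstophys()).
	 */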
2564 	for (i = 0; i < nbuffers; i++) {
2565 		ptnmd->buf_lut.lut[i].vaddr = vaddr;
2566 		vaddr += bufsize;
2567 		paddr += bufsize;
2568 	}
2569 
2570 	ptnmd->buf_lut.objtotal = nbuffers;
2571 	ptnmd->buf_lut.objsize = bufsize;
2572 	nmd->nm_totalsize = (unsigned int)mem_size;
2573 
2574 	/* Initialize these fields as they are needed by
2575 	 * netmap_mem_bufsize().
2576 	 * XXX please improve this: why do we need this
2577 	 * replication? Maybe nmd->pools[] should not be
2578 	 * there for the guest allocator? */
2579 	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
2580 	nmd->pools[NETMAP_BUF_POOL].objtotal = nbuffers;
2581 
2582 	nmd->flags |= NETMAP_MEM_FINALIZED;
2583 out:
2584 	return error;
2585 }
2586 
2587 static void
2588 netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd)
2589 {
2590 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2591 
2592 	if (nmd->active == 1 &&
2593 	    (nmd->flags & NETMAP_MEM_FINALIZED)) {
2594 		nmd->flags &= ~NETMAP_MEM_FINALIZED;
2595 		/* unmap ptnetmap-memdev memory */
2596 		if (ptnmd->ptn_dev) {
2597 			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
2598 		}
2599 		ptnmd->nm_addr = NULL;
2600 		ptnmd->nm_paddr = 0;
2601 	}
2602 }
2603 
2604 static ssize_t
2605 netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
2606 {
2607 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2608 
2609 	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
2610 }
2611 
2612 static void
2613 netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
2614 {
2615 	if (nmd == NULL)
2616 		return;
2617 	if (netmap_verbose)
2618 		nm_prinf("deleting %p", nmd);
2619 	if (nmd->active > 0)
2620 		nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
2621 	if (netmap_verbose)
2622 		nm_prinf("done deleting %p", nmd);
2623 	NMA_LOCK_DESTROY(nmd);
2624 	nm_os_free(nmd);
2625 }
2626 
2627 static struct netmap_if *
2628 netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
2629 {
2630 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
2631 	struct mem_pt_if *ptif;
2632 	struct netmap_if *nifp = NULL;
2633 
2634 	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2635 	if (ptif == NULL) {
2636 		nm_prerr("interface %s is not in passthrough", na->name);
2637 		goto out;
2638 	}
2639 
2640 	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
2641 				    ptif->nifp_offset);
2642 out:
2643 	return nifp;
2644 }
2645 
2646 static void
2647 netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
2648 {
2649 	struct mem_pt_if *ptif;
2650 
2651 	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2652 	if (ptif == NULL) {
2653 		nm_prerr("interface %s is not in passthrough", na->name);
2654 	}
2655 }
2656 
2657 static int
2658 netmap_mem_pt_guest_rings_create(struct netmap_adapter *na)
2659 {
2660 	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
2661 	struct mem_pt_if *ptif;
2662 	struct netmap_if *nifp;
2663 	int i, error = -1;
2664 
2665 	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
2666 	if (ptif == NULL) {
2667 		nm_prerr("interface %s is not in passthrough", na->name);
2668 		goto out;
2669 	}
2670 
2671 
2672 	/* point each kring to the corresponding backend ring */
2673 	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
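	/*
	 * nifp->ring_ofs[] holds the TX rings first and the RX rings after
	 * them (the same layout netmap_mem2_if_new() builds on the host),
	 * so RX ring i is found at index netmap_all_rings(na, NR_TX) + i.
	 */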
2674 	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
2675 		struct netmap_kring *kring = na->tx_rings[i];
2676 		if (kring->ring)
2677 			continue;
2678 		kring->ring = (struct netmap_ring *)
2679 			((char *)nifp + nifp->ring_ofs[i]);
2680 	}
2681 	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
2682 		struct netmap_kring *kring = na->rx_rings[i];
2683 		if (kring->ring)
2684 			continue;
2685 		kring->ring = (struct netmap_ring *)
2686 			((char *)nifp +
2687 			 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
2688 	}
2689 
2690 	error = 0;
2691 out:
2692 	return error;
2693 }
2694 
2695 static void
2696 netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na)
2697 {
2698 #if 0
2699 	enum txrx t;
2700 
2701 	for_rx_tx(t) {
2702 		u_int i;
2703 		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
2704 			struct netmap_kring *kring = &NMR(na, t)[i];
2705 
2706 			kring->ring = NULL;
2707 		}
2708 	}
2709 #endif
2710 }
2711 
2712 static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
2713 	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
2714 	.nmd_get_info = netmap_mem_pt_guest_get_info,
2715 	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
2716 	.nmd_config = netmap_mem_pt_guest_config,
2717 	.nmd_finalize = netmap_mem_pt_guest_finalize,
2718 	.nmd_deref = netmap_mem_pt_guest_deref,
2719 	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
2720 	.nmd_delete = netmap_mem_pt_guest_delete,
2721 	.nmd_if_new = netmap_mem_pt_guest_if_new,
2722 	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
2723 	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
2724 	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
2725 };
2726 
2727 /* Called with nm_mem_list_lock held. */
2728 static struct netmap_mem_d *
2729 netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
2730 {
2731 	struct netmap_mem_d *mem = NULL;
2732 	struct netmap_mem_d *scan = netmap_last_mem_d;
2733 
2734 	do {
2735 		/* find ptnetmap allocator through host ID */
2736 		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
2737 			((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
2738 			mem = scan;
2739 			mem->refcount++;
2740 			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
2741 			break;
2742 		}
2743 		scan = scan->next;
2744 	} while (scan != netmap_last_mem_d);
2745 
2746 	return mem;
2747 }
2748 
2749 /* Called with nm_mem_list_lock held. */
2750 static struct netmap_mem_d *
2751 netmap_mem_pt_guest_create(nm_memid_t mem_id)
2752 {
2753 	struct netmap_mem_ptg *ptnmd;
2754 	int err = 0;
2755 
2756 	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
2757 	if (ptnmd == NULL) {
2758 		err = ENOMEM;
2759 		goto error;
2760 	}
2761 
2762 	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
2763 	ptnmd->host_mem_id = mem_id;
2764 	ptnmd->pt_ifs = NULL;
2765 
2766 	/* Assign new id in the guest (We have the lock) */
2767 	err = nm_mem_assign_id_locked(&ptnmd->up);
2768 	if (err)
2769 		goto error;
2770 
2771 	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
2772 	ptnmd->up.flags |= NETMAP_MEM_IO;
2773 
2774 	NMA_LOCK_INIT(&ptnmd->up);
2775 
2776 	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);
2777 
2778 
2779 	return &ptnmd->up;
2780 error:
2781 	netmap_mem_pt_guest_delete(&ptnmd->up);
2782 	return NULL;
2783 }
2784 
2785 /*
2786  * find host id in guest allocators and create guest allocator
2787  * if it is not there
2788  */
2789 static struct netmap_mem_d *
2790 netmap_mem_pt_guest_get(nm_memid_t mem_id)
2791 {
2792 	struct netmap_mem_d *nmd;
2793 
2794 	NM_MTX_LOCK(nm_mem_list_lock);
2795 	nmd = netmap_mem_pt_guest_find_memid(mem_id);
2796 	if (nmd == NULL) {
2797 		nmd = netmap_mem_pt_guest_create(mem_id);
2798 	}
2799 	NM_MTX_UNLOCK(nm_mem_list_lock);
2800 
2801 	return nmd;
2802 }
2803 
2804 /*
2805  * The guest allocator can be created by ptnetmap_memdev (during the device
2806  * attach) or by ptnetmap device (ptnet), during the netmap_attach.
2807  *
2808  * The order is not important (we have different order in LINUX and FreeBSD).
2809  * The first one, creates the device, and the second one simply attaches it.
2810  */
2811 
2812 /* Called when ptnetmap_memdev is attaching, to attach a new allocator in
2813  * the guest */
2814 struct netmap_mem_d *
2815 netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
2816 {
2817 	struct netmap_mem_d *nmd;
2818 	struct netmap_mem_ptg *ptnmd;
2819 
2820 	nmd = netmap_mem_pt_guest_get(mem_id);
2821 
2822 	/* assign this device to the guest allocator */
2823 	if (nmd) {
2824 		ptnmd = (struct netmap_mem_ptg *)nmd;
2825 		ptnmd->ptn_dev = ptn_dev;
2826 	}
2827 
2828 	return nmd;
2829 }
2830 
2831 /* Called when ptnet device is attaching */
2832 struct netmap_mem_d *
2833 netmap_mem_pt_guest_new(struct ifnet *ifp,
2834 			unsigned int nifp_offset,
2835 			unsigned int memid)
2836 {
2837 	struct netmap_mem_d *nmd;
2838 
2839 	if (ifp == NULL) {
2840 		return NULL;
2841 	}
2842 
2843 	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);
2844 
2845 	if (nmd) {
2846 		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
2847 	}
2848 
2849 	return nmd;
2850 }
2851 
2852 #endif /* WITH_PTNETMAP */
2853