xref: /freebsd/sys/dev/netmap/netmap_mem2.c (revision e8e8c939350bdf3c228a411caa9660c607c27a11)
1 /*
2  * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *   1. Redistributions of source code must retain the above copyright
8  *      notice, this list of conditions and the following disclaimer.
9  *   2. Redistributions in binary form must reproduce the above copyright
10  *      notice, this list of conditions and the following disclaimer in the
11  *      documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #ifdef linux
27 #include "bsd_glue.h"
28 #endif /* linux */
29 
30 #ifdef __APPLE__
31 #include "osx_glue.h"
32 #endif /* __APPLE__ */
33 
34 #ifdef __FreeBSD__
35 #include <sys/cdefs.h> /* prerequisite */
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/types.h>
39 #include <sys/malloc.h>
40 #include <sys/proc.h>
41 #include <vm/vm.h>	/* vtophys */
42 #include <vm/pmap.h>	/* vtophys */
43 #include <sys/socket.h> /* sockaddrs */
44 #include <sys/selinfo.h>
45 #include <sys/sysctl.h>
46 #include <net/if.h>
47 #include <net/if_var.h>
48 #include <net/vnet.h>
49 #include <machine/bus.h>	/* bus_dmamap_* */
50 
51 #endif /* __FreeBSD__ */
52 
53 #include <net/netmap.h>
54 #include <dev/netmap/netmap_kern.h>
55 #include "netmap_mem2.h"
56 
57 #define NETMAP_BUF_MAX_NUM	(20*4096*2)	/* large machine */
58 
59 #define NETMAP_POOL_MAX_NAMSZ	32
60 
61 
62 enum {
63 	NETMAP_IF_POOL   = 0,
64 	NETMAP_RING_POOL,
65 	NETMAP_BUF_POOL,
66 	NETMAP_POOLS_NR
67 };
68 
69 
70 struct netmap_obj_params {
71 	u_int size;
72 	u_int num;
73 };
74 struct netmap_obj_pool {
75 	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */
76 
77 	/* ---------------------------------------------------*/
78 	/* these are only meaningful if the pool is finalized */
79 	/* (see 'finalized' field in netmap_mem_d)            */
80 	u_int objtotal;         /* actual total number of objects. */
81 	u_int memtotal;		/* actual total memory space */
82 	u_int numclusters;	/* actual number of clusters */
83 
84 	u_int objfree;          /* number of free objects. */
85 
86 	struct lut_entry *lut;  /* virt,phys addresses, objtotal entries */
87 	uint32_t *bitmap;       /* one bit per buffer, 1 means free */
88 	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
89 	/* ---------------------------------------------------*/
90 
91 	/* limits */
92 	u_int objminsize;	/* minimum object size */
93 	u_int objmaxsize;	/* maximum object size */
94 	u_int nummin;		/* minimum number of objects */
95 	u_int nummax;		/* maximum number of objects */
96 
97 	/* these are changed only by config */
98 	u_int _objtotal;	/* total number of objects */
99 	u_int _objsize;		/* object size */
100 	u_int _clustsize;       /* cluster size */
101 	u_int _clustentries;    /* objects per cluster */
102 	u_int _numclusters;	/* number of clusters */
103 
104 	/* requested values */
105 	u_int r_objtotal;
106 	u_int r_objsize;
107 };
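
/*
 * Illustrative sketch (not compiled): how an object index relates to
 * the lut and bitmap fields above. The helper name is hypothetical.
 */
#if 0	/* example only */
static inline int
example_obj_is_free(const struct netmap_obj_pool *p, uint32_t j)
{
	/* object j lives at p->lut[j]; its free flag is bit (j % 32)
	 * of bitmap word (j / 32), with 1 meaning free */
	return (p->bitmap[j / 32] & (1U << (j % 32))) != 0;
}
#endif /* example only */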
108 
109 #ifdef linux
110 // XXX a mtx would suffice here 20130415 lr
111 #define NMA_LOCK_T		struct semaphore
112 #else /* !linux */
113 #define NMA_LOCK_T		struct mtx
114 #endif /* linux */
115 
116 typedef int (*netmap_mem_config_t)(struct netmap_mem_d*);
117 typedef int (*netmap_mem_finalize_t)(struct netmap_mem_d*);
118 typedef void (*netmap_mem_deref_t)(struct netmap_mem_d*);
119 
120 typedef uint16_t nm_memid_t;
121 
122 struct netmap_mem_d {
123 	NMA_LOCK_T nm_mtx;  /* protect the allocator */
124 	u_int nm_totalsize; /* shorthand */
125 
126 	u_int flags;
127 #define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
128 	int lasterr;		/* last error for curr config */
129 	int refcount;		/* existing priv structures */
130 	/* the three allocators */
131 	struct netmap_obj_pool pools[NETMAP_POOLS_NR];
132 
133 	netmap_mem_config_t   config;	/* called with NMA_LOCK held */
134 	netmap_mem_finalize_t finalize;	/* called with NMA_LOCK held */
135 	netmap_mem_deref_t    deref;	/* called with NMA_LOCK held */
136 
137 	nm_memid_t nm_id;	/* allocator identifier */
138 	int nm_grp;	/* iommu group id */
139 
140 	/* list of all existing allocators, sorted by nm_id */
141 	struct netmap_mem_d *prev, *next;
142 };
143 
144 /* accessor functions */
145 struct lut_entry*
146 netmap_mem_get_lut(struct netmap_mem_d *nmd)
147 {
148 	return nmd->pools[NETMAP_BUF_POOL].lut;
149 }
150 
151 u_int
152 netmap_mem_get_buftotal(struct netmap_mem_d *nmd)
153 {
154 	return nmd->pools[NETMAP_BUF_POOL].objtotal;
155 }
156 
157 size_t
158 netmap_mem_get_bufsize(struct netmap_mem_d *nmd)
159 {
160 	return nmd->pools[NETMAP_BUF_POOL]._objsize;
161 }
162 
163 #ifdef linux
164 #define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
165 #define NMA_LOCK_DESTROY(n)
166 #define NMA_LOCK(n)		down(&(n)->nm_mtx)
167 #define NMA_UNLOCK(n)		up(&(n)->nm_mtx)
168 #else /* !linux */
169 #define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
170 #define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
171 #define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
172 #define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
173 #endif /* linux */
174 
175 
176 struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
177 	[NETMAP_IF_POOL] = {
178 		.size = 1024,
179 		.num  = 100,
180 	},
181 	[NETMAP_RING_POOL] = {
182 		.size = 9*PAGE_SIZE,
183 		.num  = 200,
184 	},
185 	[NETMAP_BUF_POOL] = {
186 		.size = 2048,
187 		.num  = NETMAP_BUF_MAX_NUM,
188 	},
189 };
190 
191 struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
192 	[NETMAP_IF_POOL] = {
193 		.size = 1024,
194 		.num  = 1,
195 	},
196 	[NETMAP_RING_POOL] = {
197 		.size = 5*PAGE_SIZE,
198 		.num  = 4,
199 	},
200 	[NETMAP_BUF_POOL] = {
201 		.size = 2048,
202 		.num  = 4098,
203 	},
204 };
205 
206 
207 /*
208  * nm_mem is the memory allocator used for all physical interfaces
209  * running in netmap mode.
210  * Each virtual (VALE) port will have its own allocator.
211  */
212 static int netmap_mem_global_config(struct netmap_mem_d *nmd);
213 static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
214 static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
215 struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
216 	.pools = {
217 		[NETMAP_IF_POOL] = {
218 			.name 	= "netmap_if",
219 			.objminsize = sizeof(struct netmap_if),
220 			.objmaxsize = 4096,
221 			.nummin     = 10,	/* don't be stingy */
222 			.nummax	    = 10000,	/* XXX very large */
223 		},
224 		[NETMAP_RING_POOL] = {
225 			.name 	= "netmap_ring",
226 			.objminsize = sizeof(struct netmap_ring),
227 			.objmaxsize = 32*PAGE_SIZE,
228 			.nummin     = 2,
229 			.nummax	    = 1024,
230 		},
231 		[NETMAP_BUF_POOL] = {
232 			.name	= "netmap_buf",
233 			.objminsize = 64,
234 			.objmaxsize = 65536,
235 			.nummin     = 4,
236 			.nummax	    = 1000000, /* one million! */
237 		},
238 	},
239 	.config   = netmap_mem_global_config,
240 	.finalize = netmap_mem_global_finalize,
241 	.deref    = netmap_mem_global_deref,
242 
243 	.nm_id = 1,
244 	.nm_grp = -1,
245 
246 	.prev = &nm_mem,
247 	.next = &nm_mem,
248 };
249 
250 
251 struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
252 
253 /* blueprint for the private memory allocators */
254 static int netmap_mem_private_config(struct netmap_mem_d *nmd);
255 static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
256 static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
257 const struct netmap_mem_d nm_blueprint = {
258 	.pools = {
259 		[NETMAP_IF_POOL] = {
260 			.name 	= "%s_if",
261 			.objminsize = sizeof(struct netmap_if),
262 			.objmaxsize = 4096,
263 			.nummin     = 1,
264 			.nummax	    = 100,
265 		},
266 		[NETMAP_RING_POOL] = {
267 			.name 	= "%s_ring",
268 			.objminsize = sizeof(struct netmap_ring),
269 			.objmaxsize = 32*PAGE_SIZE,
270 			.nummin     = 2,
271 			.nummax	    = 1024,
272 		},
273 		[NETMAP_BUF_POOL] = {
274 			.name	= "%s_buf",
275 			.objminsize = 64,
276 			.objmaxsize = 65536,
277 			.nummin     = 4,
278 			.nummax	    = 1000000, /* one million! */
279 		},
280 	},
281 	.config   = netmap_mem_private_config,
282 	.finalize = netmap_mem_private_finalize,
283 	.deref    = netmap_mem_private_deref,
284 
285 	.flags = NETMAP_MEM_PRIVATE,
286 };
287 
288 /* memory allocator related sysctls */
289 
290 #define STRINGIFY(x) #x
291 
292 
293 #define DECLARE_SYSCTLS(id, name) \
294 	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
295 	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
296 	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
297 	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
298 	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
299 	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
300 	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
301 	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
302 	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
303 	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
304 	    "Default size of private netmap " STRINGIFY(name) "s"); \
305 	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
306 	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
307 	    "Default number of private netmap " STRINGIFY(name) "s")
308 
309 SYSCTL_DECL(_dev_netmap);
310 DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
311 DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
312 DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
313 
314 static int
315 nm_mem_assign_id(struct netmap_mem_d *nmd)
316 {
317 	nm_memid_t id;
318 	struct netmap_mem_d *scan = netmap_last_mem_d;
319 	int error = ENOMEM;
320 
321 	NMA_LOCK(&nm_mem);
322 
323 	do {
324 		/* we rely on unsigned wrap around */
325 		id = scan->nm_id + 1;
326 		if (id == 0) /* reserve 0 as error value */
327 			id = 1;
328 		scan = scan->next;
329 		if (id != scan->nm_id) {
330 			nmd->nm_id = id;
331 			nmd->prev = scan->prev;
332 			nmd->next = scan;
333 			scan->prev->next = nmd;
334 			scan->prev = nmd;
335 			netmap_last_mem_d = nmd;
336 			error = 0;
337 			break;
338 		}
339 	} while (scan != netmap_last_mem_d);
340 
341 	NMA_UNLOCK(&nm_mem);
342 	return error;
343 }
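
/*
 * Worked example for nm_mem_assign_id() above, assuming allocators
 * with ids 1, 2 and 4 in the sorted circular list and
 * netmap_last_mem_d pointing at id 2: the first iteration computes
 * id = 2 + 1 = 3, advances scan to the node with id 4, and since
 * 3 != 4 it links the new allocator between 2 and 4 with nm_id = 3.
 */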
344 
345 static void
346 nm_mem_release_id(struct netmap_mem_d *nmd)
347 {
348 	NMA_LOCK(&nm_mem);
349 
350 	nmd->prev->next = nmd->next;
351 	nmd->next->prev = nmd->prev;
352 
353 	if (netmap_last_mem_d == nmd)
354 		netmap_last_mem_d = nmd->prev;
355 
356 	nmd->prev = nmd->next = NULL;
357 
358 	NMA_UNLOCK(&nm_mem);
359 }
360 
361 static int
362 nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
363 {
364 	int err = 0, id;
365 	id = nm_iommu_group_id(dev);
366 	if (netmap_verbose)
367 		D("iommu_group %d", id);
368 
369 	NMA_LOCK(nmd);
370 
371 	if (nmd->nm_grp < 0)
372 		nmd->nm_grp = id;
373 
374 	if (nmd->nm_grp != id)
375 		nmd->lasterr = err = ENOMEM;
376 
377 	NMA_UNLOCK(nmd);
378 	return err;
379 }
380 
381 /*
382  * First, find the allocator that contains the requested offset,
383  * then locate the cluster through a lookup table.
384  */
385 vm_paddr_t
386 netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
387 {
388 	int i;
389 	vm_ooffset_t o = offset;
390 	vm_paddr_t pa;
391 	struct netmap_obj_pool *p;
392 
393 	NMA_LOCK(nmd);
394 	p = nmd->pools;
395 
396 	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
397 		if (offset >= p[i].memtotal)
398 			continue;
399 		// now look up the cluster's address
400 		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
401 			offset % p[i]._objsize;
402 		NMA_UNLOCK(nmd);
403 		return pa;
404 	}
405 	/* this is only in case of errors */
406 	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
407 		p[NETMAP_IF_POOL].memtotal,
408 		p[NETMAP_IF_POOL].memtotal
409 			+ p[NETMAP_RING_POOL].memtotal,
410 		p[NETMAP_IF_POOL].memtotal
411 			+ p[NETMAP_RING_POOL].memtotal
412 			+ p[NETMAP_BUF_POOL].memtotal);
413 	NMA_UNLOCK(nmd);
414 	return 0;	// XXX bad address
415 }
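
/*
 * Worked example for the lookup above (hypothetical sizes): with an
 * if pool of 102400 bytes and 4096-byte ring objects, offset 106496
 * first skips the if pool (106496 - 102400 = 4096), then lands in the
 * ring pool on object 4096 / 4096 = 1, so the result is vtophys() of
 * that object's vaddr plus the residual 4096 % 4096 = 0.
 */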
416 
417 int
418 netmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags,
419 	nm_memid_t *id)
420 {
421 	int error = 0;
422 	NMA_LOCK(nmd);
423 	error = nmd->config(nmd);
424 	if (error)
425 		goto out;
426 	if (size) {
427 		if (nmd->flags & NETMAP_MEM_FINALIZED) {
428 			*size = nmd->nm_totalsize;
429 		} else {
430 			int i;
431 			*size = 0;
432 			for (i = 0; i < NETMAP_POOLS_NR; i++) {
433 				struct netmap_obj_pool *p = nmd->pools + i;
434 				*size += (p->_numclusters * p->_clustsize);
435 			}
436 		}
437 	}
438 	if (memflags)
439 		*memflags = nmd->flags;
440 	if (id)
441 		*id = nmd->nm_id;
442 out:
443 	NMA_UNLOCK(nmd);
444 	return error;
445 }
446 
447 /*
448  * We store objects by kernel address; to export the value to
449  * userspace we need to find the object's offset within the pool.
450  * Algorithm: scan the clusters until we find the one containing
451  * the address, then add the offset within that cluster.
452  */
453 static ssize_t
454 netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
455 {
456 	int i, k = p->_clustentries, n = p->objtotal;
457 	ssize_t ofs = 0;
458 
459 	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
460 		const char *base = p->lut[i].vaddr;
461 		ssize_t relofs = (const char *) vaddr - base;
462 
463 		if (relofs < 0 || relofs >= p->_clustsize)
464 			continue;
465 
466 		ofs = ofs + relofs;
467 		ND("%s: return offset %d (cluster %d) for pointer %p",
468 		    p->name, ofs, i, vaddr);
469 		return ofs;
470 	}
471 	D("address %p is not contained inside any cluster (%s)",
472 	    vaddr, p->name);
473 	return 0; /* An error occurred */
474 }
475 
476 /* Helper functions which convert virtual addresses to offsets */
477 #define netmap_if_offset(n, v)					\
478 	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
479 
480 #define netmap_ring_offset(n, v)				\
481     ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
482 	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
483 
484 #define netmap_buf_offset(n, v)					\
485     ((n)->pools[NETMAP_IF_POOL].memtotal +			\
486 	(n)->pools[NETMAP_RING_POOL].memtotal +		\
487 	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
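
/*
 * The three macros above reflect the layout of the exported memory
 * region: the if pool comes first, followed by the ring pool and then
 * the buf pool, so e.g. a ring offset is the if pool size plus the
 * offset of the ring within the ring pool.
 */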
488 
489 
490 ssize_t
491 netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
492 {
493 	ssize_t v;
494 	NMA_LOCK(nmd);
495 	v = netmap_if_offset(nmd, addr);
496 	NMA_UNLOCK(nmd);
497 	return v;
498 }
499 
500 /*
501  * Report the index, and use the start position as a hint;
502  * otherwise buffer allocation becomes terribly expensive.
503  */
504 static void *
505 netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
506 {
507 	uint32_t i = 0;			/* index in the bitmap */
508 	uint32_t mask, j;		/* slot counter */
509 	void *vaddr = NULL;
510 
511 	if (len > p->_objsize) {
512 		D("%s request size %d too large", p->name, len);
513 		// XXX cannot reduce the size
514 		return NULL;
515 	}
516 
517 	if (p->objfree == 0) {
518 		D("no more %s objects", p->name);
519 		return NULL;
520 	}
521 	if (start)
522 		i = *start;
523 
524 	/* termination is guaranteed by p->objfree, but better check bounds on i */
525 	while (vaddr == NULL && i < p->bitmap_slots)  {
526 		uint32_t cur = p->bitmap[i];
527 		if (cur == 0) { /* bitmask is fully used */
528 			i++;
529 			continue;
530 		}
531 		/* locate a slot */
532 		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
533 			;
534 
535 		p->bitmap[i] &= ~mask; /* mark object as in use */
536 		p->objfree--;
537 
538 		vaddr = p->lut[i * 32 + j].vaddr;
539 		if (index)
540 			*index = i * 32 + j;
541 	}
542 	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", p->name, i, j, vaddr);
543 
544 	if (start)
545 		*start = i;
546 	return vaddr;
547 }
548 
549 
550 /*
551  * free by index, not by address.
552  * XXX should we also clean up the contents?
553  */
554 static int
555 netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
556 {
557 	uint32_t *ptr, mask;
558 
559 	if (j >= p->objtotal) {
560 		D("invalid index %u, max %u", j, p->objtotal);
561 		return 1;
562 	}
563 	ptr = &p->bitmap[j / 32];
564 	mask = (1 << (j % 32));
565 	if (*ptr & mask) {
566 		D("ouch, double free on buffer %d", j);
567 		return 1;
568 	} else {
569 		*ptr |= mask;
570 		p->objfree++;
571 		return 0;
572 	}
573 }
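
/*
 * Worked example for netmap_obj_free(): freeing index j = 37 touches
 * bitmap word 37 / 32 = 1 with mask 1 << (37 % 32) = 1 << 5; if that
 * bit is already set the object was free and a double free is flagged.
 */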
574 
575 /*
576  * free by address. This is slow but is only used for a few
577  * objects (rings, nifp)
578  */
579 static void
580 netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
581 {
582 	u_int i, j, n = p->numclusters;
583 
584 	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
585 		void *base = p->lut[i * p->_clustentries].vaddr;
586 		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
587 
588 		/* The given address is out of the scope of the current cluster. */
589 		if (vaddr < base || relofs >= p->_clustsize)
590 			continue;
591 
592 		j = j + relofs / p->_objsize;
593 		/* KASSERT(j != 0, ("Cannot free object 0")); */
594 		netmap_obj_free(p, j);
595 		return;
596 	}
597 	D("address %p is not contained inside any cluster (%s)",
598 	    vaddr, p->name);
599 }
600 
601 #define netmap_mem_bufsize(n)	\
602 	((n)->pools[NETMAP_BUF_POOL]._objsize)
603 
604 #define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
605 #define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
606 #define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
607 #define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
608 #define netmap_buf_malloc(n, _pos, _index)			\
609 	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)
610 
611 
612 #if 0 // XXX unused
613 /* Return the index associated to the given packet buffer */
614 #define netmap_buf_index(n, v)						\
615     (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
616 #endif
617 
618 /*
619  * Allocate extra buffers in a linked list.
620  * Returns the number actually allocated.
621  */
622 uint32_t
623 netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
624 {
625 	struct netmap_mem_d *nmd = na->nm_mem;
626 	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */
627 
628 	NMA_LOCK(nmd);
629 
630 	*head = 0;	/* default, 'null' index, i.e. an empty list */
631 	for (i = 0 ; i < n; i++) {
632 		uint32_t cur = *head;	/* save current head */
633 		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
634 		if (p == NULL) {
635 			D("no more buffers after %d of %d", i, n);
636 			*head = cur; /* restore */
637 			break;
638 		}
639 		RD(5, "allocate buffer %d -> %d", *head, cur);
640 		*p = cur; /* link to previous head */
641 	}
642 
643 	NMA_UNLOCK(nmd);
644 
645 	return i;
646 }
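
/*
 * The extra-buffers list built above is threaded through the buffers
 * themselves: *head is the index of the first buffer, the first
 * uint32_t in each buffer stores the index of the next one, and
 * index 0 terminates the list. A minimal traversal sketch (not
 * compiled, hypothetical helper name), mirroring netmap_extra_free():
 */
#if 0	/* example only */
static void
example_walk_extra_list(struct netmap_obj_pool *p, struct lut_entry *lut,
    uint32_t head)
{
	uint32_t scan;

	for (scan = head; scan >= 2 && scan < p->objtotal;
	     scan = *(uint32_t *)lut[scan].vaddr)
		; /* visit buffer 'scan' here */
}
#endif /* example only */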
647 
648 static void
649 netmap_extra_free(struct netmap_adapter *na, uint32_t head)
650 {
651 	struct lut_entry *lut = na->na_lut;
652 	struct netmap_mem_d *nmd = na->nm_mem;
653 	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
654 	uint32_t i, cur, *buf;
655 
656 	D("freeing the extra list");
657 	for (i = 0; head >= 2 && head < p->objtotal; i++) {
658 		cur = head;
659 		buf = lut[head].vaddr;
660 		head = *buf;
661 		*buf = 0;
662 		if (netmap_obj_free(p, cur))
663 			break;
664 	}
665 	if (head != 0)
666 		D("breaking with head %d", head);
667 	D("freed %d buffers", i);
668 }
669 
670 
671 /* Return nonzero on error */
672 static int
673 netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
674 {
675 	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
676 	u_int i = 0;	/* slot counter */
677 	uint32_t pos = 0;	/* slot in p->bitmap */
678 	uint32_t index = 0;	/* buffer index */
679 
680 	for (i = 0; i < n; i++) {
681 		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
682 		if (vaddr == NULL) {
683 			D("no more buffers after %d of %d", i, n);
684 			goto cleanup;
685 		}
686 		slot[i].buf_idx = index;
687 		slot[i].len = p->_objsize;
688 		slot[i].flags = 0;
689 	}
690 
691 	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
692 	return (0);
693 
694 cleanup:
695 	while (i > 0) {
696 		i--;
697 		netmap_obj_free(p, slot[i].buf_idx);
698 	}
699 	bzero(slot, n * sizeof(slot[0]));
700 	return (ENOMEM);
701 }
702 
703 static void
704 netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
705 {
706 	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
707 	u_int i;
708 
709 	for (i = 0; i < n; i++) {
710 		slot[i].buf_idx = index;
711 		slot[i].len = p->_objsize;
712 		slot[i].flags = 0;
713 	}
714 }
715 
716 
717 static void
718 netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
719 {
720 	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
721 
722 	if (i < 2 || i >= p->objtotal) {
723 		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
724 		return;
725 	}
726 	netmap_obj_free(p, i);
727 }
728 
729 
730 static void
731 netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
732 {
733 	u_int i;
734 
735 	for (i = 0; i < n; i++) {
736 		if (slot[i].buf_idx >= 2)	/* indices 0 and 1 are reserved */
737 			netmap_free_buf(nmd, slot[i].buf_idx);
738 	}
739 }
740 
741 static void
742 netmap_reset_obj_allocator(struct netmap_obj_pool *p)
743 {
744 
745 	if (p == NULL)
746 		return;
747 	if (p->bitmap)
748 		free(p->bitmap, M_NETMAP);
749 	p->bitmap = NULL;
750 	if (p->lut) {
751 		u_int i;
752 		size_t sz = p->_clustsize;
753 
754 		/*
755 		 * Free each cluster allocated in
756 		 * netmap_finalize_obj_allocator().  The cluster start
757 		 * addresses are stored at multiples of p->_clustentries
758 		 * in the lut.
759 		 */
760 		for (i = 0; i < p->objtotal; i += p->_clustentries) {
761 			if (p->lut[i].vaddr)
762 				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
763 		}
764 		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
765 #ifdef linux
766 		vfree(p->lut);
767 #else
768 		free(p->lut, M_NETMAP);
769 #endif
770 	}
771 	p->lut = NULL;
772 	p->objtotal = 0;
773 	p->memtotal = 0;
774 	p->numclusters = 0;
775 	p->objfree = 0;
776 }
777 
778 /*
779  * Free all resources related to an allocator.
780  */
781 static void
782 netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
783 {
784 	if (p == NULL)
785 		return;
786 	netmap_reset_obj_allocator(p);
787 }
788 
789 /*
790  * We receive a request for objtotal objects, of size objsize each.
791  * Internally we may round up both numbers, as we allocate objects
792  * in small clusters whose size is a multiple of the page size.
793  * We need to keep track of objtotal and clustentries,
794  * as they are needed when freeing memory.
795  *
796  * XXX note -- userspace needs the buffers to be contiguous,
797  *	so we cannot afford gaps at the end of a cluster.
798  */
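
/*
 * Worked example of the rounding below (hypothetical request,
 * assuming PAGE_SIZE = 4096): for 163840 objects of 2048 bytes,
 * 2 objects fill a 4 KB cluster exactly, so clustentries = 2,
 * clustsize = 4096 and numclusters = 81920, with objtotal unchanged.
 * A 1536-byte object instead needs 8 objects per 3-page
 * (12288-byte) cluster, the smallest exact page-boundary fit.
 */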
799 
800 
801 /* call with NMA_LOCK held */
802 static int
803 netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
804 {
805 	int i;
806 	u_int clustsize;	/* the cluster size, multiple of page size */
807 	u_int clustentries;	/* how many objects per cluster */
808 
809 	/* we store the current request, so we can
810 	 * detect configuration changes later */
811 	p->r_objtotal = objtotal;
812 	p->r_objsize = objsize;
813 
814 #define MAX_CLUSTSIZE	(1<<22)		// 4 MB
815 #define LINE_ROUND	NM_CACHE_ALIGN	// 64
816 	if (objsize >= MAX_CLUSTSIZE) {
817 		/* we could do it but there is no point */
818 		D("unsupported allocation for %d bytes", objsize);
819 		return EINVAL;
820 	}
821 	/* make sure objsize is a multiple of LINE_ROUND */
822 	i = (objsize & (LINE_ROUND - 1));
823 	if (i) {
824 		D("XXX aligning object by %d bytes", LINE_ROUND - i);
825 		objsize += LINE_ROUND - i;
826 	}
827 	if (objsize < p->objminsize || objsize > p->objmaxsize) {
828 		D("requested objsize %d out of range [%d, %d]",
829 			objsize, p->objminsize, p->objmaxsize);
830 		return EINVAL;
831 	}
832 	if (objtotal < p->nummin || objtotal > p->nummax) {
833 		D("requested objtotal %d out of range [%d, %d]",
834 			objtotal, p->nummin, p->nummax);
835 		return EINVAL;
836 	}
837 	/*
838 	 * Compute the cluster geometry using a brute-force approach:
839 	 * given a max cluster size, we keep adding objects until the
840 	 * cluster ends exactly on a page boundary, so that no space
841 	 * is wasted between the last object and the next page.
842 	 */
843 	for (clustentries = 0, i = 1;; i++) {
844 		u_int delta, used = i * objsize;
845 		if (used > MAX_CLUSTSIZE)
846 			break;
847 		delta = used % PAGE_SIZE;
848 		if (delta == 0) { // exact solution
849 			clustentries = i;
850 			break;
851 		}
852 	}
853 	/* exact solution not found */
854 	if (clustentries == 0) {
855 		D("unsupported allocation for %d bytes", objsize);
856 		return EINVAL;
857 	}
858 	/* compute clustsize */
859 	clustsize = clustentries * objsize;
860 	if (netmap_verbose)
861 		D("objsize %d clustsize %d objects %d",
862 			objsize, clustsize, clustentries);
863 
864 	/*
865 	 * The number of clusters is n = ceil(objtotal/clustentries)
866 	 * objtotal' = n * clustentries
867 	 */
868 	p->_clustentries = clustentries;
869 	p->_clustsize = clustsize;
870 	p->_numclusters = (objtotal + clustentries - 1) / clustentries;
871 
872 	/* actual values (may be larger than requested) */
873 	p->_objsize = objsize;
874 	p->_objtotal = p->_numclusters * clustentries;
875 
876 	return 0;
877 }
878 
879 
880 /* call with NMA_LOCK held */
881 static int
882 netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
883 {
884 	int i; /* must be signed */
885 	size_t n;
886 
887 	/* optimistically assume we have enough memory */
888 	p->numclusters = p->_numclusters;
889 	p->objtotal = p->_objtotal;
890 
891 	n = sizeof(struct lut_entry) * p->objtotal;
892 #ifdef linux
893 	p->lut = vmalloc(n);
894 #else
895 	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
896 #endif
897 	if (p->lut == NULL) {
898 		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
899 		goto clean;
900 	}
901 
902 	/* Allocate the bitmap */
903 	n = (p->objtotal + 31) / 32;
904 	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
905 	if (p->bitmap == NULL) {
906 		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
907 		    p->name);
908 		goto clean;
909 	}
910 	p->bitmap_slots = n;
911 
912 	/*
913 	 * Allocate clusters, init pointers and bitmap
914 	 */
915 
916 	n = p->_clustsize;
917 	for (i = 0; i < (int)p->objtotal;) {
918 		int lim = i + p->_clustentries;
919 		char *clust;
920 
921 		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
922 		    (size_t)0, -1UL, PAGE_SIZE, 0);
923 		if (clust == NULL) {
924 			/*
925 			 * If we get here, there is a severe memory shortage,
926 			 * so halve the allocated memory to reclaim some.
927 			 */
928 			D("Unable to create cluster at %d for '%s' allocator",
929 			    i, p->name);
930 			if (i < 2) /* nothing to halve */
931 				goto out;
932 			lim = i / 2;
933 			for (i--; i >= lim; i--) {
934 				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
935 				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
936 					contigfree(p->lut[i].vaddr,
937 						n, M_NETMAP);
938 				p->lut[i].vaddr = NULL;
939 			}
940 		out:
941 			p->objtotal = i;
942 			/* we may have stopped in the middle of a cluster */
943 			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
944 			break;
945 		}
946 		/*
947 		 * Set bitmap and lut state for all buffers in the current
948 		 * cluster.
949 		 *
950 		 * [i, lim) is the set of buffer indexes that cover the
951 		 * current cluster.
952 		 *
953 		 * 'clust' is really the address of the current buffer in
954 		 * the current cluster as we index through it with a stride
955 		 * of p->_objsize.
956 		 */
957 		for (; i < lim; i++, clust += p->_objsize) {
958 			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
959 			p->lut[i].vaddr = clust;
960 			p->lut[i].paddr = vtophys(clust);
961 		}
962 	}
963 	p->objfree = p->objtotal;
964 	p->memtotal = p->numclusters * p->_clustsize;
965 	if (p->objfree == 0)
966 		goto clean;
967 	if (netmap_verbose)
968 		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
969 		    p->numclusters, p->_clustsize >> 10,
970 		    p->memtotal >> 10, p->name);
971 
972 	return 0;
973 
974 clean:
975 	netmap_reset_obj_allocator(p);
976 	return ENOMEM;
977 }
978 
979 /* call with lock held */
980 static int
981 netmap_memory_config_changed(struct netmap_mem_d *nmd)
982 {
983 	int i;
984 
985 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
986 		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
987 		    nmd->pools[i].r_objtotal != netmap_params[i].num)
988 		    return 1;
989 	}
990 	return 0;
991 }
992 
993 static void
994 netmap_mem_reset_all(struct netmap_mem_d *nmd)
995 {
996 	int i;
997 
998 	if (netmap_verbose)
999 		D("resetting %p", nmd);
1000 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1001 		netmap_reset_obj_allocator(&nmd->pools[i]);
1002 	}
1003 	nmd->flags  &= ~NETMAP_MEM_FINALIZED;
1004 }
1005 
1006 static int
1007 netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
1008 {
1009 	int i, lim = p->_objtotal;
1010 
1011 	if (na->pdev == NULL)
1012 		return 0;
1013 
1014 #ifdef __FreeBSD__
1015 	(void)i;
1016 	(void)lim;
1017 	D("unsupported on FreeBSD");
1018 #else /* linux */
1019 	for (i = 2; i < lim; i++) {
1020 		netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr);
1021 	}
1022 #endif /* linux */
1023 
1024 	return 0;
1025 }
1026 
1027 static int
1028 netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
1029 {
1030 #ifdef __FreeBSD__
1031 	D("unsupported on FreeBSD");
1032 #else /* linux */
1033 	int i, lim = p->_objtotal;
1034 
1035 	if (na->pdev == NULL)
1036 		return 0;
1037 
1038 	for (i = 2; i < lim; i++) {
1039 		netmap_load_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr,
1040 				p->lut[i].vaddr);
1041 	}
1042 #endif /* linux */
1043 
1044 	return 0;
1045 }
1046 
1047 static int
1048 netmap_mem_finalize_all(struct netmap_mem_d *nmd)
1049 {
1050 	int i;
1051 	if (nmd->flags & NETMAP_MEM_FINALIZED)
1052 		return 0;
1053 	nmd->lasterr = 0;
1054 	nmd->nm_totalsize = 0;
1055 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1056 		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
1057 		if (nmd->lasterr)
1058 			goto error;
1059 		nmd->nm_totalsize += nmd->pools[i].memtotal;
1060 	}
1061 	/* buffers 0 and 1 are reserved */
1062 	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
1063 	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
1064 	nmd->flags |= NETMAP_MEM_FINALIZED;
1065 
1066 	if (netmap_verbose)
1067 		D("interfaces %d KB, rings %d KB, buffers %d MB",
1068 		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
1069 		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
1070 		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
1071 
1072 	if (netmap_verbose)
1073 		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
1074 
1075 
1076 	return 0;
1077 error:
1078 	netmap_mem_reset_all(nmd);
1079 	return nmd->lasterr;
1080 }
1081 
1082 
1083 
1084 void
1085 netmap_mem_private_delete(struct netmap_mem_d *nmd)
1086 {
1087 	if (nmd == NULL)
1088 		return;
1089 	if (netmap_verbose)
1090 		D("deleting %p", nmd);
1091 	if (nmd->refcount > 0)
1092 		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
1093 	nm_mem_release_id(nmd);
1094 	if (netmap_verbose)
1095 		D("done deleting %p", nmd);
1096 	NMA_LOCK_DESTROY(nmd);
1097 	free(nmd, M_DEVBUF);
1098 }
1099 
1100 static int
1101 netmap_mem_private_config(struct netmap_mem_d *nmd)
1102 {
1103 	/* nothing to do, we are configured on creation
1104 	 * and configuration never changes thereafter
1105 	 */
1106 	return 0;
1107 }
1108 
1109 static int
1110 netmap_mem_private_finalize(struct netmap_mem_d *nmd)
1111 {
1112 	int err;
1113 	nmd->refcount++;
1114 	err = netmap_mem_finalize_all(nmd);
1115 	return err;
1116 
1117 }
1118 
1119 static void
1120 netmap_mem_private_deref(struct netmap_mem_d *nmd)
1121 {
1122 	if (--nmd->refcount <= 0)
1123 		netmap_mem_reset_all(nmd);
1124 }
1125 
1126 
1127 /*
1128  * allocator for private memory
1129  */
1130 struct netmap_mem_d *
1131 netmap_mem_private_new(const char *name, u_int txr, u_int txd,
1132 	u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr)
1133 {
1134 	struct netmap_mem_d *d = NULL;
1135 	struct netmap_obj_params p[NETMAP_POOLS_NR];
1136 	int i, err;
1137 	u_int v, maxd;
1138 
1139 	d = malloc(sizeof(struct netmap_mem_d),
1140 			M_DEVBUF, M_NOWAIT | M_ZERO);
1141 	if (d == NULL) {
1142 		err = ENOMEM;
1143 		goto error;
1144 	}
1145 
1146 	*d = nm_blueprint;
1147 
1148 	err = nm_mem_assign_id(d);
1149 	if (err)
1150 		goto error;
1151 
1152 	/* account for the fake host rings */
1153 	txr++;
1154 	rxr++;
1155 
1156 	/* copy the min values */
1157 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1158 		p[i] = netmap_min_priv_params[i];
1159 	}
1160 
1161 	/* possibly increase them to fit user request */
1162 	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
1163 	if (p[NETMAP_IF_POOL].size < v)
1164 		p[NETMAP_IF_POOL].size = v;
1165 	v = 2 + 4 * npipes;
1166 	if (p[NETMAP_IF_POOL].num < v)
1167 		p[NETMAP_IF_POOL].num = v;
1168 	maxd = (txd > rxd) ? txd : rxd;
1169 	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
1170 	if (p[NETMAP_RING_POOL].size < v)
1171 		p[NETMAP_RING_POOL].size = v;
1172 	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
1173 	 * and two rx rings (again, 1 normal and 1 fake host)
1174 	 */
1175 	v = txr + rxr + 8 * npipes;
1176 	if (p[NETMAP_RING_POOL].num < v)
1177 		p[NETMAP_RING_POOL].num = v;
1178 	/* for each pipe we only need the buffers for the 4 "real" rings.
1179 	 * On the other hand, the pipe ring size may differ from that of
1180 	 * the parent port rings. As a compromise, we allocate twice the
1181 	 * space that would be needed if the pipe rings matched the parent rings.
1182 	 */
1183 	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
1184 		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
1185 	if (p[NETMAP_BUF_POOL].num < v)
1186 		p[NETMAP_BUF_POOL].num = v;
1187 
1188 	if (netmap_verbose)
1189 		D("req if %d*%d ring %d*%d buf %d*%d",
1190 			p[NETMAP_IF_POOL].num,
1191 			p[NETMAP_IF_POOL].size,
1192 			p[NETMAP_RING_POOL].num,
1193 			p[NETMAP_RING_POOL].size,
1194 			p[NETMAP_BUF_POOL].num,
1195 			p[NETMAP_BUF_POOL].size);
1196 
1197 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1198 		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1199 				nm_blueprint.pools[i].name,
1200 				name);
1201 		err = netmap_config_obj_allocator(&d->pools[i],
1202 				p[i].num, p[i].size);
1203 		if (err)
1204 			goto error;
1205 	}
1206 
1207 	d->flags &= ~NETMAP_MEM_FINALIZED;
1208 
1209 	NMA_LOCK_INIT(d);
1210 
1211 	return d;
1212 error:
1213 	netmap_mem_private_delete(d);
1214 	if (perr)
1215 		*perr = err;
1216 	return NULL;
1217 }
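
/*
 * Usage sketch for netmap_mem_private_new() (not compiled; the helper
 * name, port name and sizes are illustrative): a private allocator for
 * one tx/rx ring pair of 1024 slots, no pipes and no extra buffers.
 */
#if 0	/* example only */
static struct netmap_mem_d *
example_private_allocator(int *perr)
{
	return netmap_mem_private_new("vale0:0", 1 /* txr */, 1024 /* txd */,
	    1 /* rxr */, 1024 /* rxd */, 0 /* extra_bufs */, 0 /* npipes */,
	    perr);
}
#endif /* example only */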
1218 
1219 
1220 /* call with lock held */
1221 static int
1222 netmap_mem_global_config(struct netmap_mem_d *nmd)
1223 {
1224 	int i;
1225 
1226 	if (nmd->refcount)
1227 		/* already in use, we cannot change the configuration */
1228 		goto out;
1229 
1230 	if (!netmap_memory_config_changed(nmd))
1231 		goto out;
1232 
1233 	D("reconfiguring");
1234 
1235 	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1236 		/* reset previous allocation */
1237 		for (i = 0; i < NETMAP_POOLS_NR; i++) {
1238 			netmap_reset_obj_allocator(&nmd->pools[i]);
1239 		}
1240 		nmd->flags &= ~NETMAP_MEM_FINALIZED;
1241 	}
1242 
1243 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1244 		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1245 				netmap_params[i].num, netmap_params[i].size);
1246 		if (nmd->lasterr)
1247 			goto out;
1248 	}
1249 
1250 out:
1251 
1252 	return nmd->lasterr;
1253 }
1254 
1255 static int
1256 netmap_mem_global_finalize(struct netmap_mem_d *nmd)
1257 {
1258 	int err;
1259 
1260 	/* update configuration if changed */
1261 	if (netmap_mem_global_config(nmd))
1262 		goto out;
1263 
1264 	nmd->refcount++;
1265 
1266 	if (nmd->flags & NETMAP_MEM_FINALIZED) {
1267 		/* may happen if config is not changed */
1268 		ND("nothing to do");
1269 		goto out;
1270 	}
1271 
1272 	if (netmap_mem_finalize_all(nmd))
1273 		goto out;
1274 
1275 	nmd->lasterr = 0;
1276 
1277 out:
1278 	if (nmd->lasterr)
1279 		nmd->refcount--;
1280 	err = nmd->lasterr;
1281 
1282 	return err;
1283 
1284 }
1285 
1286 int
1287 netmap_mem_init(void)
1288 {
1289 	NMA_LOCK_INIT(&nm_mem);
1290 	return (0);
1291 }
1292 
1293 void
1294 netmap_mem_fini(void)
1295 {
1296 	int i;
1297 
1298 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
1299 	    netmap_destroy_obj_allocator(&nm_mem.pools[i]);
1300 	}
1301 	NMA_LOCK_DESTROY(&nm_mem);
1302 }
1303 
1304 static void
1305 netmap_free_rings(struct netmap_adapter *na)
1306 {
1307 	struct netmap_kring *kring;
1308 	struct netmap_ring *ring;
1309 	if (!na->tx_rings)
1310 		return;
1311 	for (kring = na->tx_rings; kring != na->rx_rings; kring++) {
1312 		ring = kring->ring;
1313 		if (ring == NULL)
1314 			continue;
1315 		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1316 		netmap_ring_free(na->nm_mem, ring);
1317 		kring->ring = NULL;
1318 	}
1319 	for (/* cont'd from above */; kring != na->tailroom; kring++) {
1320 		ring = kring->ring;
1321 		if (ring == NULL)
1322 			continue;
1323 		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
1324 		netmap_ring_free(na->nm_mem, ring);
1325 		kring->ring = NULL;
1326 	}
1327 }
1328 
1329 /*
1330  * Allocate netmap rings and buffers for this card (the function
1331  * grabs NMA_LOCK itself, so do not call it with the lock held).
1332  * The rings are contiguous, but have variable size.
1333  * The kring array must follow the layout described
1334  * in netmap_krings_create().
1335  */
1336 int
1337 netmap_mem_rings_create(struct netmap_adapter *na)
1338 {
1339 	struct netmap_ring *ring;
1340 	u_int len, ndesc;
1341 	struct netmap_kring *kring;
1342 	u_int i;
1343 
1344 	NMA_LOCK(na->nm_mem);
1345 
1346 	/* transmit rings */
1347 	for (i = 0, kring = na->tx_rings; kring != na->rx_rings; kring++, i++) {
1348 		if (kring->ring) {
1349 			ND("%s %ld already created", kring->name, kring - na->tx_rings);
1350 			continue; /* already created by somebody else */
1351 		}
1352 		ndesc = kring->nkr_num_slots;
1353 		len = sizeof(struct netmap_ring) +
1354 			  ndesc * sizeof(struct netmap_slot);
1355 		ring = netmap_ring_malloc(na->nm_mem, len);
1356 		if (ring == NULL) {
1357 			D("Cannot allocate tx_ring");
1358 			goto cleanup;
1359 		}
1360 		ND("txring at %p", ring);
1361 		kring->ring = ring;
1362 		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
1363 		*(int64_t *)(uintptr_t)&ring->buf_ofs =
1364 		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
1365 			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
1366 			netmap_ring_offset(na->nm_mem, ring);
1367 
1368 		/* copy values from kring */
1369 		ring->head = kring->rhead;
1370 		ring->cur = kring->rcur;
1371 		ring->tail = kring->rtail;
1372 		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
1373 			netmap_mem_bufsize(na->nm_mem);
1374 		ND("%s h %d c %d t %d", kring->name,
1375 			ring->head, ring->cur, ring->tail);
1376 		ND("initializing slots for txring");
1377 		if (i != na->num_tx_rings || (na->na_flags & NAF_HOST_RINGS)) {
1378 			/* this is a real ring */
1379 			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1380 				D("Cannot allocate buffers for tx_ring");
1381 				goto cleanup;
1382 			}
1383 		} else {
1384 			/* this is a fake tx ring, set all indices to 0 */
1385 			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
1386 		}
1387 	}
1388 
1389 	/* receive rings */
1390 	for ( i = 0 /* kring cont'd from above */ ; kring != na->tailroom; kring++, i++) {
1391 		if (kring->ring) {
1392 			ND("%s %ld already created", kring->name, kring - na->rx_rings);
1393 			continue; /* already created by somebody else */
1394 		}
1395 		ndesc = kring->nkr_num_slots;
1396 		len = sizeof(struct netmap_ring) +
1397 			  ndesc * sizeof(struct netmap_slot);
1398 		ring = netmap_ring_malloc(na->nm_mem, len);
1399 		if (ring == NULL) {
1400 			D("Cannot allocate rx_ring");
1401 			goto cleanup;
1402 		}
1403 		ND("rxring at %p", ring);
1404 		kring->ring = ring;
1405 		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
1406 		*(int64_t *)(uintptr_t)&ring->buf_ofs =
1407 		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
1408 		        na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
1409 			netmap_ring_offset(na->nm_mem, ring);
1410 
1411 		/* copy values from kring */
1412 		ring->head = kring->rhead;
1413 		ring->cur = kring->rcur;
1414 		ring->tail = kring->rtail;
1415 		*(int *)(uintptr_t)&ring->nr_buf_size =
1416 			netmap_mem_bufsize(na->nm_mem);
1417 		ND("%s h %d c %d t %d", kring->name,
1418 			ring->head, ring->cur, ring->tail);
1419 		ND("initializing slots for rxring %p", ring);
1420 		if (i != na->num_rx_rings || (na->na_flags & NAF_HOST_RINGS)) {
1421 			/* this is a real ring */
1422 			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
1423 				D("Cannot allocate buffers for rx_ring");
1424 				goto cleanup;
1425 			}
1426 		} else {
1427 			/* this is a fake rx ring, set all indices to 1 */
1428 			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 1);
1429 		}
1430 	}
1431 
1432 	NMA_UNLOCK(na->nm_mem);
1433 
1434 	return 0;
1435 
1436 cleanup:
1437 	netmap_free_rings(na);
1438 
1439 	NMA_UNLOCK(na->nm_mem);
1440 
1441 	return ENOMEM;
1442 }
1443 
1444 void
1445 netmap_mem_rings_delete(struct netmap_adapter *na)
1446 {
1447 	/* last instance, release bufs and rings */
1448 	NMA_LOCK(na->nm_mem);
1449 
1450 	netmap_free_rings(na);
1451 
1452 	NMA_UNLOCK(na->nm_mem);
1453 }
1454 
1455 
1456 /* grabs NMA_LOCK internally */
1457 /*
1458  * Allocate the per-fd structure netmap_if.
1459  *
1460  * We assume that the configuration stored in na
1461  * (number of tx/rx rings and descs) does not change while
1462  * the interface is in netmap mode.
1463  */
1464 struct netmap_if *
1465 netmap_mem_if_new(struct netmap_adapter *na)
1466 {
1467 	struct netmap_if *nifp;
1468 	ssize_t base; /* handy for relative offsets between rings and nifp */
1469 	u_int i, len, ntx, nrx;
1470 
1471 	/* account for the (eventually fake) host rings */
1472 	/* account for the (possibly fake) host rings */
1473 	nrx = na->num_rx_rings + 1;
1474 	/*
1475 	 * the descriptor is followed inline by an array of offsets
1476 	 * to the tx and rx rings in the shared memory region.
1477 	 */
1478 
1479 	NMA_LOCK(na->nm_mem);
1480 
1481 	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
1482 	nifp = netmap_if_malloc(na->nm_mem, len);
1483 	if (nifp == NULL) {
1484 		NMA_UNLOCK(na->nm_mem);
1485 		return NULL;
1486 	}
1487 
1488 	/* initialize base fields -- override const */
1489 	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
1490 	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
1491 	strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ);
1492 
1493 	/*
1494 	 * fill the slots for the rx and tx rings. They contain the offset
1495 	 * between the ring and nifp, so the information is usable in
1496 	 * userspace to reach the ring from the nifp.
1497 	 */
1498 	base = netmap_if_offset(na->nm_mem, nifp);
1499 	for (i = 0; i < ntx; i++) {
1500 		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
1501 			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
1502 	}
1503 	for (i = 0; i < nrx; i++) {
1504 		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
1505 			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
1506 	}
1507 
1508 	NMA_UNLOCK(na->nm_mem);
1509 
1510 	return (nifp);
1511 }
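
/*
 * The ring_ofs[] entries filled above are what userspace uses to
 * reach the rings: a sketch of the lookup performed by the
 * NETMAP_TXRING()/NETMAP_RXRING() macros in netmap_user.h
 * (not compiled, hypothetical helper name).
 */
#if 0	/* example only */
static struct netmap_ring *
example_txring(struct netmap_if *nifp, u_int i)
{
	return (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i]);
}
#endif /* example only */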
1512 
1513 void
1514 netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
1515 {
1516 	if (nifp == NULL)
1517 		/* nothing to do */
1518 		return;
1519 	NMA_LOCK(na->nm_mem);
1520 	if (nifp->ni_bufs_head)
1521 		netmap_extra_free(na, nifp->ni_bufs_head);
1522 	netmap_if_free(na->nm_mem, nifp);
1523 
1524 	NMA_UNLOCK(na->nm_mem);
1525 }
1526 
1527 static void
1528 netmap_mem_global_deref(struct netmap_mem_d *nmd)
1529 {
1530 
1531 	nmd->refcount--;
1532 	if (!nmd->refcount)
1533 		nmd->nm_grp = -1;
1534 	if (netmap_verbose)
1535 		D("refcount = %d", nmd->refcount);
1536 
1537 }
1538 
1539 int
1540 netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
1541 {
1542 	if (nm_mem_assign_group(nmd, na->pdev) != 0) {	/* returns 0 or ENOMEM */
1543 		return ENOMEM;
1544 	} else {
1545 		NMA_LOCK(nmd);
1546 		nmd->finalize(nmd);
1547 		NMA_UNLOCK(nmd);
1548 	}
1549 
1550 	if (!nmd->lasterr && na->pdev)
1551 		netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
1552 
1553 	return nmd->lasterr;
1554 }
1555 
1556 void
1557 netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
1558 {
1559 	NMA_LOCK(nmd);
1560 	netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
1561 	if (nmd->refcount == 1) {
1562 		u_int i;
1563 
1564 		/*
1565 		 * Reset the allocator when it falls out of use so that any
1566 		 * pool resources leaked by unclean application exits are
1567 		 * reclaimed.
1568 		 */
1569 		for (i = 0; i < NETMAP_POOLS_NR; i++) {
1570 			struct netmap_obj_pool *p;
1571 			u_int j;
1572 
1573 			p = &nmd->pools[i];
1574 			p->objfree = p->objtotal;
1575 			/*
1576 			 * Reproduce the net effect of the M_ZERO malloc()
1577 			 * and marking of free entries in the bitmap that
1578 			 * occur in finalize_obj_allocator()
1579 			 * occur in netmap_finalize_obj_allocator().
1580 			memset(p->bitmap,
1581 			    '\0',
1582 			    sizeof(uint32_t) * ((p->objtotal + 31) / 32));
1583 
1584 			/*
1585 			 * Set all the bits in the bitmap that have
1586 			 * corresponding buffers to 1 to indicate they are
1587 			 * free.
1588 			 */
1589 			for (j = 0; j < p->objtotal; j++) {
1590 				if (p->lut[j].vaddr != NULL) {
1591 					p->bitmap[ (j>>5) ] |=  ( 1 << (j & 31) );
1592 				}
1593 			}
1594 		}
1595 
1596 		/*
1597 		 * Per netmap_mem_finalize_all(),
1598 		 * buffers 0 and 1 are reserved
1599 		 */
1600 		nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
1601 		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
1602 	}
1603 	nmd->deref(nmd);
1604 	NMA_UNLOCK(nmd);
1605 }
1606