xref: /freebsd/sys/dev/netmap/netmap_mem2.c (revision 76b28ad6ab6dc8d4a62cb7de7f143595be535813)
/*
 * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

#endif /* __FreeBSD__ */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include "netmap_mem2.h"

#ifdef linux
#define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
#define NMA_LOCK_DESTROY(n)
#define NMA_LOCK(n)		down(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		up(&(n)->nm_mtx)
#else /* !linux */
#define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
#define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
#endif /* linux */


struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};

struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 1,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};

/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref    = netmap_mem_global_deref,

	.nm_id = 1,

	.prev = &nm_mem,
	.next = &nm_mem,
};


struct netmap_mem_d *netmap_last_mem_d = &nm_mem;

// XXX logically belongs to nm_mem
struct lut_entry *netmap_buffer_lut;	/* exported */

/* blueprint for the private memory allocators */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref    = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
};

/* memory allocator related sysctls */

#define STRINGIFY(x) #x


#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
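
/*
 * For example, DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf) above exports
 * dev.netmap.buf_size, dev.netmap.buf_curr_size, dev.netmap.buf_num,
 * dev.netmap.buf_curr_num, dev.netmap.priv_buf_size and
 * dev.netmap.priv_buf_num.
 */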
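
/*
 * nm_mem_assign_id() below relies on the allocators being kept on a
 * circular doubly-linked list rooted at nm_mem (which owns id 1): a
 * new allocator receives the first id not already in use, scanning
 * from the most recently inserted node (netmap_last_mem_d).
 */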
static int
nm_mem_assign_id(struct netmap_mem_d *nmd)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	NMA_LOCK(&nm_mem);

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			nmd->nm_id = id;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	NMA_UNLOCK(&nm_mem);
	return error;
}

static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	NMA_LOCK(&nm_mem);

	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;

	NMA_UNLOCK(&nm_mem);
}


/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
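/*
 * Example (illustrative sizes): with an IF pool of 0x1000 bytes and a
 * RING pool of 0x20000 bytes, offset 0x1234 falls past the IF pool,
 * so it is resolved in the RING pool as relative offset 0x234, i.e.
 * lut[0x234 / _objsize].paddr + 0x234 % _objsize.
 */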
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
		pa = p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
	return 0;	// XXX bad address
}

int
netmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags,
	nm_memid_t *id)
{
	int error = 0;
	NMA_LOCK(nmd);
	error = nmd->config(nmd);
	if (error)
		goto out;
	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		*size = nmd->nm_totalsize;
	} else {
		int i;
		*size = 0;
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			struct netmap_obj_pool *p = nmd->pools + i;
			*size += (p->_numclusters * p->_clustsize);
		}
	}
	*memflags = nmd->flags;
	*id = nmd->nm_id;
out:
	NMA_UNLOCK(nmd);
	return error;
}

/*
 * We store objects by kernel address; to export the value to
 * userspace we need to find its offset within the pool. Algorithm:
 * scan until we find the cluster, then add the actual offset in
 * the cluster.
 */
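/*
 * Example: with _clustsize = 4096 and _clustentries = 2, a vaddr
 * falling 100 bytes into the third cluster (lut index 4) yields
 * ofs = 2 * 4096 + 100 = 8292.
 */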
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
    ((n)->pools[NETMAP_IF_POOL].memtotal + 			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(n, v)					\
    ((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +		\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))


ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	ssize_t v;
	NMA_LOCK(nmd);
	v = netmap_if_offset(nmd, addr);
	NMA_UNLOCK(nmd);
	return v;
}

/*
 * report the index and use the start position as a hint;
 * otherwise buffer allocation becomes terribly expensive.
 */
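/*
 * Bitmap convention: a set bit marks a free object; bitmap[i] covers
 * objects [i*32 .. i*32+31]. E.g. finding bit 5 set in bitmap[2]
 * means object 2*32 + 5 = 69 is available.
 */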
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}


/*
 * free by index, not by address.
 * XXX should we also cleanup the content?
 */
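/*
 * Example: freeing index j = 37 sets bit 37 % 32 = 5 in
 * p->bitmap[37 / 32] = p->bitmap[1] and increments p->objfree;
 * finding the bit already set reveals a double free.
 */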
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		D("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}

/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is out of the scope of the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index)


#if 0 // XXX unused
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
    (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif

/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 */
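/*
 * The list is threaded through the buffers themselves: the first
 * 4 bytes of each allocated buffer store the index of the next one,
 * *head is the index of the first, and index 0 terminates the list.
 */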
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index, i.e. empty list */
	for (i = 0; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			D("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		RD(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}

static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	D("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		D("breaking with head %d", head);
	D("freed %d buffers", i);
}


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			D("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}


static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx >= 2) /* skip the reserved buffers 0 and 1 */
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;
		size_t sz = p->_clustsize;

		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */
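/*
 * Example: with objsize = 2048 and PAGE_SIZE = 4096 two objects fill
 * a page exactly, so clustentries = 2 and clustsize = 4096; a request
 * for objtotal = 1001 is then rounded up to 501 clusters, i.e. 1002
 * objects.
 */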


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
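	/*
	 * E.g. with objsize = 1536 and PAGE_SIZE = 4096 the first exact
	 * fit is i = 8 (8 * 1536 = 12288, exactly 3 pages), so the loop
	 * picks clustentries = 8 and no space is wasted.
	 */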
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ((clustentries * objsize) % PAGE_SIZE))
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}


/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
	p->lut = vmalloc(n);
#else
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[(i>>5)] &= ~(1 << (i & 31));
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[(i>>5)] |= (1 << (i & 31));
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->objfree = p->objtotal;
	p->memtotal = p->numclusters * p->_clustsize;
	if (p->objfree == 0)
		goto clean;
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_memory_config_changed(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
		    nmd->pools[i].r_objtotal != netmap_params[i].num)
		    return 1;
	}
	return 0;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;

	if (netmap_verbose)
		D("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	/* buffers 0 and 1 are reserved: they are used as the placeholder
	 * buffers of the fake tx and rx host rings, respectively
	 * (see netmap_mem_set_ring()), so mark them as in use.
	 */
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		D("interfaces %d KB, rings %d KB, buffers %d MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);

	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}


void
netmap_mem_private_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		D("deleting %p", nmd);
	if (nmd->refcount > 0)
		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
	nm_mem_release_id(nmd);
	if (netmap_verbose)
		D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	free(nmd, M_DEVBUF);
}

static int
netmap_mem_private_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_private_finalize(struct netmap_mem_d *nmd)
{
	int err;
	NMA_LOCK(nmd);
	nmd->refcount++;
	err = netmap_mem_finalize_all(nmd);
	NMA_UNLOCK(nmd);
	return err;
}

static void
netmap_mem_private_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);
	if (--nmd->refcount <= 0)
		netmap_mem_reset_all(nmd);
	NMA_UNLOCK(nmd);
}


/*
 * allocator for private memory
 */
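/*
 * Usage sketch (hypothetical caller, illustrative parameters):
 * create an allocator for a port with 2+2 rings of 1024 slots each,
 * no extra buffers and no pipes:
 *
 *	int err;
 *	struct netmap_mem_d *nmd;
 *
 *	nmd = netmap_mem_private_new("vale0:p", 2, 1024, 2, 1024, 0, 0, &err);
 *	if (nmd == NULL)
 *		return err;
 */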
struct netmap_mem_d *
netmap_mem_private_new(const char *name, u_int txr, u_int txd,
	u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i, err;
	u_int v, maxd;

	d = malloc(sizeof(struct netmap_mem_d),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;

	err = nm_mem_assign_id(d);
	if (err)
		goto error;

	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other hand, the pipe ring dimension may differ from the
	 * parent port ring dimension. As a compromise, we allocate twice
	 * the space actually needed, computed as if the pipe rings were
	 * the same size as the parent rings.
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;

	if (netmap_verbose)
		D("req if %d*%d ring %d*%d buf %d*%d",
			p[NETMAP_IF_POOL].num,
			p[NETMAP_IF_POOL].size,
			p[NETMAP_RING_POOL].num,
			p[NETMAP_RING_POOL].size,
			p[NETMAP_BUF_POOL].num,
			p[NETMAP_BUF_POOL].size);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				name);
		err = netmap_config_obj_allocator(&d->pools[i],
				p[i].num, p[i].size);
		if (err)
			goto error;
	}

	d->flags &= ~NETMAP_MEM_FINALIZED;

	NMA_LOCK_INIT(d);

	return d;
error:
	netmap_mem_private_delete(d);
	if (perr)
		*perr = err;
	return NULL;
}


/* call with lock held */
static int
netmap_mem_global_config(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->refcount)
		/* already in use, we cannot change the configuration */
		goto out;

	if (!netmap_memory_config_changed(nmd))
		goto out;

	D("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				netmap_params[i].num, netmap_params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:
	return nmd->lasterr;
}

static int
netmap_mem_global_finalize(struct netmap_mem_d *nmd)
{
	int err;

	NMA_LOCK(nmd);

	/* update configuration if changed */
	if (netmap_mem_global_config(nmd))
		goto out;

	nmd->refcount++;

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* may happen if config is not changed */
		ND("nothing to do");
		goto out;
	}

	if (netmap_mem_finalize_all(nmd))
		goto out;

	/* backward compatibility */
	netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr;

	nmd->lasterr = 0;

out:
	if (nmd->lasterr)
		nmd->refcount--;
	err = nmd->lasterr;

	NMA_UNLOCK(nmd);

	return err;
}

int
netmap_mem_init(void)
{
	NMA_LOCK_INIT(&nm_mem);
	return (0);
}

void
netmap_mem_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY(&nm_mem);
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	if (!na->tx_rings)
		return;
	for (kring = na->tx_rings; kring != na->rx_rings; kring++) {
		ring = kring->ring;
		if (ring == NULL)
			continue;
		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
		netmap_ring_free(na->nm_mem, ring);
		kring->ring = NULL;
	}
	for (/* cont'd from above */; kring != na->tailroom; kring++) {
		ring = kring->ring;
		if (ring == NULL)
			continue;
		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
		netmap_ring_free(na->nm_mem, ring);
		kring->ring = NULL;
	}
}

/*
 * Allocate netmap rings and buffers for this card (the function
 * takes NMA_LOCK itself).
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
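/*
 * That layout is: tx_rings[0..ntx-1] | rx_rings[0..nrx-1] | tailroom,
 * which is why the two loops below simply advance kring from
 * na->tx_rings up to na->rx_rings and then up to na->tailroom.
 */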
int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	struct netmap_ring *ring;
	u_int len, ndesc;
	struct netmap_kring *kring;
	u_int i;

	NMA_LOCK(na->nm_mem);

	/* transmit rings */
	for (i = 0, kring = na->tx_rings; kring != na->rx_rings; kring++, i++) {
		if (kring->ring) {
			ND("%s %ld already created", kring->name, kring - na->tx_rings);
			continue; /* already created by somebody else */
		}
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring");
			goto cleanup;
		}
		ND("txring at %p", ring);
		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("%s h %d c %d t %d", kring->name,
			ring->head, ring->cur, ring->tail);
		ND("initializing slots for txring");
		if (i != na->num_tx_rings || (na->na_flags & NAF_HOST_RINGS)) {
			/* this is a real ring */
			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
				D("Cannot allocate buffers for tx_ring");
				goto cleanup;
			}
		} else {
			/* this is a fake tx ring, set all indices to 0 */
			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
		}
	}

	/* receive rings */
	for (i = 0 /* kring cont'd from above */; kring != na->tailroom; kring++, i++) {
		if (kring->ring) {
			ND("%s %ld already created", kring->name, kring - na->rx_rings);
			continue; /* already created by somebody else */
		}
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring");
			goto cleanup;
		}
		ND("rxring at %p", ring);
		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
		        na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		*(int *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("%s h %d c %d t %d", kring->name,
			ring->head, ring->cur, ring->tail);
		ND("initializing slots for rxring %p", ring);
		if (i != na->num_rx_rings || (na->na_flags & NAF_HOST_RINGS)) {
			/* this is a real ring */
			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
				D("Cannot allocate buffers for rx_ring");
				goto cleanup;
			}
		} else {
			/* this is a fake rx ring, set all indices to 1 */
			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 1);
		}
	}

	NMA_UNLOCK(na->nm_mem);

	return 0;

cleanup:
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);

	return ENOMEM;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	NMA_LOCK(na->nm_mem);

	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);
}


/*
 * Allocate the per-fd structure netmap_if (the function takes
 * NMA_LOCK itself).
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
struct netmap_if *
netmap_mem_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ntx, nrx;

	/* account for the (possibly fake) host rings */
	ntx = na->num_tx_rings + 1;
	nrx = na->num_rx_rings + 1;
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
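	/*
	 * Concretely, ring_ofs[0..ntx-1] will hold the tx ring offsets
	 * and ring_ofs[ntx..ntx+nrx-1] the rx ones, all relative to the
	 * nifp itself (see the two loops below).
	 */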

	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ);

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
	}

	NMA_UNLOCK(na->nm_mem);

	return (nifp);
}

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	NMA_LOCK(na->nm_mem);
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);
}

static void
netmap_mem_global_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);

	nmd->refcount--;
	if (netmap_verbose)
		D("refcount = %d", nmd->refcount);

	NMA_UNLOCK(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd)
{
	return nmd->finalize(nmd);
}

void
netmap_mem_deref(struct netmap_mem_d *nmd)
{
	nmd->deref(nmd);
}