xref: /freebsd/sys/dev/netmap/netmap_mem2.c (revision a10cee30c94cf5944826d2a495e9cdf339dfbcc8)
/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_mem2.c 11445 2012-07-30 10:49:07Z luigi $
 *
 * New memory allocator for netmap
 */

/*
 * The new version allocates three regions:
 *	nm_if_pool      for the struct netmap_if
 *	nm_ring_pool    for the struct netmap_ring
 *	nm_buf_pool     for the packet buffers.
 *
 * All regions need to be page-sized as we export them to
 * userspace through mmap. Only the latter needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain the offsets of the objects within
 * the mapped region. When (at init time) we write these offsets,
 * we find out the index of the object, and from there locate the
 * offset from the beginning of the region.
 *
 * Allocator for a pool of memory objects of the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * To be polite with the cache, objects are aligned to
 * the cache line, or 64 bytes. Sizes are rounded to multiples of 64.
 * For each object we have
 * one entry in the bitmap to signal the state. Allocation scans
 * the bitmap, but since this is done only on attach, we are not
 * too worried about performance.
 */
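
/*
 * Illustrative sketch, not built: the arithmetic implied by the layout
 * above, turning a userspace offset into a (pool index, offset within
 * the pool) pair.  The pool sizes are made-up example values; the real
 * implementation is netmap_ofstophys() below.
 */
#if 0
	uint64_t pool_size[3] = { 512*1024, 7*1024*1024, 200*1024*1024 };
	uint64_t ofs = 600*1024;	/* example offset from userspace */
	int i;

	for (i = 0; i < 3 && ofs >= pool_size[i]; i++)
		ofs -= pool_size[i];	/* skip the regions before this one */
	/* here i == 1 (the ring pool) and ofs == 88*1024 within it */
#endif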

/*
 *	MEMORY SIZES:
 *
 * (all the parameters below will become tunables)
 *
 * struct netmap_if is variable size but small.
 * Assuming each NIC has 8+2 rings, (4+1 tx, 4+1 rx) the netmap_if
 * uses 120 bytes on a 64-bit machine.
 * We allocate NETMAP_IF_MAX_SIZE (1024) which should work even for
 * cards with 48 ring pairs.
 * The total number of 'struct netmap_if' could be slightly larger
 * than the total number of rings on all interfaces on the system.
 */
#define NETMAP_IF_MAX_SIZE      1024
#define NETMAP_IF_MAX_NUM       512

/*
 * netmap rings are up to 2..4k descriptors, 8 bytes each,
 * plus some glue at the beginning (32 bytes).
 * We set the default ring size to 9 pages (36K) and enable
 * a few hundred of them.
 */
#define NETMAP_RING_MAX_SIZE    (9*PAGE_SIZE)
#define NETMAP_RING_MAX_NUM     200	/* approx 8MB */

/*
 * Buffers: the more the better. Buffer size is NETMAP_BUF_SIZE,
 * 2k or slightly less, aligned to 64 bytes.
 * A large 10G interface can have 2k*18 = 36k buffers per interface,
 * or about 72MB of memory. It is up to us to use more.
 */
#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM      100000  /* 200MB */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM      20000   /* 40MB */
#endif


struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects */
	u_int objfree;		/* number of free objects */
	u_int clustentries;	/* actual objects per cluster */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
};
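
/*
 * Illustrative sketch, not built: the geometry relations that
 * netmap_new_obj_allocator() establishes for a pool when all clusters
 * are allocated successfully, written as a hypothetical self-check
 * helper (the function name is ours, not part of this file).
 */
#if 0
static void
netmap_obj_pool_check(const struct netmap_obj_pool *p)
{
	/* total memory is clusters times cluster size */
	KASSERT(p->_memtotal == p->_numclusters * p->_clustsize,
	    ("bad _memtotal"));
	/* objects are spread evenly across the clusters */
	KASSERT(p->objtotal == p->_numclusters * p->clustentries,
	    ("bad objtotal"));
	/* clusters are an integral number of pages */
	KASSERT(p->_clustsize % PAGE_SIZE == 0, ("bad _clustsize"));
}
#endif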

struct netmap_mem_d {
	NM_LOCK_T nm_mtx; /* protect the allocator ? */
	u_int nm_totalsize; /* shorthand */

	/* pointers to the three allocators */
	struct netmap_obj_pool *nm_if_pool;
	struct netmap_obj_pool *nm_ring_pool;
	struct netmap_obj_pool *nm_buf_pool;
};

struct lut_entry *netmap_buffer_lut;	/* exported */


/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * pre-allocated memory is not contiguous, but fragmented across different
 * clusters and smaller memory allocators. Consequently, first of all we
 * need to find which allocator owns the provided offset, then we need to
 * find the physical address associated with the target page (this is done
 * using the look-up table).
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	const struct netmap_obj_pool *p[] = {
		nm_mem->nm_if_pool,
		nm_mem->nm_ring_pool,
		nm_mem->nm_buf_pool };
	int i;
	vm_offset_t o = offset;

	for (i = 0; i < 3; offset -= p[i]->_memtotal, i++) {
		if (offset >= p[i]->_memtotal)
			continue;
		// XXX now scan the clusters
		return p[i]->lut[offset / p[i]->_objsize].paddr +
			offset % p[i]->_objsize;
	}
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[0]->_memtotal, p[0]->_memtotal + p[1]->_memtotal,
		p[0]->_memtotal + p[1]->_memtotal + p[2]->_memtotal);
	return 0;	// XXX bad address
}
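
/*
 * Illustrative sketch, not built: how a cdev mmap handler can use
 * netmap_ofstophys(), one page at a time.  The handler name is ours;
 * the signature follows the FreeBSD d_mmap_t convention.
 */
#if 0
static int
netmap_example_mmap(struct cdev *dev, vm_ooffset_t offset,
    vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
{
	(void)dev; (void)nprot; (void)memattr;
	*paddr = netmap_ofstophys(offset);
	/* note: 0 doubles as the error value, see the XXX above */
	return (*paddr ? 0 : EINVAL);
}
#endif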

/*
 * We store objects by kernel address, so we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
	netmap_obj_offset(nm_mem->nm_if_pool, (v))

#define netmap_ring_offset(v)					\
    (nm_mem->nm_if_pool->_memtotal + 				\
	netmap_obj_offset(nm_mem->nm_ring_pool, (v)))

#define netmap_buf_offset(v)					\
    (nm_mem->nm_if_pool->_memtotal +				\
	nm_mem->nm_ring_pool->_memtotal +			\
	netmap_obj_offset(nm_mem->nm_buf_pool, (v)))


static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: run out of memory", p->name);
		return NULL;
	}

	/* termination is guaranteed by p->objfree */
	while (vaddr == NULL) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	return vaddr;
}
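
/*
 * Illustrative sketch, not built: the bitmap scan above, reduced to a
 * standalone helper so the find-first-set logic can be tested on its
 * own.  'bitmap_alloc_slot' is a hypothetical name, not part of this
 * file's API; ffs() comes from libkern (or <strings.h> in userspace).
 */
#if 0
static int
bitmap_alloc_slot(uint32_t *bitmap, int nwords)
{
	int i, j;

	for (i = 0; i < nwords; i++) {
		if (bitmap[i] == 0)	/* word fully allocated */
			continue;
		j = ffs(bitmap[i]) - 1;	/* lowest set (i.e. free) bit */
		bitmap[i] &= ~(1U << j);	/* mark it in use */
		return i * 32 + j;
	}
	return -1;	/* no free slot */
}
#endif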


/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* the given address is outside the scope of this cluster */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	ND("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(nm_mem->nm_if_pool, len)
#define netmap_if_free(v)	netmap_obj_free_va(nm_mem->nm_if_pool, (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(nm_mem->nm_ring_pool, len)
#define netmap_buf_malloc()			\
	netmap_obj_malloc(nm_mem->nm_buf_pool, NETMAP_BUF_SIZE)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
    (netmap_obj_offset(nm_mem->nm_buf_pool, (v)) / nm_mem->nm_buf_pool->_objsize)


static void
netmap_new_bufs(struct netmap_if *nifp,
                struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	uint32_t i = 0;	/* slot counter */

	(void)nifp;	/* UNUSED */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc();
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}

		slot[i].buf_idx = netmap_buf_index(vaddr);
		KASSERT(slot[i].buf_idx != 0,
		    ("Assigning buf_idx=0 to just created slot"));
		slot[i].len = p->_objsize;
		slot[i].flags = NS_BUF_CHANGED; // XXX GAETANO hack
	}

	ND("allocated %d buffers, %d available", n, p->objfree);
	return;

cleanup:
	/* i is unsigned, so count down without ever testing i >= 0 */
	while (i > 0) {
		i--;
		netmap_obj_free(nm_mem->nm_buf_pool, slot[i].buf_idx);
	}
}


static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(nm_mem->nm_buf_pool, i);
}


/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	if (p->lut) {
		int i;
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);
	}
	bzero(p, sizeof(*p));
	free(p, M_NETMAP);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 *	so we cannot afford gaps at the end of a cluster.
 */
static struct netmap_obj_pool *
netmap_new_obj_allocator(const char *name, u_int objtotal, u_int objsize)
{
	struct netmap_obj_pool *p;
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return NULL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/*
	 * Compute the number of objects per cluster using a brute-force
	 * approach: given a max cluster size, we try to fill it with
	 * objects, keeping track of the wasted space to the next page
	 * boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
		objsize, clustsize, clustentries);

	p = malloc(sizeof(struct netmap_obj_pool), M_NETMAP,
	    M_WAITOK | M_ZERO);
	if (p == NULL) {
		D("Unable to create '%s' allocator", name);
		return NULL;
	}
	/*
	 * Allocate and initialize the lookup table.
	 *
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	strncpy(p->name, name, sizeof(p->name));
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_objsize = objsize;
	p->_memtotal = p->_numclusters * p->_clustsize;

	p->lut = malloc(sizeof(struct lut_entry) * p->objtotal,
	    M_NETMAP, M_WAITOK | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table for '%s' allocator", name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + clustentries;
		char *clust;

		clust = contigmalloc(clustsize, M_NETMAP, M_WAITOK | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, name);
			lim = i / 2;
			for (; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
				if (i % clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += objsize) {
			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
	    p->_numclusters, p->_clustsize >> 10,
	    p->_memtotal >> 10, name);

	return p;

clean:
	netmap_destroy_obj_allocator(p);
	return NULL;
}
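
/*
 * Illustrative sketch, not built: the cluster sizing search above as a
 * standalone userspace program, handy to see what clustentries comes
 * out for a given object size.  The EX_* constants mirror PAGE_SIZE
 * and MAX_CLUSTSIZE and are assumptions for the example.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SIZE		4096
#define EX_MAX_CLUSTSIZE	(1<<17)

int
main(void)
{
	unsigned objsize = 2048;	/* e.g. a 2K packet buffer */
	unsigned clustentries = 0, i, delta;

	for (i = 1; i * objsize <= EX_MAX_CLUSTSIZE; i++) {
		delta = (i * objsize) % EX_PAGE_SIZE;
		if (delta == 0) {	/* exact fit, stop */
			clustentries = i;
			break;
		}
		/* keep the candidate wasting least to the page boundary */
		if (delta > (clustentries * objsize) % EX_PAGE_SIZE)
			clustentries = i;
	}
	printf("objsize %u -> %u objects per cluster\n",
	    objsize, clustentries);
	return 0;
}
#endif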

static int
netmap_memory_init(void)
{
	struct netmap_obj_pool *p;

	nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
			      M_WAITOK | M_ZERO);
	if (nm_mem == NULL)
		goto clean;

	p = netmap_new_obj_allocator("netmap_if",
	    NETMAP_IF_MAX_NUM, NETMAP_IF_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_if_pool = p;

	p = netmap_new_obj_allocator("netmap_ring",
	    NETMAP_RING_MAX_NUM, NETMAP_RING_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_ring_pool = p;

	p = netmap_new_obj_allocator("netmap_buf",
	    NETMAP_BUF_MAX_NUM, NETMAP_BUF_SIZE);
	if (p == NULL)
		goto clean;
	netmap_total_buffers = p->objtotal;
	netmap_buffer_lut = p->lut;
	nm_mem->nm_buf_pool = p;
	netmap_buffer_base = p->lut[0].vaddr;

	mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL,
		 MTX_DEF);
	nm_mem->nm_totalsize =
	    nm_mem->nm_if_pool->_memtotal +
	    nm_mem->nm_ring_pool->_memtotal +
	    nm_mem->nm_buf_pool->_memtotal;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem->nm_if_pool->_memtotal >> 10,
	    nm_mem->nm_ring_pool->_memtotal >> 10,
	    nm_mem->nm_buf_pool->_memtotal >> 20);
	return 0;

clean:
	if (nm_mem) {
		netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
		netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
		free(nm_mem, M_NETMAP);
	}
	return ENOMEM;
}


static void
netmap_memory_fini(void)
{
	if (!nm_mem)
		return;
	netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_buf_pool);
	mtx_destroy(&nm_mem->nm_mtx);
	free(nm_mem, M_NETMAP);
}


static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc;
	u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	struct netmap_kring *kring;

	NMA_LOCK();
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL) {
		NMA_UNLOCK();
		return NULL;
	}

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) { /* already setup, we are done */
		NMA_UNLOCK();
		goto final;
	}

	/*
	 * First instance, allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p", i, ring);

		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem->nm_if_pool->_memtotal +
		        nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}
	NMA_UNLOCK();
#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);
#endif
final:
	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);
cleanup:
	// XXX missing
	NMA_UNLOCK();
	return NULL;
}
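
/*
 * Illustrative sketch, not built: how a userspace client consumes the
 * offsets computed above.  'fd', 'memsize' and 'nifp_ofs' stand for
 * the /dev/netmap descriptor and values obtained from the NIOCGINFO
 * and NIOCREGIF ioctls; the names are assumptions for the example.
 */
#if 0
#include <sys/mman.h>
#include <net/netmap.h>

static char *
example_first_buf(int fd, size_t memsize, off_t nifp_ofs)
{
	char *mem = mmap(NULL, memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	struct netmap_if *nifp;
	struct netmap_ring *txr;

	if (mem == MAP_FAILED)
		return NULL;
	nifp = (struct netmap_if *)(mem + nifp_ofs);
	/* ring_ofs[] entries are relative to nifp (see 'base' above) */
	txr = (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[0]);
	/* buf_ofs is relative to the ring, so slot 0's buffer is at: */
	return (char *)txr + txr->buf_ofs +
	    txr->slot[0].buf_idx * txr->nr_buf_size;
}
#endif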

static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;
	for (i = 0; i < na->num_tx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->tx_rings[i].ring);
	for (i = 0; i < na->num_rx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->rx_rings[i].ring);
}
722