xref: /freebsd/sys/dev/netmap/netmap_mem2.h (revision 907b59d76938e654f0d040a888e8dfca3de1e222)
/*
 * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * (New) memory allocator for netmap
 */

/*
 * This allocator creates three memory pools:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * These pools contain the netmap objects. Each pool is made of a number
 * of clusters, each a multiple of the page size and each containing an
 * integer number of objects. The clusters are contiguous in user space
 * but not in the kernel.
 * Only nm_buf_pool needs to be dma-able, but for convenience we use the
 * same type of allocator for all three.
 *
 * Once mapped, the three pools are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain the offsets of the objects in the
 * userspace mapping. When (at init time) we write these offsets, we
 * first find the index of the object and from that derive its offset
 * from the beginning of the region.
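 *
 * A minimal sketch of that index-to-offset computation (illustrative
 * only; the names below are made up and the authoritative code lives
 * in netmap_mem2.c). Given the pool geometry, the offset of object i
 * inside its own pool is roughly:
 *
 *	ofs = (i / objs_per_cluster) * cluster_size +
 *	      (i % objs_per_cluster) * obj_size;
 *
 * and the value exported to userspace is that plus the offset of the
 * pool itself inside the mapped region.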
 *
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given maximum cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
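 *
 * A minimal sketch of that brute-force search (illustrative only, not
 * the actual code; variable names are made up):
 *
 *	best_waste = (size_t)-1;
 *	for (clustsize = PAGE_SIZE; clustsize <= max_clustsize;
 *	     clustsize += PAGE_SIZE) {
 *		nobjs = clustsize / objsize;
 *		waste = clustsize - nobjs * objsize;
 *		if (nobjs > 0 && waste < best_waste) {
 *			best_waste = waste;
 *			best_clustsize = clustsize;
 *		}
 *	}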
 *
 * Objects are aligned to the cache line (64 bytes), rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are
 * not too worried about performance.
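 *
 * As an illustration only (names made up, the real scan is in
 * netmap_mem2.c), with one bit per object and a set bit meaning
 * "free", finding and claiming a free object amounts to:
 *
 *	for (i = 0; i < objtotal; i++) {
 *		if (bitmap[i / 32] & (1U << (i % 32))) {
 *			bitmap[i / 32] &= ~(1U << (i % 32));
 *			return i;
 *		}
 *	}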
 *
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with the desired values, the sysctls also export the
 * absolute minimum and maximum values, which cannot be overridden.
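 *
 * For example, on FreeBSD the knobs live under dev.netmap (exact
 * names may differ across versions), e.g.:
 *
 *	sysctl dev.netmap.buf_size=4096
 *	sysctl dev.netmap.buf_num=163840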
 *
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring:
 *	variable size, 8 bytes per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32 Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
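 *	(As a worked example: 4096 slots * 8 bytes = 32768 bytes for the
 *	slot array alone, so the default of 9 4-KB pages, 36864 bytes,
 *	leaves room for the fixed part of the ring.)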
 *
 * struct netmap_buffer:
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace, avoiding copies.
 *	Each buffer must hold a full frame (e.g. 1518 bytes, or more for
 *	vlans, jumbo frames etc.), must be nicely aligned, and some NICs
 *	restrict the size to a multiple of 1K or so. The default is 2K.
 */
#ifndef _NET_NETMAP_MEM2_H_
#define _NET_NETMAP_MEM2_H_

/* We implement two kinds of netmap_mem_d structures:
 *
 * - global: used by hardware NICs;
 *
 * - private: used by VALE ports.
 *
 * In both cases, the netmap_mem_d structure has the same lifetime as the
 * netmap_adapter of the corresponding NIC or port. It is the responsibility of
 * the client code to delete the private allocator when the associated
 * netmap_adapter is freed (this is implemented by the NAF_MEM_OWNER flag in
 * netmap.c).  The 'refcount' field counts the number of active users of the
 * structure. The global allocator uses this information to prevent/allow
 * reconfiguration. The private allocators release all their memory when there
 * are no active users.  By 'active user' we mean an existing netmap_priv
 * structure holding a reference to the allocator.
 */
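
/*
 * Typical refcounting pattern (an illustrative sketch only; field names
 * are made up): a client that keeps a pointer to an allocator takes a
 * reference with netmap_mem_get() and drops it with netmap_mem_put()
 * when the pointer goes away, e.g.
 *
 *	priv->mem = na->nm_mem;
 *	netmap_mem_get(priv->mem);
 *	...
 *	netmap_mem_put(priv->mem);
 *	priv->mem = NULL;
 */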

extern struct netmap_mem_d nm_mem;

void	   netmap_mem_get_lut(struct netmap_mem_d *, struct netmap_lut *);
vm_paddr_t netmap_mem_ofstophys(struct netmap_mem_d *, vm_ooffset_t);
int	   netmap_mem_finalize(struct netmap_mem_d *, struct netmap_adapter *);
int	   netmap_mem_init(void);
void	   netmap_mem_fini(void);
struct netmap_if * netmap_mem_if_new(struct netmap_adapter *);
void	   netmap_mem_if_delete(struct netmap_adapter *, struct netmap_if *);
int	   netmap_mem_rings_create(struct netmap_adapter *);
void	   netmap_mem_rings_delete(struct netmap_adapter *);
void	   netmap_mem_deref(struct netmap_mem_d *, struct netmap_adapter *);
int	   netmap_mem_get_info(struct netmap_mem_d *, u_int *size, u_int *memflags, uint16_t *id);
ssize_t    netmap_mem_if_offset(struct netmap_mem_d *, const void *vaddr);
struct netmap_mem_d* netmap_mem_private_new(const char *name,
	u_int txr, u_int txd, u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes,
	int *error);
void	   netmap_mem_delete(struct netmap_mem_d *);
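
/*
 * A rough sketch of how a registration path is expected to use this
 * API (illustrative only, error handling omitted; the authoritative
 * sequence is in netmap.c):
 *
 *	error = netmap_mem_finalize(na->nm_mem, na);	allocate and map
 *	error = netmap_mem_rings_create(na);		netmap_ring + buffers
 *	nifp = netmap_mem_if_new(na);			per-descriptor netmap_if
 *	...
 *	netmap_mem_if_delete(na, nifp);
 *	netmap_mem_rings_delete(na);
 *	netmap_mem_deref(na->nm_mem, na);		undo the finalize
 */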

//#define NM_DEBUG_MEM_PUTGET 1

#ifdef NM_DEBUG_MEM_PUTGET

#define netmap_mem_get(nmd)				\
	do {						\
		__netmap_mem_get(nmd, __FUNCTION__, __LINE__);	\
	} while (0)

#define netmap_mem_put(nmd)				\
	do {						\
		__netmap_mem_put(nmd, __FUNCTION__, __LINE__);	\
	} while (0)

void __netmap_mem_get(struct netmap_mem_d *, const char *, int);
void __netmap_mem_put(struct netmap_mem_d *, const char *, int);
#else /* !NM_DEBUG_MEM_PUTGET */

void netmap_mem_get(struct netmap_mem_d *);
void netmap_mem_put(struct netmap_mem_d *);

#endif /* !NM_DEBUG_MEM_PUTGET */

#define NETMAP_MEM_PRIVATE	0x2	/* allocator uses private address space */
#define NETMAP_MEM_IO		0x4	/* the underlying memory is mmapped I/O */
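
/*
 * Example use of the flags above (illustrative only): after a
 * successful netmap_mem_get_info() call, 'memflags' tells the caller
 * whether the region is private and/or backed by I/O memory:
 *
 *	u_int memsize, memflags;
 *	uint16_t id;
 *
 *	if (netmap_mem_get_info(nmd, &memsize, &memflags, &id) == 0 &&
 *	    (memflags & NETMAP_MEM_PRIVATE)) {
 *		... the memory is not shared with the global allocator ...
 *	}
 */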

uint32_t netmap_extra_alloc(struct netmap_adapter *, uint32_t *, uint32_t n);

#endif /* _NET_NETMAP_MEM2_H_ */