/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Matteo Landi
 * Copyright (C) 2012-2016 Luigi Rizzo
 * Copyright (C) 2012-2016 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * (New) memory allocator for netmap
 */

/*
 * This allocator creates three memory pools that contain the netmap objects:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * Each pool is made of a number of clusters, each a multiple of the page
 * size and containing an integer number of objects.
 * The clusters are contiguous in user space but not in the kernel.
 * Only nm_buf_pool needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three pools are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain the offsets of the objects in userspace.
 * When (at init time) we write these offsets, we derive the index
 * of each object and, from there, its offset from the beginning
 * of the region.
 *
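 * As a minimal sketch (illustrative only: clustentries, clustsize and
 * objsize are assumed names, not the fields used by the implementation),
 * the offset of object i inside its pool can be computed as:
 *
 *	size_t
 *	obj_offset(size_t i, size_t clustentries, size_t clustsize,
 *	    size_t objsize)
 *	{
 *		// full clusters before object i, plus its position
 *		// inside its own cluster
 *		return (i / clustentries) * clustsize +
 *		       (i % clustentries) * objsize;
 *	}
 *
 * The offset exported to userspace adds the start of the object's pool
 * within the contiguous nm_if/nm_ring/nm_buf mapping shown above.
 *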
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
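 * That brute-force search is roughly equivalent to the following sketch
 * (illustrative only, not the actual code; roundup() rounds its first
 * argument up to a multiple of the second):
 *
 *	size_t
 *	best_clustsize(size_t objsize, size_t max_clustsize, size_t pagesize)
 *	{
 *		size_t n, best = pagesize, best_waste = (size_t)-1;
 *
 *		for (n = 1; n * objsize <= max_clustsize; n++) {
 *			size_t c = roundup(n * objsize, pagesize);
 *			size_t waste = c - n * objsize;
 *
 *			if (c > max_clustsize)
 *				break;
 *			if (waste < best_waste) {
 *				best_waste = waste;
 *				best = c;
 *			}
 *		}
 *		return best;
 *	}
 *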
 * Objects are aligned to the cache line (64 bytes), rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are not
 * too worried about performance.
 *
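 * A possible shape of that scan (illustrative only; here a set bit is
 * assumed to mean "object free"):
 *
 *	// return the index of a free object, or -1 if none is left,
 *	// and mark it as allocated in the bitmap
 *	static int
 *	bitmap_alloc(uint32_t *bitmap, int nwords)
 *	{
 *		int w;
 *
 *		for (w = 0; w < nwords; w++) {
 *			int b;
 *
 *			if (bitmap[w] == 0)
 *				continue;		// all 32 in use
 *			b = __builtin_ctz(bitmap[w]);	// first free bit
 *			bitmap[w] &= ~(1U << b);	// mark as in use
 *			return w * 32 + b;
 *		}
 *		return -1;
 *	}
 *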
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with the desired values, the sysctls also export the absolute
 * minimum and maximum values, which cannot be overridden.
 *
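 * In other words, a value requested through sysctl is only honoured
 * within those hard limits, roughly (illustrative only):
 *
 *	static u_int
 *	clamp_objtotal(u_int req, u_int hard_min, u_int hard_max)
 *	{
 *		if (req < hard_min)
 *			return hard_min;
 *		if (req > hard_max)
 *			return hard_max;
 *		return req;
 *	}
 *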
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring
 *	variable size, 8 bytes per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
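 *	(At the stated 8 bytes per slot, a 4096-slot ring already needs
 *	32 KB for the slot array alone, hence the 9-page default.)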
 *
 * struct netmap_buffer
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace avoiding copies.
 *	Must contain a full frame (e.g. 1518 bytes, or more for VLANs,
 *	jumbo frames, etc.), should be nicely aligned, and some NICs
 *	restrict the size to a multiple of 1K or so. The default is 2K.
 */
#ifndef _NET_NETMAP_MEM2_H_
#define _NET_NETMAP_MEM2_H_


/* We implement two kinds of netmap_mem_d structures:
 *
 * - global: used by hardware NICs;
 *
 * - private: used by VALE ports.
 *
 * In both cases, the netmap_mem_d structure has the same lifetime as the
 * netmap_adapter of the corresponding NIC or port. It is the responsibility of
 * the client code to delete the private allocator when the associated
 * netmap_adapter is freed (this is implemented by the NAF_MEM_OWNER flag in
 * netmap.c).  The 'refcount' field counts the number of active users of the
 * structure. The global allocator uses this information to prevent/allow
 * reconfiguration. The private allocators release all their memory when there
 * are no active users.  By 'active user' we mean an existing netmap_priv
 * structure holding a reference to the allocator.
 */

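/*
 * Hedged usage sketch of the reference counting interface declared below
 * ('na' stands for a struct netmap_adapter; the surrounding code is an
 * assumption, not taken from netmap.c):
 *
 *	struct netmap_mem_d *nmd = ...;
 *
 *	na->nm_mem = netmap_mem_get(nmd);	// become an active user
 *	...
 *	netmap_mem_put(na->nm_mem);		// drop the reference; a
 *						// private allocator with no
 *						// users releases its memory
 */
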
extern struct netmap_mem_d nm_mem;
typedef uint16_t nm_memid_t;

int	   netmap_mem_get_lut(struct netmap_mem_d *, struct netmap_lut *);
nm_memid_t netmap_mem_get_id(struct netmap_mem_d *);
vm_paddr_t netmap_mem_ofstophys(struct netmap_mem_d *, vm_ooffset_t);
#ifdef _WIN32
PMDL win32_build_user_vm_map(struct netmap_mem_d* nmd);
#endif
int	   netmap_mem_finalize(struct netmap_mem_d *, struct netmap_adapter *);
int	   netmap_mem_init(void);
void	   netmap_mem_fini(void);
struct netmap_if * netmap_mem_if_new(struct netmap_adapter *, struct netmap_priv_d *);
void	   netmap_mem_if_delete(struct netmap_adapter *, struct netmap_if *);
int	   netmap_mem_rings_create(struct netmap_adapter *);
void	   netmap_mem_rings_delete(struct netmap_adapter *);
int	   netmap_mem_deref(struct netmap_mem_d *, struct netmap_adapter *);
int	   netmap_mem2_get_pool_info(struct netmap_mem_d *, u_int, u_int *, u_int *);
int	   netmap_mem_get_info(struct netmap_mem_d *, uint64_t *size,
				u_int *memflags, nm_memid_t *id);
ssize_t    netmap_mem_if_offset(struct netmap_mem_d *, const void *vaddr);
struct netmap_mem_d* netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
		u_int extra_bufs, u_int npipes, int *error);

#define netmap_mem_get(d) __netmap_mem_get(d, __FUNCTION__, __LINE__)
#define netmap_mem_put(d) __netmap_mem_put(d, __FUNCTION__, __LINE__)
struct netmap_mem_d* __netmap_mem_get(struct netmap_mem_d *, const char *, int);
void __netmap_mem_put(struct netmap_mem_d *, const char *, int);
struct netmap_mem_d* netmap_mem_find(nm_memid_t);
unsigned netmap_mem_bufsize(struct netmap_mem_d *nmd);

#ifdef WITH_EXTMEM
struct netmap_mem_d* netmap_mem_ext_create(uint64_t, struct nmreq_pools_info *, int *);
#else /* !WITH_EXTMEM */
#define netmap_mem_ext_create(nmr, _perr) \
	({ int *perr = _perr; if (perr) *(perr) = EOPNOTSUPP; NULL; })
#endif /* WITH_EXTMEM */

#ifdef WITH_PTNETMAP_GUEST
struct netmap_mem_d* netmap_mem_pt_guest_new(struct ifnet *,
					     unsigned int nifp_offset,
					     unsigned int memid);
struct ptnetmap_memdev;
struct netmap_mem_d* netmap_mem_pt_guest_attach(struct ptnetmap_memdev *, uint16_t);
int netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *, struct ifnet *);
#endif /* WITH_PTNETMAP_GUEST */

int netmap_mem_pools_info_get(struct nmreq_pools_info *,
				struct netmap_mem_d *);

#define NETMAP_MEM_PRIVATE	0x2	/* allocator uses private address space */
#define NETMAP_MEM_IO		0x4	/* the underlying memory is mmapped I/O */
#define NETMAP_MEM_EXT		0x10	/* external memory (not remappable) */

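/*
 * Hedged example of querying an allocator and testing the flags above
 * ('nmd' is an existing struct netmap_mem_d pointer; local variable
 * names are illustrative):
 *
 *	uint64_t size;
 *	u_int memflags;
 *	nm_memid_t id;
 *
 *	if (netmap_mem_get_info(nmd, &size, &memflags, &id) == 0 &&
 *	    (memflags & NETMAP_MEM_PRIVATE) == 0) {
 *		// nmd is not a private allocator
 *	}
 */
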
uint32_t netmap_extra_alloc(struct netmap_adapter *, uint32_t *, uint32_t n);

#ifdef WITH_EXTMEM
#include <net/netmap_virt.h>
struct nm_os_extmem; /* opaque */
struct nm_os_extmem *nm_os_extmem_create(unsigned long, struct nmreq_pools_info *, int *perror);
char *nm_os_extmem_nextpage(struct nm_os_extmem *);
int nm_os_extmem_nr_pages(struct nm_os_extmem *);
int nm_os_extmem_isequal(struct nm_os_extmem *, struct nm_os_extmem *);
void nm_os_extmem_delete(struct nm_os_extmem *);
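
/*
 * Hedged sketch of walking the pages of an external memory area with the
 * interface above ('usrptr' and 'pi' stand for the caller's user-space
 * address and pool description; return conventions are assumptions):
 *
 *	struct nm_os_extmem *os;
 *	int err, i, npages;
 *	char *page;
 *
 *	os = nm_os_extmem_create(usrptr, &pi, &err);	// pin user memory
 *	if (os == NULL)
 *		return err;
 *	npages = nm_os_extmem_nr_pages(os);
 *	for (i = 0; i < npages; i++) {
 *		page = nm_os_extmem_nextpage(os);	// next pinned page,
 *							// kernel address
 *		...
 *	}
 *	nm_os_extmem_delete(os);			// unpin and free
 */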
#endif /* WITH_EXTMEM */

#endif