/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Vincenzo Maffione <vmaffione@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/if.h>
#include <net/netmap.h>
#include <net/netmap_virt.h>
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

#include <assert.h>

#include "debug.h"
#include "iov.h"
#include "mevent.h"
#include "net_backends.h"
#include "net_backends_priv.h"

/* The virtio-net features supported by netmap. */
#define NETMAP_FEATURES (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_TSO4 | \
		VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_UFO | \
		VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | \
		VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_UFO)

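/*
 * Per-backend private state: the netmap port name and descriptor, the id
 * of the netmap memory region the port is bound to, pointers to the first
 * TX and RX rings, and the mevent handler (plus its callback and argument)
 * used to deliver RX notifications to the device model.
 */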
struct netmap_priv {
	char ifname[IFNAMSIZ];
	struct nm_desc *nmd;
	uint16_t memid;
	struct netmap_ring *rx;
	struct netmap_ring *tx;
	struct mevent *mevp;
	net_be_rxeof_t cb;
	void *cb_param;
};

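/* Initialize a (legacy) struct nmreq addressing the given netmap port. */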
static void
nmreq_init(struct nmreq *req, char *ifname)
{

	memset(req, 0, sizeof(*req));
	strlcpy(req->nr_name, ifname, sizeof(req->nr_name));
	req->nr_version = NETMAP_API;
}

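/*
 * Ask the netmap port to use the given virtio-net header length on its
 * packets, via the NETMAP_BDG_VNET_HDR subcommand of NIOCREGIF.
 */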
static int
netmap_set_vnet_hdr_len(struct net_backend *be, int vnet_hdr_len)
{
	int err;
	struct nmreq req;
	struct netmap_priv *priv = NET_BE_PRIV(be);

	nmreq_init(&req, priv->ifname);
	req.nr_cmd = NETMAP_BDG_VNET_HDR;
	req.nr_arg1 = vnet_hdr_len;
	err = ioctl(be->fd, NIOCREGIF, &req);
	if (err) {
		EPRINTLN("Unable to set vnet header length %d", vnet_hdr_len);
		return (err);
	}

	be->be_vnet_hdr_len = vnet_hdr_len;

	return (0);
}

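/*
 * Check whether the port accepts the given virtio-net header length by
 * trying to set it and, if that succeeds, restoring the previous value.
 */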
static int
netmap_has_vnet_hdr_len(struct net_backend *be, unsigned vnet_hdr_len)
{
	unsigned prev_hdr_len = be->be_vnet_hdr_len;
	int ret;

	if (vnet_hdr_len == prev_hdr_len) {
		return (1);
	}

	ret = netmap_set_vnet_hdr_len(be, vnet_hdr_len);
	if (ret) {
		return (0);
	}

	netmap_set_vnet_hdr_len(be, prev_hdr_len);

	return (1);
}

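/*
 * Report the virtio-net features we can offer: the offloads listed in
 * NETMAP_FEATURES, but only if the port supports the virtio-net header.
 */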
static uint64_t
netmap_get_cap(struct net_backend *be)
{

	return (netmap_has_vnet_hdr_len(be, VNET_HDR_LEN) ?
	    NETMAP_FEATURES : 0);
}

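/* Commit to the virtio-net header length negotiated with the guest. */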
static int
netmap_set_cap(struct net_backend *be, uint64_t features __unused,
    unsigned vnet_hdr_len)
{

	return (netmap_set_vnet_hdr_len(be, vnet_hdr_len));
}

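/*
 * Open the netmap port named by "devname", cache pointers to its first
 * TX and RX rings, and register an initially disabled read event that
 * drives the RX path through the rxeof callback.
 */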
static int
netmap_init(struct net_backend *be, const char *devname,
    nvlist_t *nvl __unused, net_be_rxeof_t cb, void *param)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	strlcpy(priv->ifname, devname, sizeof(priv->ifname));
	priv->ifname[sizeof(priv->ifname) - 1] = '\0';

	priv->nmd = nm_open(priv->ifname, NULL, NETMAP_NO_TX_POLL, NULL);
	if (priv->nmd == NULL) {
		EPRINTLN("Unable to nm_open(): interface '%s', errno (%s)",
		    devname, strerror(errno));
		return (-1);
	}

	priv->memid = priv->nmd->req.nr_arg2;
	priv->tx = NETMAP_TXRING(priv->nmd->nifp, 0);
	priv->rx = NETMAP_RXRING(priv->nmd->nifp, 0);
	priv->cb = cb;
	priv->cb_param = param;
	be->fd = priv->nmd->fd;

	priv->mevp = mevent_add_disabled(be->fd, EVF_READ, cb, param);
	if (priv->mevp == NULL) {
		EPRINTLN("Could not register event");
		return (-1);
	}

	return (0);
}

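/* Tear down the event handler and close the netmap port. */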
static void
netmap_cleanup(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	if (priv->mevp) {
		mevent_delete(priv->mevp);
	}
	if (priv->nmd) {
		nm_close(priv->nmd);
	}
	be->fd = -1;
}

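/*
 * Copy a guest scatter-gather list into one or more TX slots, chaining
 * the slots with NS_MOREFRAG as needed, and then kick the port with
 * NIOCTXSYNC. Packets that do not fit in the available slots are dropped.
 */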
static ssize_t
netmap_send(struct net_backend *be, const struct iovec *iov,
	    int iovcnt)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_ring *ring;
	ssize_t totlen = 0;
	int nm_buf_size;
	int nm_buf_len;
	uint32_t head;
	uint8_t *nm_buf;
	int j;

	ring = priv->tx;
	head = ring->head;
	if (head == ring->tail) {
		EPRINTLN("No space, drop %zu bytes", count_iov(iov, iovcnt));
		goto txsync;
	}
	nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
	nm_buf_size = ring->nr_buf_size;
	nm_buf_len = 0;

	for (j = 0; j < iovcnt; j++) {
		uint8_t *iov_frag_buf = iov[j].iov_base;
		int iov_frag_size = iov[j].iov_len;

		totlen += iov_frag_size;

		/*
		 * Split each iovec fragment over more netmap slots, if
		 * necessary.
		 */
		for (;;) {
			int copylen;

			copylen = iov_frag_size < nm_buf_size ? iov_frag_size : nm_buf_size;
			memcpy(nm_buf, iov_frag_buf, copylen);

			iov_frag_buf += copylen;
			iov_frag_size -= copylen;
			nm_buf += copylen;
			nm_buf_size -= copylen;
			nm_buf_len += copylen;

			if (iov_frag_size == 0) {
				break;
			}

			ring->slot[head].len = nm_buf_len;
			ring->slot[head].flags = NS_MOREFRAG;
			head = nm_ring_next(ring, head);
			if (head == ring->tail) {
				/*
				 * We ran out of netmap slots while
				 * splitting the iovec fragments.
				 */
				EPRINTLN("No space, drop %zu bytes",
				    count_iov(iov, iovcnt));
				goto txsync;
			}
			nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
			nm_buf_size = ring->nr_buf_size;
			nm_buf_len = 0;
		}
	}

	/* Complete the last slot, which must not have NS_MOREFRAG set. */
	ring->slot[head].len = nm_buf_len;
	ring->slot[head].flags = 0;
	head = nm_ring_next(ring, head);

	/* Now update ring->head and ring->cur. */
	ring->head = ring->cur = head;
txsync:
	ioctl(be->fd, NIOCTXSYNC, NULL);

	return (totlen);
}

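/*
 * Return the length of the next packet available on the RX ring, summing
 * the lengths of its NS_MOREFRAG-chained slots, without consuming them.
 */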
static ssize_t
netmap_peek_recvlen(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_ring *ring = priv->rx;
	uint32_t head = ring->head;
	ssize_t totlen = 0;

	while (head != ring->tail) {
		struct netmap_slot *slot = ring->slot + head;

		totlen += slot->len;
		if ((slot->flags & NS_MOREFRAG) == 0)
			break;
		head = nm_ring_next(ring, head);
	}

	return (totlen);
}

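/*
 * Copy the next received packet from the RX ring into the guest iovec.
 * Returns 0 if no packet is pending and -ENOSPC if the iovec cannot hold
 * the packet; otherwise the number of bytes copied. Consumed slots are
 * released to netmap by advancing ring->head and ring->cur.
 */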
static ssize_t
netmap_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_slot *slot = NULL;
	struct netmap_ring *ring;
	uint8_t *iov_frag_buf;
	int iov_frag_size;
	ssize_t totlen = 0;
	uint32_t head;

	assert(iovcnt);

	ring = priv->rx;
	head = ring->head;
	iov_frag_buf = iov->iov_base;
	iov_frag_size = iov->iov_len;

	do {
		uint8_t *nm_buf;
		int nm_buf_len;

		if (head == ring->tail) {
			return (0);
		}

		slot = ring->slot + head;
		nm_buf = NETMAP_BUF(ring, slot->buf_idx);
		nm_buf_len = slot->len;

		for (;;) {
			int copylen = nm_buf_len < iov_frag_size ?
			    nm_buf_len : iov_frag_size;

			memcpy(iov_frag_buf, nm_buf, copylen);
			nm_buf += copylen;
			nm_buf_len -= copylen;
			iov_frag_buf += copylen;
			iov_frag_size -= copylen;
			totlen += copylen;

			if (nm_buf_len == 0) {
				break;
			}

			iov++;
			iovcnt--;
			if (iovcnt == 0) {
				/* No space to receive. */
				EPRINTLN("Short iov, drop %zd bytes",
				    totlen);
				return (-ENOSPC);
			}
			iov_frag_buf = iov->iov_base;
			iov_frag_size = iov->iov_len;
		}

		head = nm_ring_next(ring, head);

	} while (slot->flags & NS_MOREFRAG);

	/* Release slots to netmap. */
	ring->head = ring->cur = head;

	return (totlen);
}

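/* Enable or disable delivery of RX notifications to the device model. */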
static void
netmap_recv_enable(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	mevent_enable(priv->mevp);
}

static void
netmap_recv_disable(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	mevent_disable(priv->mevp);
}

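/* The ops table registered for device names starting with "netmap". */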
static struct net_backend netmap_backend = {
	.prefix = "netmap",
	.priv_size = sizeof(struct netmap_priv),
	.init = netmap_init,
	.cleanup = netmap_cleanup,
	.send = netmap_send,
	.peek_recvlen = netmap_peek_recvlen,
	.recv = netmap_recv,
	.recv_enable = netmap_recv_enable,
	.recv_disable = netmap_recv_disable,
	.get_cap = netmap_get_cap,
	.set_cap = netmap_set_cap,
};

/* A clone of the netmap backend, with a different prefix. */
static struct net_backend vale_backend = {
	.prefix = "vale",
	.priv_size = sizeof(struct netmap_priv),
	.init = netmap_init,
	.cleanup = netmap_cleanup,
	.send = netmap_send,
	.peek_recvlen = netmap_peek_recvlen,
	.recv = netmap_recv,
	.recv_enable = netmap_recv_enable,
	.recv_disable = netmap_recv_disable,
	.get_cap = netmap_get_cap,
	.set_cap = netmap_set_cap,
};

DATA_SET(net_backend_set, netmap_backend);
DATA_SET(net_backend_set, vale_backend);
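
/*
 * Illustrative usage (not part of this file): a guest NIC can be attached
 * to a VALE switch port with e.g. "-s 5,virtio-net,vale0:vm1", or to a
 * host interface opened in netmap mode with "netmap:em0"; both names are
 * dispatched to this backend by the prefixes registered above.
 */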