/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>  /* defines used in kernel.h */
#include <sys/poll.h>  /* POLLIN, POLLOUT */
#include <sys/kernel.h> /* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */
#include <sys/endian.h>

#include <sys/rwlock.h>

#include <vm/vm.h>      /* vtophys */
#include <vm/pmap.h>    /* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>


#include <sys/malloc.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* */
#include <netinet/in.h>		/* in6_cksum_pseudo() */
#include <machine/in_cksum.h>  /* in_pseudo(), in_cksum_hdr() */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

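/*
 * Compute a raw (unfolded) checksum over 'len' bytes starting at 'data',
 * adding 16-bit words (taken in big-endian order) to 'cur_sum'. A trailing
 * odd byte is accounted for as the high byte of a final 16-bit word.
 */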
rawsum_t
nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	uint16_t *words = (uint16_t *)data;
	int nw = len / 2;
	int i;

	for (i = 0; i < nw; i++)
		cur_sum += be16toh(words[i]);

	if (len & 1)
		cur_sum += (data[len-1] << 8);

	return cur_sum;
}

/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
uint16_t
nm_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}

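/* Compute the checksum of an IPv4 header; the result is in network byte order. */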
uint16_t
nm_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_csum_fold(nm_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0));
#endif
}

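/*
 * Compute the TCP/UDP checksum for an IPv4 packet and store it in *check.
 * The checksum covers the pseudo-header plus 'datalen' bytes of transport
 * header and payload starting at 'data'.
 */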
void
nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
					size_t datalen, uint16_t *check)
{
#ifdef INET
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
				 htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (includes the pseudo-header).
	 */
	*check = nm_csum_fold(nm_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		D("inet4 segmentation not supported");
	}
#endif
}

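/* Same as above, for IPv6: pseudo-header checksum plus TCP/UDP header and payload. */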
void
nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
					size_t datalen, uint16_t *check)
{
#ifdef INET6
	*check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
	*check = nm_csum_fold(nm_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		D("inet6 segmentation not supported");
	}
#endif
}


/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna =
		(struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL; /* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			D("cannot restore");
			return EINVAL;  /* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}


/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 * On FreeBSD we just intercept if_transmit.
 */
void
netmap_catch_tx(struct netmap_generic_adapter *gna, int enable)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = na->ifp;

	if (enable) {
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;
	} else {
		ifp->if_transmit = na->if_transmit;
	}
}


/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *      if ((m->m_flags & M_FLOWID) != 0)
 *              i = m->m_pkthdr.flowid % adapter->num_queues;
 *      else
 *              i = curcpu % adapter->num_queues;
 *
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	/*
	 * The mbuf should be a cluster from our special pool,
	 * so we do not need to do an m_copyback but just copy
	 * (and in the future, just reference the netmap buffer).
	 */

	if (*m->m_ext.ext_cnt != 1) {
		D("invalid refcnt %d for %p",
			*m->m_ext.ext_cnt, m);
		panic("in generic_xmit_frame");
	}
	// XXX the ext_size check is unnecessary if we link the netmap buf
	if (m->m_ext.ext_size < len) {
		RD(5, "size %d < len %d", m->m_ext.ext_size, len);
		len = m->m_ext.ext_size;
	}
	if (0) { /* XXX seems to have negligible benefits */
		m->m_ext.ext_buf = m->m_data = addr;
	} else {
		bcopy(addr, m->m_data, len);
	}
	m->m_len = m->m_pkthdr.len = len;
	// increment the refcount; the mbuf is all ours, so we could skip the atomic
	atomic_fetchadd_int(m->m_ext.ext_cnt, 1);
	m->m_flags |= M_FLOWID;
	m->m_pkthdr.flowid = ring_nr;
	m->m_pkthdr.rcvif = ifp; /* used for tx notification */
	ret = NA(ifp)->if_transmit(ifp, m);
	return ret;
}


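/* Return the netmap adapter attached to an interface (if_t accessor). */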
#if __FreeBSD_version >= 1100005
struct netmap_adapter *
netmap_getna(if_t ifp)
{
	return (NA((struct ifnet *)ifp));
}
#endif /* __FreeBSD_version >= 1100005 */

/*
 * The following two functions are placeholders until we have a generic
 * way to extract the info from the ifp.
 */
int
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called, in tx %d rx %d", *tx, *rx);
	return 0;
}


void
generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called, in txq %d rxq %d", *txq, *rxq);
	*txq = netmap_generic_rings;
	*rxq = netmap_generic_rings;
}


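/*
 * Interrupt mitigation for the generic adapter is not implemented on
 * FreeBSD yet: the routines below only record state or do nothing.
 */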
void
netmap_mitigation_init(struct nm_generic_mit *mit, struct netmap_adapter *na)
{
	ND("called");
	mit->mit_pending = 0;
	mit->mit_na = na;
}


void
netmap_mitigation_start(struct nm_generic_mit *mit)
{
	ND("called");
}


void
netmap_mitigation_restart(struct nm_generic_mit *mit)
{
	ND("called");
}


int
netmap_mitigation_active(struct nm_generic_mit *mit)
{
	ND("called");
	return 0;
}


void
netmap_mitigation_cleanup(struct nm_generic_mit *mit)
{
	ND("called");
}


/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev 		*dev;
	struct netmap_priv_d	*priv;
};


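/*
 * Pager constructor: take a reference on the netmap cdev so it cannot
 * go away while the memory is mapped.
 */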
static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	if (netmap_verbose)
		D("handle %p size %jd prot %d foff %jd",
			handle, (intmax_t)size, prot, (intmax_t)foff);
	dev_ref(vmh->dev);
	return 0;
}


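/*
 * Pager destructor: run the netmap destructor on the private state,
 * then release the handle and the cdev reference taken by the constructor.
 */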
static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	if (netmap_verbose)
		D("handle %p", handle);
	netmap_dtor(priv);
	free(vmh, M_DEVBUF);
	dev_rel(dev);
}


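/*
 * Pager fault handler: translate the offset within the mapping into a
 * physical address in the netmap memory area and resolve the fault with
 * a fictitious page pointing at it.
 */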
static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;
	vm_pindex_t pidx;

	ND("object %p offset %jd prot %d mres %p",
			object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed-in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed-in reqpage page with our own fake page and
		 * free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK	VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_lock(*mres);
		vm_page_free(*mres);
		vm_page_unlock(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}


static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};


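/*
 * d_mmap_single handler: allocate a device pager VM object backed by the
 * netmap memory associated with this file descriptor, taking an extra
 * reference on the private state for the lifetime of the mapping.
 */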
static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize,  vm_object_t *objp, int prot)
{
	int error;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	vm_object_t obj;

	if (netmap_verbose)
		D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		    (intmax_t )*foff, (intmax_t )objsize, objp, prot);

	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv((void**)&priv);
	if (error)
		goto err_unlock;
	vmh->priv = priv;
	priv->np_refcount++;
	NMG_UNLOCK();

	error = netmap_get_memory(priv);
	if (error)
		goto err_deref;

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		*foff, NULL);
	if (obj == NULL) {
		D("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refcount--;
err_unlock:
	NMG_UNLOCK();
// err:
	free(vmh, M_DEVBUF);
	return error;
}


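/*
 * d_close handler: nothing to do here, the per-descriptor state is
 * released by the cdevpriv destructor (netmap_dtor).
 */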
// XXX can we remove this ?
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}


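/*
 * d_open handler: allocate the per-file-descriptor private structure and
 * register netmap_dtor() as its destructor through devfs_set_cdevpriv().
 */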
static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	(void)dev;
	(void)oflags;
	(void)devtype;
	(void)td;

	// XXX wait or nowait ?
	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error)
	        return error;

	priv->np_refcount = 1;

	return 0;
}

/******************** kqueue support ****************/

/*
 * The OS_selwakeup also needs to issue a KNOTE_UNLOCKED.
 * We use a non-zero argument to distinguish the call from the one
 * in kqueue_scan(), which instead also needs to run netmap_poll().
 * The knote uses a global mutex for the time being. We might
 * try to reuse the one in the si, but it is not allocated
 * permanently so it might be a bit tricky.
 *
 * The *kqfilter function registers one f_event callback or the other
 * depending on read or write mode.
 * In the call to f_event() td_fpop is NULL, so any child function
 * calling devfs_get_cdevpriv() would fail - and we need it in
 * netmap_poll(). As a workaround we store priv into kn->kn_hook
 * and pass it as the first argument to netmap_poll(), which then
 * uses the failure to tell that we are called from f_event()
 * and do not need the selrecord().
 */

void freebsd_selwakeup(struct selinfo *si, int pri);

void
freebsd_selwakeup(struct selinfo *si, int pri)
{
	if (netmap_verbose)
		D("on knote %p", &si->si_note);
	selwakeuppri(si, pri);
	/* use a non-zero hint to distinguish this notification from the
	 * call done in kqueue_scan(), which uses 0
	 */
	KNOTE_UNLOCKED(&si->si_note, 0x100 /* notification */);
}

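/*
 * Detach callbacks for the read and write knotes: remove the knote
 * from the selinfo it was attached to.
 */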
static void
netmap_knrdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct selinfo *si = priv->np_rxsi;

	D("remove selinfo %p", si);
	knlist_remove(&si->si_note, kn, 0);
}

static void
netmap_knwdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct selinfo *si = priv->np_txsi;

	D("remove selinfo %p", si);
	knlist_remove(&si->si_note, kn, 0);
}

/*
 * Callback invoked for notifications (generated externally) and for our
 * own calls to kevent(). For the former we just return 1 (ready),
 * since we do not know better.
 * For the latter we call netmap_poll() and return 0/1 accordingly.
 */
static int
netmap_knrw(struct knote *kn, long hint, int events)
{
	struct netmap_priv_d *priv;
	int revents;

	if (hint != 0) {
		ND(5, "call from notify");
		return 1; /* assume we are ready */
	}
	priv = kn->kn_hook;
	/* the notification may come from an external thread,
	 * in which case we do not want to run netmap_poll().
	 * This should be filtered above, but check just in case.
	 */
	if (curthread != priv->np_td) { /* should not happen */
		RD(5, "curthread changed %p %p", curthread, priv->np_td);
		return 1;
	} else {
		revents = netmap_poll((void *)priv, events, curthread);
		return (events & revents) ? 1 : 0;
	}
}

static int
netmap_knread(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLIN);
}

static int
netmap_knwrite(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLOUT);
}

static struct filterops netmap_rfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knrdetach,
	.f_event = netmap_knread,
};

static struct filterops netmap_wfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knwdetach,
	.f_event = netmap_knwrite,
};


/*
 * This is called when a thread invokes kevent() to record
 * a change in the configuration of the kqueue.
 * The 'priv' should be the same as in the netmap device.
 */
static int
netmap_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct netmap_priv_d *priv;
	int error;
	struct netmap_adapter *na;
	struct selinfo *si;
	int ev = kn->kn_filter;

	if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
		D("bad filter request %d", ev);
		return 1;
	}
	error = devfs_get_cdevpriv((void**)&priv);
	if (error) {
		D("device not yet setup");
		return 1;
	}
	na = priv->np_na;
	if (na == NULL) {
		D("no netmap adapter for this file descriptor");
		return 1;
	}
	/* the si is indicated in the priv */
	si = (ev == EVFILT_WRITE) ? priv->np_txsi : priv->np_rxsi;
	// XXX lock(priv) ?
	kn->kn_fop = (ev == EVFILT_WRITE) ?
		&netmap_wfiltops : &netmap_rfiltops;
	kn->kn_hook = priv;
	knlist_add(&si->si_note, kn, 1);
	// XXX unlock(priv)
	ND("register %p %s td %p priv %p kn %p np_nifp %p kn_fp/fpop %s",
		na, na->ifp->if_xname, curthread, priv, kn,
		priv->np_nifp,
		kn->kn_fp == curthread->td_fpop ? "match" : "MISMATCH");
	return 0;
}

struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_poll = netmap_poll,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};
/*--- end of kqueue support ----*/

/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}


DEV_MODULE(netmap, netmap_loader, NULL);