/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/poll.h>	/* POLLIN, POLLOUT */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */
#include <sys/endian.h>

#include <sys/rwlock.h>

#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>


#include <sys/malloc.h>
#include <sys/socket.h>	/* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <netinet/in.h>	/* in6_cksum_pseudo() */
#include <machine/in_cksum.h>	/* in_pseudo(), in_cksum_hdr() */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

rawsum_t nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	uint16_t *words = (uint16_t *)data;
	int nw = len / 2;
	int i;

	for (i = 0; i < nw; i++)
		cur_sum += be16toh(words[i]);

	if (len & 1)
		cur_sum += (data[len-1] << 8);

	return cur_sum;
}

/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
uint16_t nm_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}
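
/*
 * A minimal sketch (not compiled) of how the two helpers above are
 * meant to compose: accumulate the buffer in host byte order with
 * nm_csum_raw(), then fold with nm_csum_fold() to obtain a value
 * ready to be stored into a header field. 'buf' and 'len' are
 * hypothetical names, not part of this file.
 */
#if 0
static uint16_t
nm_csum_example(uint8_t *buf, size_t len)
{
	rawsum_t sum = nm_csum_raw(buf, len, 0);	/* host byte order */

	return nm_csum_fold(sum);			/* network byte order */
}
#endif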

uint16_t nm_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_csum_fold(nm_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0));
#endif
}

void nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
			size_t datalen, uint16_t *check)
{
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
			htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (includes the pseudo-header).
	 */
	*check = nm_csum_fold(nm_csum_raw(data, datalen, 0));
}

void nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
			size_t datalen, uint16_t *check)
{
#ifdef INET6
	*check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
	*check = nm_csum_fold(nm_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		D("inet6 segmentation not supported");
	}
#endif
}


/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL; /* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			D("cannot restore");
			return EINVAL; /* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}


/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 * On FreeBSD we just intercept if_transmit.
 */
void
netmap_catch_tx(struct netmap_generic_adapter *gna, int enable)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = na->ifp;

	if (enable) {
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;
	} else {
		ifp->if_transmit = na->if_transmit;
	}
}


/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if ((m->m_flags & M_FLOWID) != 0)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	m->m_len = m->m_pkthdr.len = 0;

	// copy data to the mbuf
	m_copyback(m, 0, len, addr);
	// inc refcount; we are alone, so we can skip the atomic
	atomic_fetchadd_int(m->m_ext.ref_cnt, 1);
	m->m_flags |= M_FLOWID;
	m->m_pkthdr.flowid = ring_nr;
	m->m_pkthdr.rcvif = ifp; /* used for tx notification */
	ret = NA(ifp)->if_transmit(ifp, m);
	return ret;
}
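
/*
 * A minimal sketch (not compiled) of a transmit loop that could sit in
 * a generic txsync and feed generic_xmit_frame(); 'npkts', 'mbufs',
 * 'bufs' and 'lens' are all hypothetical names, not part of this file.
 */
#if 0
	u_int i;

	for (i = 0; i < npkts; i++) {
		/* a non-zero return means a drop or a full queue */
		if (generic_xmit_frame(ifp, mbufs[i], bufs[i],
		    lens[i], ring_nr))
			break;
	}
#endif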

/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp.
 */
int
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called");
	return 0;
}


void
generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called");
	*txq = netmap_generic_rings;
	*rxq = netmap_generic_rings;
}


void netmap_mitigation_init(struct nm_generic_mit *mit, struct netmap_adapter *na)
{
	ND("called");
	mit->mit_pending = 0;
	mit->mit_na = na;
}


void netmap_mitigation_start(struct nm_generic_mit *mit)
{
	ND("called");
}


void netmap_mitigation_restart(struct nm_generic_mit *mit)
{
	ND("called");
}


int netmap_mitigation_active(struct nm_generic_mit *mit)
{
	ND("called");
	return 0;
}


void netmap_mitigation_cleanup(struct nm_generic_mit *mit)
{
	ND("called");
}


/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};


static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	if (netmap_verbose)
		D("handle %p size %jd prot %d foff %jd",
			handle, (intmax_t)size, prot, (intmax_t)foff);
	dev_ref(vmh->dev);
	return 0;
}


static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	if (netmap_verbose)
		D("handle %p", handle);
	netmap_dtor(priv);
	free(vmh, M_DEVBUF);
	dev_rel(dev);
}


static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;
	vm_pindex_t pidx;

	ND("object %p offset %jd prot %d mres %p",
		object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page and
		 * free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK	VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_lock(*mres);
		vm_page_free(*mres);
		vm_page_unlock(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}


static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};


static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
{
	int error;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	vm_object_t obj;

	if (netmap_verbose)
		D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		    (intmax_t)*foff, (intmax_t)objsize, objp, prot);

	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv((void**)&priv);
	if (error)
		goto err_unlock;
	vmh->priv = priv;
	priv->np_refcount++;
	NMG_UNLOCK();

	error = netmap_get_memory(priv);
	if (error)
		goto err_deref;

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		*foff, NULL);
	if (obj == NULL) {
		D("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refcount--;
err_unlock:
	NMG_UNLOCK();
// err:
	free(vmh, M_DEVBUF);
	return error;
}
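
/*
 * A minimal sketch (not compiled) of the userspace counterpart of the
 * mmap path above, as in a typical netmap client; error handling is
 * omitted and "em0" is just an example interface name.
 */
#if 0
	struct nmreq req;
	void *mem;
	int fd = open("/dev/netmap", O_RDWR);

	bzero(&req, sizeof(req));
	req.nr_version = NETMAP_API;
	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
	ioctl(fd, NIOCREGIF, &req);	/* bind the fd to the interface */
	/* this mmap() call ends up in netmap_mmap_single() */
	mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
#endif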

// XXX can we remove this ?
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}


static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	(void)dev;
	(void)oflags;
	(void)devtype;
	(void)td;

	// XXX wait or nowait ?
	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error)
		return error;

	priv->np_refcount = 1;

	return 0;
}

/******************** kqueue support ****************/

/*
 * The OS_selwakeup also needs to issue a KNOTE_UNLOCKED.
 * We use a non-zero argument to distinguish the call from the one
 * in kevent_scan() which instead also needs to run netmap_poll().
 * The knote uses a global mutex for the time being. We might
 * try to reuse the one in the si, but it is not allocated
 * permanently so it might be a bit tricky.
 *
 * The *kqfilter function registers one or another f_event
 * depending on read or write mode.
 * In the call to f_event() td_fpop is NULL so any child function
 * calling devfs_get_cdevpriv() would fail - and we need it in
 * netmap_poll(). As a workaround we store priv into kn->kn_hook
 * and pass it as first argument to netmap_poll(), which then
 * uses the failure to tell that we are called from f_event()
 * and do not need the selrecord().
 */

void freebsd_selwakeup(struct selinfo *si, int pri);

void
freebsd_selwakeup(struct selinfo *si, int pri)
{
	if (netmap_verbose)
		D("on knote %p", &si->si_note);
	selwakeuppri(si, pri);
	/* use a non-zero hint to tell the notification from the
	 * call done in kqueue_scan() which uses 0
	 */
	KNOTE_UNLOCKED(&si->si_note, 0x100 /* notification */);
}

static void
netmap_knrdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct selinfo *si = priv->np_rxsi;

	D("remove selinfo %p", si);
	knlist_remove(&si->si_note, kn, 0);
}

static void
netmap_knwdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct selinfo *si = priv->np_txsi;

	D("remove selinfo %p", si);
	knlist_remove(&si->si_note, kn, 0);
}

/*
 * Callback invoked both by external notifications and by our own
 * calls to kevent(). For the former we just return 1 (ready), since
 * we do not know better; for the latter we call netmap_poll() and
 * return 0/1 accordingly.
 */
static int
netmap_knrw(struct knote *kn, long hint, int events)
{
	struct netmap_priv_d *priv;
	int revents;

	if (hint != 0) {
		ND(5, "call from notify");
		return 1; /* assume we are ready */
	}
	priv = kn->kn_hook;
	/* the notification may come from an external thread,
	 * in which case we do not want to run netmap_poll().
	 * This should be filtered above, but check just in case.
	 */
	if (curthread != priv->np_td) { /* should not happen */
		RD(5, "curthread changed %p %p", curthread, priv->np_td);
		return 1;
	} else {
		revents = netmap_poll((void *)priv, events, curthread);
		return (events & revents) ? 1 : 0;
	}
}

static int
netmap_knread(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLIN);
}

static int
netmap_knwrite(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLOUT);
}

static struct filterops netmap_rfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knrdetach,
	.f_event = netmap_knread,
};

static struct filterops netmap_wfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knwdetach,
	.f_event = netmap_knwrite,
};
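
/*
 * A minimal sketch (not compiled) of a userspace client waiting for rx
 * traffic through the filters above; 'fd' is assumed to be an open,
 * NIOCREGIF-bound /dev/netmap descriptor.
 */
#if 0
	struct kevent ev;
	int kq = kqueue();

	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &ev, 1, NULL, 0, NULL);	/* register; runs netmap_kqfilter() */
	kevent(kq, NULL, 0, &ev, 1, NULL);	/* wait for readiness */
#endif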

/*
 * This is called when a thread invokes kevent() to record
 * a change in the configuration of the kqueue().
 * The 'priv' should be the same as in the netmap device.
 */
static int
netmap_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct netmap_priv_d *priv;
	int error;
	struct netmap_adapter *na;
	struct selinfo *si;
	int ev = kn->kn_filter;

	if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
		D("bad filter request %d", ev);
		return 1;
	}
	error = devfs_get_cdevpriv((void**)&priv);
	if (error) {
		D("device not yet setup");
		return 1;
	}
	na = priv->np_na;
	if (na == NULL) {
		D("no netmap adapter for this file descriptor");
		return 1;
	}
	/* the si is indicated in the priv */
	si = (ev == EVFILT_WRITE) ? priv->np_txsi : priv->np_rxsi;
	// XXX lock(priv) ?
	kn->kn_fop = (ev == EVFILT_WRITE) ?
		&netmap_wfiltops : &netmap_rfiltops;
	kn->kn_hook = priv;
	knlist_add(&si->si_note, kn, 1);
	// XXX unlock(priv)
	ND("register %p %s td %p priv %p kn %p np_nifp %p kn_fp/fpop %s",
		na, na->ifp->if_xname, curthread, priv, kn,
		priv->np_nifp,
		kn->kn_fp == curthread->td_fpop ? "match" : "MISMATCH");
	return 0;
}

struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_poll = netmap_poll,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};
/*--- end of kqueue support ----*/

/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}


DEV_MODULE(netmap, netmap_loader, NULL);