/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/poll.h>	/* POLLIN, POLLOUT */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */
#include <sys/endian.h>

#include <sys/rwlock.h>

#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>


#include <sys/malloc.h>
#include <sys/socket.h>	/* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <netinet/in.h>		/* in6_cksum_pseudo() */
#include <machine/in_cksum.h>	/* in_pseudo(), in_cksum_hdr() */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

rawsum_t nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	uint16_t *words = (uint16_t *)data;
	int nw = len / 2;
	int i;

	for (i = 0; i < nw; i++)
		cur_sum += be16toh(words[i]);

	if (len & 1)
		cur_sum += (data[len-1] << 8);

	return cur_sum;
}

/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
uint16_t nm_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}
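
/*
 * Usage sketch (illustrative only, not compiled): the raw sum can be
 * accumulated incrementally over several buffers, e.g. header first
 * and payload afterwards, and must be folded exactly once at the end.
 * 'hdr', 'hdrlen', 'payload' and 'paylen' are hypothetical names.
 */
#if 0
	rawsum_t sum;
	uint16_t csum;

	sum = nm_csum_raw(hdr, hdrlen, 0);		/* start from zero */
	sum = nm_csum_raw(payload, paylen, sum);	/* accumulate */
	csum = nm_csum_fold(sum);	/* one's complement, network order */
#endif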

uint16_t nm_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_csum_fold(nm_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0));
#endif
}

void nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		size_t datalen, uint16_t *check)
{
#ifdef INET
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
			htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (includes the pseudo-header).
	 */
	*check = nm_csum_fold(nm_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;

	if (!notsupported) {
		notsupported = 1;
		D("inet4 segmentation not supported");
	}
#endif
}

void nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		size_t datalen, uint16_t *check)
{
#ifdef INET6
	*check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
	*check = nm_csum_fold(nm_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;

	if (!notsupported) {
		notsupported = 1;
		D("inet6 segmentation not supported");
	}
#endif
}


/*
 * Intercept the rx routine in the standard device driver.
 * The second argument is non-zero to intercept, 0 to restore.
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL;	/* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			D("cannot restore");
			return EINVAL;	/* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}


/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * The second argument is non-zero to intercept, 0 to restore.
 * On FreeBSD we just intercept if_transmit.
 */
void
netmap_catch_tx(struct netmap_generic_adapter *gna, int enable)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = na->ifp;

	if (enable) {
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;
	} else {
		ifp->if_transmit = na->if_transmit;
	}
}
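
/*
 * Usage sketch (illustrative only, not compiled; 'na' and 'gna' are
 * assumed to point to an already-attached generic adapter): a generic
 * adapter entering netmap mode installs both hooks, and removes them
 * in reverse order on exit. Error handling is omitted for brevity.
 */
#if 0
	netmap_catch_rx(na, 1);		/* if_input -> generic_rx_handler */
	netmap_catch_tx(gna, 1);	/* if_transmit -> netmap_transmit */
	/* ... interface runs in netmap mode ... */
	netmap_catch_tx(gna, 0);	/* restore the saved if_transmit */
	netmap_catch_rx(na, 0);		/* restore the saved if_input */
#endif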

/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if ((m->m_flags & M_FLOWID) != 0)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	m->m_len = m->m_pkthdr.len = 0;

	/* copy data to the mbuf */
	m_copyback(m, 0, len, addr);
	/* increment the refcount so that the m_freem() after the
	 * transmission does not release the buffer; we are the only
	 * owner here, so in principle the atomic could be skipped.
	 */
	atomic_fetchadd_int(m->m_ext.ref_cnt, 1);
	m->m_flags |= M_FLOWID;
	m->m_pkthdr.flowid = ring_nr;
	m->m_pkthdr.rcvif = ifp;	/* used for tx notification */
	ret = NA(ifp)->if_transmit(ifp, m);
	return ret;
}


/*
 * The following two functions are placeholders until we have a generic
 * way to extract the info from the ifp.
 */
int
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called");
	return 0;
}


void
generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called");
	*txq = netmap_generic_rings;
	*rxq = netmap_generic_rings;
}


void netmap_mitigation_init(struct nm_generic_mit *mit, struct netmap_adapter *na)
{
	ND("called");
	mit->mit_pending = 0;
	mit->mit_na = na;
}


void netmap_mitigation_start(struct nm_generic_mit *mit)
{
	ND("called");
}


void netmap_mitigation_restart(struct nm_generic_mit *mit)
{
	ND("called");
}


int netmap_mitigation_active(struct nm_generic_mit *mit)
{
	ND("called");
	return 0;
}


void netmap_mitigation_cleanup(struct nm_generic_mit *mit)
{
	ND("called");
}


/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};


static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	if (netmap_verbose)
		D("handle %p size %jd prot %d foff %jd",
		    handle, (intmax_t)size, prot, (intmax_t)foff);
	dev_ref(vmh->dev);
	return 0;
}


static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	if (netmap_verbose)
		D("handle %p", handle);
	netmap_dtor(priv);
	free(vmh, M_DEVBUF);
	dev_rel(dev);
}
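
/*
 * Lifecycle sketch (illustrative summary of the hooks above and of
 * netmap_mmap_single() below, not a new code path):
 *
 *	mmap(fd)      -> netmap_mmap_single() -> cdev_pager_allocate()
 *	              -> netmap_dev_pager_ctor()   (dev_ref)
 *	page fault    -> netmap_dev_pager_fault()
 *	last munmap() -> netmap_dev_pager_dtor()   (netmap_dtor + dev_rel)
 *
 * so the netmap state attached to the mapping is only released when
 * the last mapping of the VM object goes away.
 */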

static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;
	vm_pindex_t pidx;

	ND("object %p offset %jd prot %d mres %p",
	    object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it
		 * with the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK	VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK		VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_lock(*mres);
		vm_page_free(*mres);
		vm_page_unlock(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}


static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};


static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
{
	int error;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	vm_object_t obj;

	if (netmap_verbose)
		D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		    (intmax_t)*foff, (intmax_t)objsize, objp, prot);

	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv((void **)&priv);
	if (error)
		goto err_unlock;
	vmh->priv = priv;
	priv->np_refcount++;
	NMG_UNLOCK();

	error = netmap_get_memory(priv);
	if (error)
		goto err_deref;

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
	    &netmap_cdev_pager_ops, objsize, prot,
	    *foff, NULL);
	if (obj == NULL) {
		D("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refcount--;
err_unlock:
	NMG_UNLOCK();
	free(vmh, M_DEVBUF);
	return error;
}


/* XXX can we remove this? */
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d td %p",
		    dev, fflag, devtype, td);
	return 0;
}


static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	(void)dev;
	(void)oflags;
	(void)devtype;
	(void)td;

	/* XXX wait or nowait? */
	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error)
		return error;

	priv->np_refcount = 1;

	return 0;
}
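
/*
 * Userspace view (illustrative sketch, not compiled here; headers and
 * error checks omitted): the entry points above implement the canonical
 * netmap open sequence. The nmreq fields follow <net/netmap_user.h>.
 */
#if 0
	struct nmreq nmr = { .nr_version = NETMAP_API };
	int fd = open("/dev/netmap", O_RDWR);	/* netmap_open() */

	strncpy(nmr.nr_name, "em0", sizeof(nmr.nr_name));
	ioctl(fd, NIOCREGIF, &nmr);		/* netmap_ioctl() */
	/* netmap_mmap_single() and the cdev pager serve this mapping */
	void *mem = mmap(0, nmr.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
#endif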

/******************** kqueue support ****************/

/*
 * The OS_selwakeup also needs to issue a KNOTE_UNLOCKED.
 * We use a non-zero hint to distinguish that call from the one
 * generated by kqueue_scan(), which instead also needs to run
 * netmap_poll().
 * The knote uses a global mutex for the time being. We might
 * try to reuse the one in the si, but it is not allocated
 * permanently so it might be a bit tricky.
 *
 * The *kqfilter function registers one or another f_event
 * depending on read or write mode.
 * In the call to f_event() td_fpop is NULL, so any child function
 * calling devfs_get_cdevpriv() would fail - and we need it in
 * netmap_poll(). As a workaround we store priv into kn->kn_hook
 * and pass it as the first argument to netmap_poll(), which then
 * uses the failure to tell that we are called from f_event()
 * and do not need the selrecord().
 */

void freebsd_selwakeup(struct selinfo *si, int pri);

void
freebsd_selwakeup(struct selinfo *si, int pri)
{
	if (netmap_verbose)
		D("on knote %p", &si->si_note);
	selwakeuppri(si, pri);
	/* use a non-zero hint to distinguish this notification from
	 * the call done in kqueue_scan(), which uses 0
	 */
	KNOTE_UNLOCKED(&si->si_note, 0x100 /* notification */);
}

static void
netmap_knrdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct selinfo *si = priv->np_rxsi;

	D("remove selinfo %p", si);
	knlist_remove(&si->si_note, kn, 0);
}

static void
netmap_knwdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct selinfo *si = priv->np_txsi;

	D("remove selinfo %p", si);
	knlist_remove(&si->si_note, kn, 0);
}

/*
 * Callback for both external notifications and our own calls to
 * kevent(). For the former we just return 1 (ready), since we do not
 * know better; for the latter we call netmap_poll() and return 0/1
 * accordingly.
 */
static int
netmap_knrw(struct knote *kn, long hint, int events)
{
	struct netmap_priv_d *priv;
	int revents;

	if (hint != 0) {
		ND(5, "call from notify");
		return 1;	/* assume we are ready */
	}
	priv = kn->kn_hook;
	/* the notification may come from an external thread,
	 * in which case we do not want to run netmap_poll().
	 * This should be filtered above, but check just in case.
	 */
	if (curthread != priv->np_td) {	/* should not happen */
		RD(5, "curthread changed %p %p", curthread, priv->np_td);
		return 1;
	} else {
		revents = netmap_poll((void *)priv, events, curthread);
		return (events & revents) ? 1 : 0;
	}
}

static int
netmap_knread(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLIN);
}

static int
netmap_knwrite(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLOUT);
}

static struct filterops netmap_rfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knrdetach,
	.f_event = netmap_knread,
};

static struct filterops netmap_wfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knwdetach,
	.f_event = netmap_knwrite,
};
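
/*
 * Userspace view (illustrative sketch, not compiled here; 'fd' is a
 * hypothetical open netmap file descriptor): registering the fd with
 * a kqueue goes through netmap_kqfilter() below, which selects one of
 * the two filterops above.
 */
#if 0
	struct kevent kev;
	int kq = kqueue();

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);	/* registers via kqfilter */
	kevent(kq, NULL, 0, &kev, 1, NULL);	/* waits, runs f_event */
#endif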

/*
 * This is called when a thread invokes kevent() to record
 * a change in the configuration of the kqueue.
 * The 'priv' should be the same as in the netmap device.
 */
static int
netmap_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct netmap_priv_d *priv;
	int error;
	struct netmap_adapter *na;
	struct selinfo *si;
	int ev = kn->kn_filter;

	if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
		D("bad filter request %d", ev);
		return 1;
	}
	error = devfs_get_cdevpriv((void **)&priv);
	if (error) {
		D("device not yet setup");
		return 1;
	}
	na = priv->np_na;
	if (na == NULL) {
		D("no netmap adapter for this file descriptor");
		return 1;
	}
	/* the si is indicated in the priv */
	si = (ev == EVFILT_WRITE) ? priv->np_txsi : priv->np_rxsi;
	/* XXX lock(priv) ? */
	kn->kn_fop = (ev == EVFILT_WRITE) ?
		&netmap_wfiltops : &netmap_rfiltops;
	kn->kn_hook = priv;
	knlist_add(&si->si_note, kn, 1);
	/* XXX unlock(priv) */
	ND("register %p %s td %p priv %p kn %p np_nifp %p kn_fp/fpop %s",
	    na, na->ifp->if_xname, curthread, priv, kn,
	    priv->np_nifp,
	    kn->kn_fp == curthread->td_fpop ? "match" : "MISMATCH");
	return 0;
}

struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_poll = netmap_poll,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};
/*--- end of kqueue support ----*/

/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}


DEV_MODULE(netmap, netmap_loader, NULL);
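
/*
 * Usage note (illustrative): once built, the module is typically
 * loaded with "kldload ./netmap.ko", which runs netmap_loader() with
 * MOD_LOAD so that netmap_init() can create /dev/netmap;
 * "kldunload netmap" triggers MOD_UNLOAD and netmap_fini().
 */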