/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */

#include <sys/rwlock.h>

#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <sys/malloc.h>
#include <sys/socket.h>	/* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna =
		(struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL;	/* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			D("cannot restore");
			return EINVAL;	/* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}

/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 *
 * XXX see if FreeBSD has such a mechanism
 */
void
netmap_catch_packet_steering(struct netmap_generic_adapter *na, int enable)
{
	if (enable) {
		/* no FreeBSD hook identified yet, nothing to intercept */
	} else {
		/* nothing was intercepted, nothing to restore */
	}
}
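/*
 * Illustration (a sketch, not compiled in): once netmap_catch_rx(na, 1)
 * has swapped if_input, a driver's usual delivery call
 *
 *	(*ifp->if_input)(ifp, m);
 *
 * lands in generic_rx_handler() instead of the stack's default input
 * routine (e.g. ether_input() for Ethernet interfaces), so generic
 * netmap mode sees every incoming mbuf; netmap_catch_rx(na, 0) puts the
 * saved pointer back and restores normal delivery.
 */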
/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if ((m->m_flags & M_FLOWID) != 0)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	m->m_len = m->m_pkthdr.len = 0;

	/* copy data into the mbuf */
	m_copyback(m, 0, len, addr);

	/*
	 * Take an extra reference so that the m_freem() issued by the
	 * driver after transmission returns the mbuf to us instead of
	 * destroying it. We are the only writer of ref_cnt here, but it
	 * may be read concurrently, hence the atomic update.
	 */
	atomic_fetchadd_int(m->m_ext.ref_cnt, 1);
	m->m_flags |= M_FLOWID;
	m->m_pkthdr.flowid = ring_nr;	/* steer the mbuf to ring_nr */
	m->m_pkthdr.rcvif = ifp;	/* used for tx notification */
	ret = ifp->if_transmit(ifp, m);
	return ret;
}

/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp.
 */
int
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called");
	return 0;
}

void
generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called");
	*txq = 1;	/* fall back to a single tx queue */
	*rxq = 1;	/* and a single rx queue */
}

void
netmap_mitigation_init(struct netmap_generic_adapter *na)
{
	ND("called");
	na->mit_pending = 0;
}


void
netmap_mitigation_start(struct netmap_generic_adapter *na)
{
	ND("called");
}

void
netmap_mitigation_restart(struct netmap_generic_adapter *na)
{
	ND("called");
}

int
netmap_mitigation_active(struct netmap_generic_adapter *na)
{
	ND("called");
	return 0;	/* interrupt mitigation is not implemented here */
}

void
netmap_mitigation_cleanup(struct netmap_generic_adapter *na)
{
	ND("called");
}
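/*
 * Usage sketch for the mitigation API above (the calling code shown is
 * assumed, not part of this file): a generic rx handler could coalesce
 * notifications roughly as
 *
 *	if (!netmap_mitigation_active(gna)) {
 *		netmap_rx_irq(ifp, 0, &work_done);	// notify now
 *		netmap_mitigation_start(gna);
 *	} else {
 *		gna->mit_pending = 1;	// defer until the timer fires
 *	}
 *
 * Since netmap_mitigation_active() always returns 0 here, every packet
 * currently results in an immediate notification.
 */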
/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	D("handle %p size %jd prot %d foff %jd",
		handle, (intmax_t)size, prot, (intmax_t)foff);
	dev_ref(vmh->dev);
	return 0;
}


static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	D("handle %p", handle);
	netmap_dtor(priv);
	free(vmh, M_DEVBUF);
	dev_rel(dev);
}

static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;
	vm_pindex_t pidx;

	ND("object %p offset %jd prot %d mres %p",
		object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it
		 * with the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK	VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK		VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_lock(*mres);
		vm_page_free(*mres);
		vm_page_unlock(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}


static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};


static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
{
	int error;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	vm_object_t obj;

	D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		(intmax_t)*foff, (intmax_t)objsize, objp, prot);

	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv((void **)&priv);
	if (error)
		goto err_unlock;
	vmh->priv = priv;
	priv->np_refcount++;
	NMG_UNLOCK();

	error = netmap_get_memory(priv);
	if (error)
		goto err_deref;

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		*foff, NULL);
	if (obj == NULL) {
		D("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refcount--;
err_unlock:
	NMG_UNLOCK();
	free(vmh, M_DEVBUF);
	return error;
}
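/*
 * Userspace view (a sketch using the documented netmap API; the "em0"
 * name is just an example): the d_mmap_single handler above is what
 * ultimately backs
 *
 *	fd = open("/dev/netmap", O_RDWR);
 *	strcpy(req.nr_name, "em0");
 *	ioctl(fd, NIOCREGIF, &req);
 *	mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
 *		MAP_SHARED, fd, 0);
 *
 * Each mmap() gets its own cdev_pager object and vm handle, and pages
 * are then materialized lazily through netmap_dev_pager_fault().
 */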
/* XXX can we remove this ? */
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}


static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	(void)dev;
	(void)oflags;
	(void)devtype;
	(void)td;

	/* XXX wait or nowait ? */
	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error)
		return error;

	priv->np_refcount = 1;

	return 0;
}


struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_poll = netmap_poll,
	.d_close = netmap_close,
};


/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}


DEV_MODULE(netmap, netmap_loader, NULL);
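/*
 * Load/unload sketch (shell commands, not part of this file): with the
 * module built as netmap.ko,
 *
 *	# kldload ./netmap.ko	(netmap_loader(MOD_LOAD) -> netmap_init())
 *	# kldunload netmap	(netmap_loader(MOD_UNLOAD) -> netmap_fini())
 *
 * DEV_MODULE() registers netmap_loader() as the module event handler
 * for the netmap character device.
 */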