1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2004-2006 Kip Macy
5 * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/sockio.h>
36 #include <sys/limits.h>
37 #include <sys/mbuf.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/kernel.h>
41 #include <sys/socket.h>
42 #include <sys/sysctl.h>
43 #include <sys/taskqueue.h>
44
45 #include <net/if.h>
46 #include <net/if_var.h>
47 #include <net/if_arp.h>
48 #include <net/ethernet.h>
49 #include <net/if_media.h>
50 #include <net/bpf.h>
51 #include <net/if_types.h>
52
53 #include <netinet/in.h>
54 #include <netinet/ip.h>
55 #include <netinet/if_ether.h>
56 #include <netinet/tcp.h>
57 #include <netinet/tcp_lro.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61
62 #include <sys/bus.h>
63
64 #include <xen/xen-os.h>
65 #include <xen/hypervisor.h>
66 #include <xen/xen_intr.h>
67 #include <xen/gnttab.h>
68 #include <contrib/xen/memory.h>
69 #include <contrib/xen/io/netif.h>
70 #include <xen/xenbus/xenbusvar.h>
71
72 #include <machine/bus.h>
73
74 #include "xenbus_if.h"
75
76 /* Features supported by all backends. TSO and LRO can be negotiated */
77 #define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
78
79 #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
80 #define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE)
81
82 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
83
84 /*
85 * Should the driver do LRO on the RX end?
86 * This can be toggled on the fly, but the
87 * interface must be reset (down/up) for it
88 * to take effect.
89 */
90 static int xn_enable_lro = 1;
91 TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
92
93 /*
94 * Number of pairs of queues.
95 */
96 static unsigned long xn_num_queues = 4;
97 TUNABLE_ULONG("hw.xn.num_queues", &xn_num_queues);
98
99 /**
100 * \brief The maximum allowed data fragments in a single transmit
101 * request.
102 *
103 * This limit is imposed by the backend driver. We assume here that
104 * we are dealing with a Linux driver domain and have set our limit
105 * to mirror the Linux MAX_SKB_FRAGS constant.
106 */
107 #define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
108
109 #define RX_COPY_THRESHOLD 256
110
111 #define net_ratelimit() 0
112
113 struct netfront_rxq;
114 struct netfront_txq;
115 struct netfront_info;
116 struct netfront_rx_info;
117
118 static void xn_txeof(struct netfront_txq *);
119 static void xn_rxeof(struct netfront_rxq *);
120 static void xn_alloc_rx_buffers(struct netfront_rxq *);
121 static void xn_alloc_rx_buffers_callout(void *arg);
122
123 static void xn_release_rx_bufs(struct netfront_rxq *);
124 static void xn_release_tx_bufs(struct netfront_txq *);
125
126 static void xn_rxq_intr(struct netfront_rxq *);
127 static void xn_txq_intr(struct netfront_txq *);
128 static void xn_intr(void *);
129 static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *);
130 static int xn_ioctl(if_t, u_long, caddr_t);
131 static void xn_ifinit_locked(struct netfront_info *);
132 static void xn_ifinit(void *);
133 static void xn_stop(struct netfront_info *);
134 static void xn_query_features(struct netfront_info *np);
135 static int xn_configure_features(struct netfront_info *np);
136 static void netif_free(struct netfront_info *info);
137 static int netfront_detach(device_t dev);
138
139 static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *);
140 static int xn_txq_mq_start(if_t, struct mbuf *);
141
142 static int talk_to_backend(device_t dev, struct netfront_info *info);
143 static int create_netdev(device_t dev);
144 static void netif_disconnect_backend(struct netfront_info *info);
145 static int setup_device(device_t dev, struct netfront_info *info,
146 unsigned long);
147 static int xn_ifmedia_upd(if_t ifp);
148 static void xn_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
149
150 static int xn_connect(struct netfront_info *);
151 static void xn_kick_rings(struct netfront_info *);
152
153 static int xn_get_responses(struct netfront_rxq *,
154 struct netfront_rx_info *, RING_IDX, RING_IDX *,
155 struct mbuf **);
156
157 #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
158
159 #define INVALID_P2M_ENTRY (~0UL)
160 #define XN_QUEUE_NAME_LEN 8 /* xn{t,r}x_%u, allow for two digits */
161 struct netfront_rxq {
162 struct netfront_info *info;
163 u_int id;
164 char name[XN_QUEUE_NAME_LEN];
165 struct mtx lock;
166
167 int ring_ref;
168 netif_rx_front_ring_t ring;
169 xen_intr_handle_t xen_intr_handle;
170
171 grant_ref_t gref_head;
172 grant_ref_t grant_ref[NET_RX_RING_SIZE + 1];
173
174 struct mbuf *mbufs[NET_RX_RING_SIZE + 1];
175
176 struct lro_ctrl lro;
177
178 struct callout rx_refill;
179 };
180
181 struct netfront_txq {
182 struct netfront_info *info;
183 u_int id;
184 char name[XN_QUEUE_NAME_LEN];
185 struct mtx lock;
186
187 int ring_ref;
188 netif_tx_front_ring_t ring;
189 xen_intr_handle_t xen_intr_handle;
190
191 grant_ref_t gref_head;
192 grant_ref_t grant_ref[NET_TX_RING_SIZE + 1];
193
194 struct mbuf *mbufs[NET_TX_RING_SIZE + 1];
195 int mbufs_cnt;
196 struct buf_ring *br;
197
198 struct taskqueue *tq;
199 struct task defrtask;
200
201 bus_dma_segment_t segs[MAX_TX_REQ_FRAGS];
202 struct mbuf_xennet {
203 struct m_tag tag;
204 bus_dma_tag_t dma_tag;
205 bus_dmamap_t dma_map;
206 struct netfront_txq *txq;
207 SLIST_ENTRY(mbuf_xennet) next;
208 u_int count;
209 } xennet_tag[NET_TX_RING_SIZE + 1];
210 SLIST_HEAD(, mbuf_xennet) tags;
211
212 bool full;
213 };
214
215 struct netfront_info {
216 if_t xn_ifp;
217
218 struct mtx sc_lock;
219
220 u_int num_queues;
221 struct netfront_rxq *rxq;
222 struct netfront_txq *txq;
223
224 u_int carrier;
225 u_int maxfrags;
226
227 device_t xbdev;
228 uint8_t mac[ETHER_ADDR_LEN];
229
230 int xn_if_flags;
231
232 struct ifmedia sc_media;
233
234 bus_dma_tag_t dma_tag;
235
236 bool xn_reset;
237 };
238
239 struct netfront_rx_info {
240 struct netif_rx_response rx;
241 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
242 };
243
244 #define XN_RX_LOCK(_q) mtx_lock(&(_q)->lock)
245 #define XN_RX_UNLOCK(_q) mtx_unlock(&(_q)->lock)
246
247 #define XN_TX_LOCK(_q) mtx_lock(&(_q)->lock)
248 #define XN_TX_TRYLOCK(_q) mtx_trylock(&(_q)->lock)
249 #define XN_TX_UNLOCK(_q) mtx_unlock(&(_q)->lock)
250
251 #define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock);
252 #define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock);
253
254 #define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED);
255 #define XN_RX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED);
256 #define XN_TX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED);
257
258 #define netfront_carrier_on(netif) ((netif)->carrier = 1)
259 #define netfront_carrier_off(netif) ((netif)->carrier = 0)
260 #define netfront_carrier_ok(netif) ((netif)->carrier)
261
262 /* Helper functions for acquiring/freeing slots in the tx/rx mbuf free lists. */
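/*
 * The mbufs[] arrays double as free lists: slot 0 is the list head and each
 * free slot stores the index of the next free slot cast to a struct mbuf
 * pointer, which is why pointer values <= NET_TX_RING_SIZE are treated as
 * free-list indices rather than mbufs elsewhere in this file.
 */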
263
264 static inline void
265 add_id_to_freelist(struct mbuf **list, uintptr_t id)
266 {
267
268 KASSERT(id != 0,
269 ("%s: the head item (0) must always be free.", __func__));
270 list[id] = list[0];
271 list[0] = (struct mbuf *)id;
272 }
273
274 static inline unsigned short
275 get_id_from_freelist(struct mbuf **list)
276 {
277 uintptr_t id;
278
279 id = (uintptr_t)list[0];
280 KASSERT(id != 0,
281 ("%s: the head item (0) must always remain free.", __func__));
282 list[0] = list[id];
283 return (id);
284 }
285
286 static inline int
287 xn_rxidx(RING_IDX idx)
288 {
289
290 return idx & (NET_RX_RING_SIZE - 1);
291 }
292
293 static inline struct mbuf *
294 xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri)
295 {
296 int i;
297 struct mbuf *m;
298
299 i = xn_rxidx(ri);
300 m = rxq->mbufs[i];
301 rxq->mbufs[i] = NULL;
302 return (m);
303 }
304
305 static inline grant_ref_t
306 xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri)
307 {
308 int i = xn_rxidx(ri);
309 grant_ref_t ref = rxq->grant_ref[i];
310
311 KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
312 rxq->grant_ref[i] = GRANT_REF_INVALID;
313 return (ref);
314 }
315
316 #define MTAG_COOKIE 1218492000
317 #define MTAG_XENNET 0
318
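/*
 * Transmitted mbuf chains carry an mbuf_xennet m_tag (identified by
 * MTAG_COOKIE/MTAG_XENNET) that holds a reference count, the DMA map used
 * to load the chain and a pointer to the owning transmit queue.  mbuf_grab()
 * and mbuf_release() adjust that count once per granted fragment, so the
 * chain and its DMA map are only torn down by tag_free() after the backend
 * has retired every fragment referencing it.
 */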
319 static void mbuf_grab(struct mbuf *m)
320 {
321 struct mbuf_xennet *ref;
322
323 ref = (struct mbuf_xennet *)m_tag_locate(m, MTAG_COOKIE,
324 MTAG_XENNET, NULL);
325 KASSERT(ref != NULL, ("Cannot find refcount"));
326 ref->count++;
327 }
328
329 static void mbuf_release(struct mbuf *m)
330 {
331 struct mbuf_xennet *ref;
332
333 ref = (struct mbuf_xennet *)m_tag_locate(m, MTAG_COOKIE,
334 MTAG_XENNET, NULL);
335 KASSERT(ref != NULL, ("Cannot find refcount"));
336 KASSERT(ref->count > 0, ("Invalid reference count"));
337
338 if (--ref->count == 0) {
339 /*
340 * Explicitly free the tag while we hold the tx queue lock.
341 * This ensures that the tag is deleted promptly in case
342 * something else is holding extra references to the mbuf chain,
343 * such as netmap.
344 */
345 m_tag_delete(m, &ref->tag);
346 m_freem(m);
347 }
348 }
349
350 static void tag_free(struct m_tag *t)
351 {
352 struct mbuf_xennet *ref = (struct mbuf_xennet *)t;
353
354 KASSERT(ref->count == 0, ("Free mbuf tag with pending refcnt"));
355 bus_dmamap_sync(ref->dma_tag, ref->dma_map, BUS_DMASYNC_POSTWRITE);
356 bus_dmamap_destroy(ref->dma_tag, ref->dma_map);
357 SLIST_INSERT_HEAD(&ref->txq->tags, ref, next);
358 }
359
360 #define IPRINTK(fmt, args...) \
361 printf("[XEN] " fmt, ##args)
362 #ifdef INVARIANTS
363 #define WPRINTK(fmt, args...) \
364 printf("[XEN] " fmt, ##args)
365 #else
366 #define WPRINTK(fmt, args...)
367 #endif
368 #ifdef DEBUG
369 #define DPRINTK(fmt, args...) \
370 printf("[XEN] %s: " fmt, __func__, ##args)
371 #else
372 #define DPRINTK(fmt, args...)
373 #endif
374
375 /**
376 * Read the 'mac' node at the given device's node in the store, and parse that
377 * as colon-separated octets, placing the result in the given mac array. mac
378 * must be a preallocated array of length ETHER_ADDR_LEN.
379 * Return 0 on success, or errno on error.
380 */
381 static int
382 xen_net_read_mac(device_t dev, uint8_t mac[])
383 {
384 int error, i;
385 char *s, *e, *macstr;
386 const char *path;
387
388 path = xenbus_get_node(dev);
389 error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
390 if (error == ENOENT) {
391 /*
392 * Deal with missing mac XenStore nodes on devices with
393 * HVM emulation (the 'ioemu' configuration attribute)
394 * enabled.
395 *
396 * The HVM emulator may execute in a stub device model
397 * domain which lacks the permission, only given to Dom0,
398 * to update the guest's XenStore tree. For this reason,
399 * the HVM emulator doesn't even attempt to write the
400 * front-side mac node, even when operating in Dom0.
401 * However, there should always be a mac listed in the
402 * backend tree. Fallback to this version if our query
403 * of the front side XenStore location doesn't find
404 * anything.
405 */
406 path = xenbus_get_otherend_path(dev);
407 error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
408 }
409 if (error != 0) {
410 xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
411 return (error);
412 }
413
414 s = macstr;
415 for (i = 0; i < ETHER_ADDR_LEN; i++) {
416 mac[i] = strtoul(s, &e, 16);
417 if (s == e || (e[0] != ':' && e[0] != 0)) {
418 free(macstr, M_XENBUS);
419 return (ENOENT);
420 }
421 s = &e[1];
422 }
423 free(macstr, M_XENBUS);
424 return (0);
425 }
426
427 /**
428 * Entry point to this code when a new device is created. Allocate the basic
429 * structures and the ring buffers for communication with the backend, and
430 * inform the backend of the appropriate details for those. Switch to
431 * Connected state.
432 */
433 static int
434 netfront_probe(device_t dev)
435 {
436
437 if (xen_pv_nics_disabled())
438 return (ENXIO);
439
440 if (!strcmp(xenbus_get_type(dev), "vif")) {
441 device_set_desc(dev, "Virtual Network Interface");
442 return (0);
443 }
444
445 return (ENXIO);
446 }
447
448 static int
449 netfront_attach(device_t dev)
450 {
451 int err;
452
453 err = create_netdev(dev);
454 if (err != 0) {
455 xenbus_dev_fatal(dev, err, "creating netdev");
456 return (err);
457 }
458
459 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
460 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
461 OID_AUTO, "enable_lro", CTLFLAG_RW,
462 &xn_enable_lro, 0, "Large Receive Offload");
463
464 SYSCTL_ADD_ULONG(device_get_sysctl_ctx(dev),
465 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
466 OID_AUTO, "num_queues", CTLFLAG_RD,
467 &xn_num_queues, "Number of pairs of queues");
468
469 return (0);
470 }
471
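/*
 * Quiesce the interface before suspending: take every queue lock and mark
 * the carrier down so that no further requests are placed on the rings.
 */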
472 static int
473 netfront_suspend(device_t dev)
474 {
475 struct netfront_info *np = device_get_softc(dev);
476 u_int i;
477
478 for (i = 0; i < np->num_queues; i++) {
479 XN_RX_LOCK(&np->rxq[i]);
480 XN_TX_LOCK(&np->txq[i]);
481 }
482 netfront_carrier_off(np);
483 for (i = 0; i < np->num_queues; i++) {
484 XN_RX_UNLOCK(&np->rxq[i]);
485 XN_TX_UNLOCK(&np->txq[i]);
486 }
487 return (0);
488 }
489
490 /**
491 * We are reconnecting to the backend, due to a suspend/resume, or a backend
492 * driver restart. We tear down our netif structure and recreate it, but
493 * leave the device-layer structures intact so that this is transparent to the
494 * rest of the kernel.
495 */
496 static int
497 netfront_resume(device_t dev)
498 {
499 struct netfront_info *info = device_get_softc(dev);
500 u_int i;
501
502 if (xen_suspend_cancelled) {
503 for (i = 0; i < info->num_queues; i++) {
504 XN_RX_LOCK(&info->rxq[i]);
505 XN_TX_LOCK(&info->txq[i]);
506 }
507 netfront_carrier_on(info);
508 for (i = 0; i < info->num_queues; i++) {
509 XN_RX_UNLOCK(&info->rxq[i]);
510 XN_TX_UNLOCK(&info->txq[i]);
511 }
512 return (0);
513 }
514
515 netif_disconnect_backend(info);
516 return (0);
517 }
518
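/*
 * Write the per-queue ring references and event channel to XenStore.  With a
 * single queue pair the keys are written directly under the device node;
 * with multiple pairs each queue gets its own "<node>/queue-<id>" directory,
 * selected by the "hierarchy" argument.
 */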
519 static int
520 write_queue_xenstore_keys(device_t dev,
521 struct netfront_rxq *rxq,
522 struct netfront_txq *txq,
523 struct xs_transaction *xst, bool hierarchy)
524 {
525 int err;
526 const char *message;
527 const char *node = xenbus_get_node(dev);
528 char *path;
529 size_t path_size;
530
531 KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids"));
532 /* Split event channel support is not yet there. */
533 KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle,
534 ("Split event channels are not supported"));
535
536 if (hierarchy) {
537 path_size = strlen(node) + 10;
538 path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
539 snprintf(path, path_size, "%s/queue-%u", node, rxq->id);
540 } else {
541 path_size = strlen(node) + 1;
542 path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
543 snprintf(path, path_size, "%s", node);
544 }
545
546 err = xs_printf(*xst, path, "tx-ring-ref","%u", txq->ring_ref);
547 if (err != 0) {
548 message = "writing tx ring-ref";
549 goto error;
550 }
551 err = xs_printf(*xst, path, "rx-ring-ref","%u", rxq->ring_ref);
552 if (err != 0) {
553 message = "writing rx ring-ref";
554 goto error;
555 }
556 err = xs_printf(*xst, path, "event-channel", "%u",
557 xen_intr_port(rxq->xen_intr_handle));
558 if (err != 0) {
559 message = "writing event-channel";
560 goto error;
561 }
562
563 free(path, M_DEVBUF);
564
565 return (0);
566
567 error:
568 free(path, M_DEVBUF);
569 xenbus_dev_fatal(dev, err, "%s", message);
570
571 return (err);
572 }
573
574 /* Common code used when first setting up, and when resuming. */
575 static int
576 talk_to_backend(device_t dev, struct netfront_info *info)
577 {
578 const char *message;
579 struct xs_transaction xst;
580 const char *node = xenbus_get_node(dev);
581 int err;
582 unsigned long num_queues, max_queues = 0;
583 unsigned int i;
584
585 err = xen_net_read_mac(dev, info->mac);
586 if (err != 0) {
587 xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
588 goto out;
589 }
590
591 err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev),
592 "multi-queue-max-queues", NULL, "%lu", &max_queues);
593 if (err != 0)
594 max_queues = 1;
595 num_queues = xn_num_queues;
596 if (num_queues > max_queues)
597 num_queues = max_queues;
598
599 err = setup_device(dev, info, num_queues);
600 if (err != 0) {
601 xenbus_dev_fatal(dev, err, "setup device");
602 goto out;
603 }
604
605 again:
606 err = xs_transaction_start(&xst);
607 if (err != 0) {
608 xenbus_dev_fatal(dev, err, "starting transaction");
609 goto free;
610 }
611
612 if (info->num_queues == 1) {
613 err = write_queue_xenstore_keys(dev, &info->rxq[0],
614 &info->txq[0], &xst, false);
615 if (err != 0)
616 goto abort_transaction_no_def_error;
617 } else {
618 err = xs_printf(xst, node, "multi-queue-num-queues",
619 "%u", info->num_queues);
620 if (err != 0) {
621 message = "writing multi-queue-num-queues";
622 goto abort_transaction;
623 }
624
625 for (i = 0; i < info->num_queues; i++) {
626 err = write_queue_xenstore_keys(dev, &info->rxq[i],
627 &info->txq[i], &xst, true);
628 if (err != 0)
629 goto abort_transaction_no_def_error;
630 }
631 }
632
633 err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
634 if (err != 0) {
635 message = "writing request-rx-copy";
636 goto abort_transaction;
637 }
638 err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
639 if (err != 0) {
640 message = "writing feature-rx-notify";
641 goto abort_transaction;
642 }
643 err = xs_printf(xst, node, "feature-sg", "%d", 1);
644 if (err != 0) {
645 message = "writing feature-sg";
646 goto abort_transaction;
647 }
648 if ((if_getcapenable(info->xn_ifp) & IFCAP_LRO) != 0) {
649 err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
650 if (err != 0) {
651 message = "writing feature-gso-tcpv4";
652 goto abort_transaction;
653 }
654 }
655 if ((if_getcapenable(info->xn_ifp) & IFCAP_RXCSUM) == 0) {
656 err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1);
657 if (err != 0) {
658 message = "writing feature-no-csum-offload";
659 goto abort_transaction;
660 }
661 }
662
663 err = xs_transaction_end(xst, 0);
664 if (err != 0) {
665 if (err == EAGAIN)
666 goto again;
667 xenbus_dev_fatal(dev, err, "completing transaction");
668 goto free;
669 }
670
671 return 0;
672
673 abort_transaction:
674 xenbus_dev_fatal(dev, err, "%s", message);
675 abort_transaction_no_def_error:
676 xs_transaction_end(xst, 1);
677 free:
678 netif_free(info);
679 out:
680 return (err);
681 }
682
683 static void
684 xn_rxq_intr(struct netfront_rxq *rxq)
685 {
686
687 XN_RX_LOCK(rxq);
688 xn_rxeof(rxq);
689 XN_RX_UNLOCK(rxq);
690 }
691
692 static void
693 xn_txq_start(struct netfront_txq *txq)
694 {
695 struct netfront_info *np = txq->info;
696 if_t ifp = np->xn_ifp;
697
698 XN_TX_LOCK_ASSERT(txq);
699 if (!drbr_empty(ifp, txq->br))
700 xn_txq_mq_start_locked(txq, NULL);
701 }
702
703 static void
704 xn_txq_intr(struct netfront_txq *txq)
705 {
706
707 XN_TX_LOCK(txq);
708 if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring))
709 xn_txeof(txq);
710 xn_txq_start(txq);
711 XN_TX_UNLOCK(txq);
712 }
713
714 static void
715 xn_txq_tq_deferred(void *xtxq, int pending)
716 {
717 struct netfront_txq *txq = xtxq;
718
719 XN_TX_LOCK(txq);
720 xn_txq_start(txq);
721 XN_TX_UNLOCK(txq);
722 }
723
724 static void
725 disconnect_rxq(struct netfront_rxq *rxq)
726 {
727
728 xn_release_rx_bufs(rxq);
729 gnttab_free_grant_references(rxq->gref_head);
730 if (rxq->ring_ref != GRANT_REF_INVALID) {
731 gnttab_end_foreign_access(rxq->ring_ref, NULL);
732 rxq->ring_ref = GRANT_REF_INVALID;
733 }
734 /*
735 * No split event channel support at the moment, handle will
736 * be unbound in tx. So no need to call xen_intr_unbind here,
737 * but we do want to reset the handler to 0.
738 */
739 rxq->xen_intr_handle = 0;
740 }
741
742 static void
743 destroy_rxq(struct netfront_rxq *rxq)
744 {
745
746 callout_drain(&rxq->rx_refill);
747 free(rxq->ring.sring, M_DEVBUF);
748 rxq->ring.sring = NULL;
749 }
750
751 static void
752 destroy_rxqs(struct netfront_info *np)
753 {
754 int i;
755
756 for (i = 0; i < np->num_queues; i++)
757 destroy_rxq(&np->rxq[i]);
758
759 free(np->rxq, M_DEVBUF);
760 np->rxq = NULL;
761 }
762
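/*
 * Allocate and initialize one RX queue per requested queue pair: grant
 * references for the receive buffers, a shared ring page granted to the
 * backend, and the refill callout.  On failure, tear down everything set up
 * so far.
 */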
763 static int
764 setup_rxqs(device_t dev, struct netfront_info *info,
765 unsigned long num_queues)
766 {
767 int q, i;
768 int error;
769 netif_rx_sring_t *rxs;
770 struct netfront_rxq *rxq;
771
772 info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues,
773 M_DEVBUF, M_WAITOK|M_ZERO);
774
775 for (q = 0; q < num_queues; q++) {
776 rxq = &info->rxq[q];
777
778 rxq->id = q;
779 rxq->info = info;
780
781 rxq->gref_head = GNTTAB_LIST_END;
782 rxq->ring_ref = GRANT_REF_INVALID;
783 rxq->ring.sring = NULL;
784 snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q);
785 mtx_init(&rxq->lock, rxq->name, "netfront receive lock",
786 MTX_DEF);
787
788 for (i = 0; i <= NET_RX_RING_SIZE; i++) {
789 rxq->mbufs[i] = NULL;
790 rxq->grant_ref[i] = GRANT_REF_INVALID;
791 }
792
793 /* Start resources allocation */
794
795 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
796 &rxq->gref_head) != 0) {
797 device_printf(dev, "failed to allocate rx grant refs\n");
798 error = ENOMEM;
799 goto fail;
800 }
801
802 rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
803 M_WAITOK|M_ZERO);
804 SHARED_RING_INIT(rxs);
805 FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE);
806
807 error = xenbus_grant_ring(dev, virt_to_mfn(rxs),
808 &rxq->ring_ref);
809 if (error != 0) {
810 device_printf(dev, "failed to grant rx ring page\n");
811 goto fail_grant_ring;
812 }
813
814 callout_init(&rxq->rx_refill, 1);
815 }
816
817 return (0);
818
819 fail_grant_ring:
820 gnttab_free_grant_references(rxq->gref_head);
821 free(rxq->ring.sring, M_DEVBUF);
822 fail:
823 for (; q >= 0; q--) {
824 disconnect_rxq(&info->rxq[q]);
825 destroy_rxq(&info->rxq[q]);
826 }
827
828 free(info->rxq, M_DEVBUF);
829 return (error);
830 }
831
832 static void
833 disconnect_txq(struct netfront_txq *txq)
834 {
835
836 xn_release_tx_bufs(txq);
837 gnttab_free_grant_references(txq->gref_head);
838 if (txq->ring_ref != GRANT_REF_INVALID) {
839 gnttab_end_foreign_access(txq->ring_ref, NULL);
840 txq->ring_ref = GRANT_REF_INVALID;
841 }
842 xen_intr_unbind(&txq->xen_intr_handle);
843 }
844
845 static void
846 destroy_txq(struct netfront_txq *txq)
847 {
848 unsigned int i;
849
850 free(txq->ring.sring, M_DEVBUF);
851 txq->ring.sring = NULL;
852 buf_ring_free(txq->br, M_DEVBUF);
853 txq->br = NULL;
854 if (txq->tq) {
855 taskqueue_drain_all(txq->tq);
856 taskqueue_free(txq->tq);
857 txq->tq = NULL;
858 }
859
860 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
861 bus_dmamap_destroy(txq->info->dma_tag,
862 txq->xennet_tag[i].dma_map);
863 txq->xennet_tag[i].dma_map = NULL;
864 }
865 }
866
867 static void
868 destroy_txqs(struct netfront_info *np)
869 {
870 int i;
871
872 for (i = 0; i < np->num_queues; i++)
873 destroy_txq(&np->txq[i]);
874
875 free(np->txq, M_DEVBUF);
876 np->txq = NULL;
877 }
878
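/*
 * Allocate and initialize one TX queue per requested queue pair: the mbuf
 * free list, per-slot DMA maps and m_tags, grant references, the shared
 * ring, the buf_ring and taskqueue used for multiqueue transmit, and the
 * event channel that is later shared with the corresponding RX queue.
 */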
879 static int
880 setup_txqs(device_t dev, struct netfront_info *info,
881 unsigned long num_queues)
882 {
883 int q, i;
884 int error;
885 netif_tx_sring_t *txs;
886 struct netfront_txq *txq;
887
888 info->txq = malloc(sizeof(struct netfront_txq) * num_queues,
889 M_DEVBUF, M_WAITOK|M_ZERO);
890
891 for (q = 0; q < num_queues; q++) {
892 txq = &info->txq[q];
893
894 txq->id = q;
895 txq->info = info;
896
897 txq->gref_head = GNTTAB_LIST_END;
898 txq->ring_ref = GRANT_REF_INVALID;
899 txq->ring.sring = NULL;
900
901 snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q);
902
903 mtx_init(&txq->lock, txq->name, "netfront transmit lock",
904 MTX_DEF);
905 SLIST_INIT(&txq->tags);
906
907 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
908 txq->mbufs[i] = (void *) ((u_long) i+1);
909 txq->grant_ref[i] = GRANT_REF_INVALID;
910 txq->xennet_tag[i].txq = txq;
911 txq->xennet_tag[i].dma_tag = info->dma_tag;
912 error = bus_dmamap_create(info->dma_tag, 0,
913 &txq->xennet_tag[i].dma_map);
914 if (error != 0) {
915 device_printf(dev,
916 "failed to allocate dma map\n");
917 goto fail;
918 }
919 m_tag_setup(&txq->xennet_tag[i].tag,
920 MTAG_COOKIE, MTAG_XENNET,
921 sizeof(txq->xennet_tag[i]) -
922 sizeof(txq->xennet_tag[i].tag));
923 txq->xennet_tag[i].tag.m_tag_free = &tag_free;
924 SLIST_INSERT_HEAD(&txq->tags, &txq->xennet_tag[i],
925 next);
926 }
927 txq->mbufs[NET_TX_RING_SIZE] = (void *)0;
928
929 /* Start resources allocation. */
930
931 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
932 &txq->gref_head) != 0) {
933 device_printf(dev, "failed to allocate tx grant refs\n");
934 error = ENOMEM;
935 goto fail;
936 }
937
938 txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
939 M_WAITOK|M_ZERO);
940 SHARED_RING_INIT(txs);
941 FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE);
942
943 error = xenbus_grant_ring(dev, virt_to_mfn(txs),
944 &txq->ring_ref);
945 if (error != 0) {
946 device_printf(dev, "failed to grant tx ring\n");
947 goto fail_grant_ring;
948 }
949
950 txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF,
951 M_WAITOK, &txq->lock);
952 TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq);
953
954 txq->tq = taskqueue_create(txq->name, M_WAITOK,
955 taskqueue_thread_enqueue, &txq->tq);
956
957 error = taskqueue_start_threads(&txq->tq, 1, PI_NET,
958 "%s txq %d", device_get_nameunit(dev), txq->id);
959 if (error != 0) {
960 device_printf(dev, "failed to start tx taskq %d\n",
961 txq->id);
962 goto fail_start_thread;
963 }
964
965 error = xen_intr_alloc_and_bind_local_port(dev,
966 xenbus_get_otherend_id(dev), /* filter */ NULL, xn_intr,
967 &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
968 &txq->xen_intr_handle);
969
970 if (error != 0) {
971 device_printf(dev, "xen_intr_alloc_and_bind_local_port failed\n");
972 goto fail_bind_port;
973 }
974 }
975
976 return (0);
977
978 fail_bind_port:
979 taskqueue_drain_all(txq->tq);
980 fail_start_thread:
981 buf_ring_free(txq->br, M_DEVBUF);
982 taskqueue_free(txq->tq);
983 gnttab_end_foreign_access(txq->ring_ref, NULL);
984 fail_grant_ring:
985 gnttab_free_grant_references(txq->gref_head);
986 free(txq->ring.sring, M_DEVBUF);
987 fail:
988 for (; q >= 0; q--) {
989 disconnect_txq(&info->txq[q]);
990 destroy_txq(&info->txq[q]);
991 }
992
993 free(info->txq, M_DEVBUF);
994 return (error);
995 }
996
997 static int
998 setup_device(device_t dev, struct netfront_info *info,
999 unsigned long num_queues)
1000 {
1001 int error;
1002 int q;
1003
1004 if (info->txq)
1005 destroy_txqs(info);
1006
1007 if (info->rxq)
1008 destroy_rxqs(info);
1009
1010 info->num_queues = 0;
1011
1012 error = setup_rxqs(dev, info, num_queues);
1013 if (error != 0)
1014 goto out;
1015 error = setup_txqs(dev, info, num_queues);
1016 if (error != 0)
1017 goto out;
1018
1019 info->num_queues = num_queues;
1020
1021 /* No split event channel at the moment. */
1022 for (q = 0; q < num_queues; q++)
1023 info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;
1024
1025 return (0);
1026
1027 out:
1028 KASSERT(error != 0, ("Error path taken without providing an error code"));
1029 return (error);
1030 }
1031
1032 /**
1033 * Callback received when the backend's state changes.
1034 */
1035 static void
1036 netfront_backend_changed(device_t dev, XenbusState newstate)
1037 {
1038 struct netfront_info *sc = device_get_softc(dev);
1039
1040 DPRINTK("newstate=%d\n", newstate);
1041
1042 CURVNET_SET(if_getvnet(sc->xn_ifp));
1043
1044 switch (newstate) {
1045 case XenbusStateInitialising:
1046 case XenbusStateInitialised:
1047 case XenbusStateUnknown:
1048 case XenbusStateReconfigured:
1049 case XenbusStateReconfiguring:
1050 break;
1051 case XenbusStateInitWait:
1052 if (xenbus_get_state(dev) != XenbusStateInitialising)
1053 break;
1054 if (xn_connect(sc) != 0)
1055 break;
1056 /* Switch to connected state before kicking the rings. */
1057 xenbus_set_state(sc->xbdev, XenbusStateConnected);
1058 xn_kick_rings(sc);
1059 break;
1060 case XenbusStateClosing:
1061 xenbus_set_state(dev, XenbusStateClosed);
1062 break;
1063 case XenbusStateClosed:
1064 if (sc->xn_reset) {
1065 netif_disconnect_backend(sc);
1066 xenbus_set_state(dev, XenbusStateInitialising);
1067 sc->xn_reset = false;
1068 }
1069 break;
1070 case XenbusStateConnected:
1071 #ifdef INET
1072 /*
1073 * If this interface has an ipv4 address, send an arp for it.
1074 * This helps to get the network going again after migrating
1075 * hosts.
1076 */
1077 EVENTHANDLER_INVOKE(iflladdr_event, sc->xn_ifp);
1078 #endif
1079 break;
1080 }
1081
1082 CURVNET_RESTORE();
1083 }
1084
1085 /**
1086 * \brief Verify that there is sufficient space in the Tx ring
1087 * buffer for a maximally sized request to be enqueued.
1088 *
1089 * A transmit request requires a transmit descriptor for each packet
1090 * fragment, plus up to 2 entries for "options" (e.g. TSO).
1091 */
1092 static inline int
1093 xn_tx_slot_available(struct netfront_txq *txq)
1094 {
1095
1096 return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2));
1097 }
1098
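/*
 * Reclaim every transmit buffer still owned by the driver, ending foreign
 * access on its grant reference and returning its slot to the free list.
 */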
1099 static void
1100 xn_release_tx_bufs(struct netfront_txq *txq)
1101 {
1102 int i;
1103
1104 for (i = 1; i <= NET_TX_RING_SIZE; i++) {
1105 struct mbuf *m;
1106
1107 m = txq->mbufs[i];
1108
1109 /*
1110 * We assume that no kernel addresses are
1111 * less than NET_TX_RING_SIZE. Any entry
1112 * in the table that is below this number
1113 * must be an index from free-list tracking.
1114 */
1115 if (((uintptr_t)m) <= NET_TX_RING_SIZE)
1116 continue;
1117 gnttab_end_foreign_access_ref(txq->grant_ref[i]);
1118 gnttab_release_grant_reference(&txq->gref_head,
1119 txq->grant_ref[i]);
1120 txq->grant_ref[i] = GRANT_REF_INVALID;
1121 add_id_to_freelist(txq->mbufs, i);
1122 txq->mbufs_cnt--;
1123 if (txq->mbufs_cnt < 0) {
1124 panic("%s: tx_chain_cnt must be >= 0", __func__);
1125 }
1126 mbuf_release(m);
1127 }
1128 }
1129
1130 static struct mbuf *
1131 xn_alloc_one_rx_buffer(struct netfront_rxq *rxq)
1132 {
1133 struct mbuf *m;
1134
1135 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1136 if (m == NULL)
1137 return NULL;
1138 m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;
1139
1140 return (m);
1141 }
1142
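/*
 * Refill the RX ring with page-sized mbuf clusters, granting each backing
 * page to the backend.  If fewer than NET_RX_SLOTS_MIN requests end up
 * outstanding the refill is retried later from a callout; otherwise the
 * backend is notified if it is waiting for more requests.
 */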
1143 static void
1144 xn_alloc_rx_buffers(struct netfront_rxq *rxq)
1145 {
1146 RING_IDX req_prod;
1147 int notify;
1148
1149 XN_RX_LOCK_ASSERT(rxq);
1150
1151 if (__predict_false(rxq->info->carrier == 0))
1152 return;
1153
1154 for (req_prod = rxq->ring.req_prod_pvt;
1155 req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE;
1156 req_prod++) {
1157 struct mbuf *m;
1158 unsigned short id;
1159 grant_ref_t ref;
1160 struct netif_rx_request *req;
1161 unsigned long pfn;
1162
1163 m = xn_alloc_one_rx_buffer(rxq);
1164 if (m == NULL)
1165 break;
1166
1167 id = xn_rxidx(req_prod);
1168
1169 KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
1170 rxq->mbufs[id] = m;
1171
1172 ref = gnttab_claim_grant_reference(&rxq->gref_head);
1173 KASSERT(ref != GNTTAB_LIST_END,
1174 ("reserved grant references exhausted"));
1175 rxq->grant_ref[id] = ref;
1176
1177 pfn = atop(vtophys(mtod(m, vm_offset_t)));
1178 req = RING_GET_REQUEST(&rxq->ring, req_prod);
1179
1180 gnttab_grant_foreign_access_ref(ref,
1181 xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
1182 req->id = id;
1183 req->gref = ref;
1184 }
1185
1186 rxq->ring.req_prod_pvt = req_prod;
1187
1188 /* Not enough requests? Try again later. */
1189 if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) {
1190 callout_reset_curcpu(&rxq->rx_refill, hz/10,
1191 xn_alloc_rx_buffers_callout, rxq);
1192 return;
1193 }
1194
1195 wmb(); /* barrier so backend sees requests */
1196
1197 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
1198 if (notify)
1199 xen_intr_signal(rxq->xen_intr_handle);
1200 }
1201
1202 static void xn_alloc_rx_buffers_callout(void *arg)
1203 {
1204 struct netfront_rxq *rxq;
1205
1206 rxq = (struct netfront_rxq *)arg;
1207 XN_RX_LOCK(rxq);
1208 xn_alloc_rx_buffers(rxq);
1209 XN_RX_UNLOCK(rxq);
1210 }
1211
1212 static void
1213 xn_release_rx_bufs(struct netfront_rxq *rxq)
1214 {
1215 int i, ref;
1216 struct mbuf *m;
1217
1218 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1219 m = rxq->mbufs[i];
1220
1221 if (m == NULL)
1222 continue;
1223
1224 ref = rxq->grant_ref[i];
1225 if (ref == GRANT_REF_INVALID)
1226 continue;
1227
1228 gnttab_end_foreign_access_ref(ref);
1229 gnttab_release_grant_reference(&rxq->gref_head, ref);
1230 rxq->mbufs[i] = NULL;
1231 rxq->grant_ref[i] = GRANT_REF_INVALID;
1232 m_freem(m);
1233 }
1234 }
1235
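/*
 * Process completed RX responses: reassemble fragmented packets, apply the
 * checksum and TSO hints provided by the backend, replenish the ring, and
 * finally hand the mbufs to LRO or directly to the stack.
 */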
1236 static void
1237 xn_rxeof(struct netfront_rxq *rxq)
1238 {
1239 if_t ifp;
1240 struct netfront_info *np = rxq->info;
1241 #if (defined(INET) || defined(INET6))
1242 struct lro_ctrl *lro = &rxq->lro;
1243 #endif
1244 struct netfront_rx_info rinfo;
1245 struct netif_rx_response *rx = &rinfo.rx;
1246 struct netif_extra_info *extras = rinfo.extras;
1247 RING_IDX i, rp;
1248 struct mbuf *m;
1249 struct mbufq mbufq_rxq, mbufq_errq;
1250 int err, work_to_do;
1251
1252 XN_RX_LOCK_ASSERT(rxq);
1253
1254 if (!netfront_carrier_ok(np))
1255 return;
1256
1257 /* XXX: there should be some sane limit. */
1258 mbufq_init(&mbufq_errq, INT_MAX);
1259 mbufq_init(&mbufq_rxq, INT_MAX);
1260
1261 ifp = np->xn_ifp;
1262
1263 do {
1264 rp = rxq->ring.sring->rsp_prod;
1265 rmb(); /* Ensure we see queued responses up to 'rp'. */
1266
1267 i = rxq->ring.rsp_cons;
1268 while ((i != rp)) {
1269 memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx));
1270 memset(extras, 0, sizeof(rinfo.extras));
1271
1272 m = NULL;
1273 err = xn_get_responses(rxq, &rinfo, rp, &i, &m);
1274
1275 if (__predict_false(err)) {
1276 if (m)
1277 (void )mbufq_enqueue(&mbufq_errq, m);
1278 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1279 continue;
1280 }
1281
1282 m->m_pkthdr.rcvif = ifp;
1283 if (rx->flags & NETRXF_data_validated) {
1284 /*
1285 * According to mbuf(9) the correct way to tell
1286 * the stack that the checksum of an inbound
1287 * packet is correct, without it actually being
1288 * present (because the underlying interface
1289 * doesn't provide it), is to set the
1290 * CSUM_DATA_VALID and CSUM_PSEUDO_HDR flags,
1291 * and the csum_data field to 0xffff.
1292 */
1293 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
1294 | CSUM_PSEUDO_HDR);
1295 m->m_pkthdr.csum_data = 0xffff;
1296 }
1297 if ((rx->flags & NETRXF_extra_info) != 0 &&
1298 (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type ==
1299 XEN_NETIF_EXTRA_TYPE_GSO)) {
1300 m->m_pkthdr.tso_segsz =
1301 extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size;
1302 m->m_pkthdr.csum_flags |= CSUM_TSO;
1303 }
1304
1305 (void )mbufq_enqueue(&mbufq_rxq, m);
1306 }
1307
1308 rxq->ring.rsp_cons = i;
1309
1310 xn_alloc_rx_buffers(rxq);
1311
1312 RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
1313 } while (work_to_do);
1314
1315 mbufq_drain(&mbufq_errq);
1316 /*
1317 * Process all the mbufs after the remapping is complete.
1318 * Break the mbuf chain first though.
1319 */
1320 while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) {
1321 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1322 #if (defined(INET) || defined(INET6))
1323 /* Use LRO if possible */
1324 if ((if_getcapenable(ifp) & IFCAP_LRO) == 0 ||
1325 lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
1326 /*
1327 * If LRO fails, pass up to the stack
1328 * directly.
1329 */
1330 if_input(ifp, m);
1331 }
1332 #else
1333 if_input(ifp, m);
1334 #endif
1335 }
1336
1337 #if (defined(INET) || defined(INET6))
1338 /*
1339 * Flush any outstanding LRO work
1340 */
1341 tcp_lro_flush_all(lro);
1342 #endif
1343 }
1344
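/*
 * Process completed TX responses: reclaim the grant reference and mbuf of
 * every acknowledged slot, re-arm the response event, and restart
 * transmission if the queue had previously filled up.
 */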
1345 static void
1346 xn_txeof(struct netfront_txq *txq)
1347 {
1348 RING_IDX i, prod;
1349 unsigned short id;
1350 if_t ifp;
1351 netif_tx_response_t *txr;
1352 struct mbuf *m;
1353 struct netfront_info *np = txq->info;
1354
1355 XN_TX_LOCK_ASSERT(txq);
1356
1357 if (!netfront_carrier_ok(np))
1358 return;
1359
1360 ifp = np->xn_ifp;
1361
1362 do {
1363 prod = txq->ring.sring->rsp_prod;
1364 rmb(); /* Ensure we see responses up to 'rp'. */
1365
1366 for (i = txq->ring.rsp_cons; i != prod; i++) {
1367 txr = RING_GET_RESPONSE(&txq->ring, i);
1368 if (txr->status == NETIF_RSP_NULL)
1369 continue;
1370
1371 if (txr->status != NETIF_RSP_OKAY) {
1372 printf("%s: WARNING: response is %d!\n",
1373 __func__, txr->status);
1374 }
1375 id = txr->id;
1376 m = txq->mbufs[id];
1377 KASSERT(m != NULL, ("mbuf not found in chain"));
1378 KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
1379 ("mbuf already on the free list, but we're "
1380 "trying to free it again!"));
1381 M_ASSERTVALID(m);
1382
1383 if (__predict_false(gnttab_query_foreign_access(
1384 txq->grant_ref[id]) != 0)) {
1385 panic("%s: grant id %u still in use by the "
1386 "backend", __func__, id);
1387 }
1388 gnttab_end_foreign_access_ref(txq->grant_ref[id]);
1389 gnttab_release_grant_reference(
1390 &txq->gref_head, txq->grant_ref[id]);
1391 txq->grant_ref[id] = GRANT_REF_INVALID;
1392
1393 txq->mbufs[id] = NULL;
1394 add_id_to_freelist(txq->mbufs, id);
1395 txq->mbufs_cnt--;
1396 mbuf_release(m);
1397 /* Only mark the txq active if we've freed up at least one slot to try */
1398 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1399 }
1400 txq->ring.rsp_cons = prod;
1401
1402 /*
1403 * Set a new event, then check for race with update of
1404 * tx_cons. Note that it is essential to schedule a
1405 * callback, no matter how few buffers are pending. Even if
1406 * there is space in the transmit ring, higher layers may
1407 * be blocked because too much data is outstanding: in such
1408 * cases notification from Xen is likely to be the only kick
1409 * that we'll get.
1410 */
1411 txq->ring.sring->rsp_event =
1412 prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1;
1413
1414 mb();
1415 } while (prod != txq->ring.sring->rsp_prod);
1416
1417 if (txq->full &&
1418 ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
1419 txq->full = false;
1420 xn_txq_start(txq);
1421 }
1422 }
1423
1424 static void
1425 xn_intr(void *xsc)
1426 {
1427 struct netfront_txq *txq = xsc;
1428 struct netfront_info *np = txq->info;
1429 struct netfront_rxq *rxq = &np->rxq[txq->id];
1430
1431 /* kick both tx and rx */
1432 xn_rxq_intr(rxq);
1433 xn_txq_intr(txq);
1434 }
1435
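/*
 * Re-post an RX buffer that was not consumed (for example one backing an
 * extra-info slot, or one hit by an error) by moving its mbuf and grant
 * reference to the next free request slot.
 */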
1436 static void
1437 xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m,
1438 grant_ref_t ref)
1439 {
1440 int new = xn_rxidx(rxq->ring.req_prod_pvt);
1441
1442 KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
1443 rxq->mbufs[new] = m;
1444 rxq->grant_ref[new] = ref;
1445 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
1446 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
1447 rxq->ring.req_prod_pvt++;
1448 }
1449
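/*
 * Consume the chain of extra-info slots (currently only GSO metadata) that
 * follows a response flagged with NETRXF_extra_info, recycling the RX
 * buffers that occupied those slots.
 */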
1450 static int
1451 xn_get_extras(struct netfront_rxq *rxq,
1452 struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
1453 {
1454 struct netif_extra_info *extra;
1455
1456 int err = 0;
1457
1458 do {
1459 struct mbuf *m;
1460 grant_ref_t ref;
1461
1462 if (__predict_false(*cons + 1 == rp)) {
1463 err = EINVAL;
1464 break;
1465 }
1466
1467 extra = (struct netif_extra_info *)
1468 RING_GET_RESPONSE(&rxq->ring, ++(*cons));
1469
1470 if (__predict_false(!extra->type ||
1471 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1472 err = EINVAL;
1473 } else {
1474 memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
1475 }
1476
1477 m = xn_get_rx_mbuf(rxq, *cons);
1478 ref = xn_get_rx_ref(rxq, *cons);
1479 xn_move_rx_slot(rxq, m, ref);
1480 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1481
1482 return err;
1483 }
1484
1485 static int
1486 xn_get_responses(struct netfront_rxq *rxq,
1487 struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
1488 struct mbuf **list)
1489 {
1490 struct netif_rx_response *rx = &rinfo->rx;
1491 struct netif_extra_info *extras = rinfo->extras;
1492 struct mbuf *m, *m0, *m_prev;
1493 grant_ref_t ref = xn_get_rx_ref(rxq, *cons);
1494 int frags = 1;
1495 int err = 0;
1496 u_long ret __diagused;
1497
1498 m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons);
1499
1500 if (rx->flags & NETRXF_extra_info) {
1501 err = xn_get_extras(rxq, extras, rp, cons);
1502 }
1503
1504 if (m0 != NULL) {
1505 m0->m_pkthdr.len = 0;
1506 m0->m_next = NULL;
1507 }
1508
1509 for (;;) {
1510 #if 0
1511 DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
1512 rx->status, rx->offset, frags);
1513 #endif
1514 if (__predict_false(rx->status < 0 ||
1515 rx->offset + rx->status > PAGE_SIZE)) {
1516 xn_move_rx_slot(rxq, m, ref);
1517 if (m0 == m)
1518 m0 = NULL;
1519 m = NULL;
1520 err = EINVAL;
1521 goto next_skip_queue;
1522 }
1523
1524 /*
1525 * This definitely indicates a bug, either in this driver or in
1526 * the backend driver. In the future this should flag the bad
1527 * situation to the system controller to reboot the backend.
1528 */
1529 if (ref == GRANT_REF_INVALID) {
1530 printf("%s: Bad rx response id %d.\n", __func__, rx->id);
1531 err = EINVAL;
1532 goto next;
1533 }
1534
1535 ret = gnttab_end_foreign_access_ref(ref);
1536 KASSERT(ret, ("Unable to end access to grant references"));
1537
1538 gnttab_release_grant_reference(&rxq->gref_head, ref);
1539
1540 next:
1541 if (m == NULL)
1542 break;
1543
1544 m->m_len = rx->status;
1545 m->m_data += rx->offset;
1546 m0->m_pkthdr.len += rx->status;
1547
1548 next_skip_queue:
1549 if (!(rx->flags & NETRXF_more_data))
1550 break;
1551
1552 if (*cons + frags == rp) {
1553 if (net_ratelimit())
1554 WPRINTK("Need more frags\n");
1555 err = ENOENT;
1556 printf("%s: cons %u frags %u rp %u, not enough frags\n",
1557 __func__, *cons, frags, rp);
1558 break;
1559 }
1560 /*
1561 * Note that m can be NULL, if rx->status < 0 or if
1562 * rx->offset + rx->status > PAGE_SIZE above.
1563 */
1564 m_prev = m;
1565
1566 rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags);
1567 m = xn_get_rx_mbuf(rxq, *cons + frags);
1568
1569 /*
1570 * m_prev == NULL can happen if rx->status < 0 or if
1571 * rx->offset + rx->status > PAGE_SIZE above.
1572 */
1573 if (m_prev != NULL)
1574 m_prev->m_next = m;
1575
1576 /*
1577 * m0 can be NULL if rx->status < 0 or if rx->offset +
1578 * rx->status > PAGE_SIZE above.
1579 */
1580 if (m0 == NULL)
1581 m0 = m;
1582 m->m_next = NULL;
1583 ref = xn_get_rx_ref(rxq, *cons + frags);
1584 frags++;
1585 }
1586 *list = m0;
1587 *cons += frags;
1588
1589 return (err);
1590 }
1591
1592 /**
1593 * Given an mbuf chain, make sure we have enough room and then push
1594 * it onto the transmit ring.
1595 */
1596 static int
1597 xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
1598 {
1599 struct netfront_info *np = txq->info;
1600 if_t ifp = np->xn_ifp;
1601 int otherend_id, error, nfrags;
1602 bus_dma_segment_t *segs = txq->segs;
1603 struct mbuf_xennet *tag;
1604 bus_dmamap_t map;
1605 unsigned int i;
1606
1607 KASSERT(!SLIST_EMPTY(&txq->tags), ("no tags available"));
1608 tag = SLIST_FIRST(&txq->tags);
1609 SLIST_REMOVE_HEAD(&txq->tags, next);
1610 KASSERT(tag->count == 0, ("tag already in-use"));
1611 map = tag->dma_map;
1612 error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs,
1613 &nfrags, 0);
1614 if (error == EFBIG || nfrags > np->maxfrags) {
1615 struct mbuf *m;
1616
1617 bus_dmamap_unload(np->dma_tag, map);
1618 m = m_defrag(m_head, M_NOWAIT);
1619 if (!m) {
1620 /*
1621 * Defrag failed, so free the mbuf and
1622 * therefore drop the packet.
1623 */
1624 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1625 m_freem(m_head);
1626 return (EMSGSIZE);
1627 }
1628 m_head = m;
1629 error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs,
1630 &nfrags, 0);
1631 if (error != 0 || nfrags > np->maxfrags) {
1632 bus_dmamap_unload(np->dma_tag, map);
1633 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1634 m_freem(m_head);
1635 return (error ?: EFBIG);
1636 }
1637 } else if (error != 0) {
1638 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1639 m_freem(m_head);
1640 return (error);
1641 }
1642
1643 /**
1644 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
1645 * of mbufs longer than Linux can handle. Make sure we don't
1646 * pass a too-long chain over to the other side by dropping the
1647 * packet. It doesn't look like there is currently a way to
1648 * tell the TCP stack to generate a shorter chain of packets.
1649 */
1650 if (nfrags > MAX_TX_REQ_FRAGS) {
1651 #ifdef DEBUG
1652 printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
1653 "won't be able to handle it, dropping\n",
1654 __func__, nfrags, MAX_TX_REQ_FRAGS);
1655 #endif
1656 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1657 bus_dmamap_unload(np->dma_tag, map);
1658 m_freem(m_head);
1659 return (EMSGSIZE);
1660 }
1661
1662 /*
1663 * This check should be redundant. We've already verified that we
1664 * have enough slots in the ring to handle a packet of maximum
1665 * size, and that our packet is less than the maximum size. Keep
1666 * it in here as an assert for now just to make certain that
1667 * chain_cnt is accurate.
1668 */
1669 KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE,
1670 ("%s: chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
1671 "(%d)!", __func__, (int) txq->mbufs_cnt,
1672 (int) nfrags, (int) NET_TX_RING_SIZE));
1673
1674 /*
1675 * Start packing the mbufs in this chain into
1676 * the fragment pointers. Stop when we run out
1677 * of fragments or hit the end of the mbuf chain.
1678 */
1679 otherend_id = xenbus_get_otherend_id(np->xbdev);
1680 m_tag_prepend(m_head, &tag->tag);
1681 for (i = 0; i < nfrags; i++) {
1682 netif_tx_request_t *tx;
1683 uintptr_t id;
1684 grant_ref_t ref;
1685 u_long mfn; /* XXX Wrong type? */
1686
1687 tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
1688 id = get_id_from_freelist(txq->mbufs);
1689 if (id == 0)
1690 panic("%s: was allocated the freelist head!\n",
1691 __func__);
1692 txq->mbufs_cnt++;
1693 if (txq->mbufs_cnt > NET_TX_RING_SIZE)
1694 panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
1695 __func__);
1696 mbuf_grab(m_head);
1697 txq->mbufs[id] = m_head;
1698 tx->id = id;
1699 ref = gnttab_claim_grant_reference(&txq->gref_head);
1700 KASSERT((short)ref >= 0, ("Negative ref"));
1701 mfn = atop(segs[i].ds_addr);
1702 gnttab_grant_foreign_access_ref(ref, otherend_id,
1703 mfn, GNTMAP_readonly);
1704 tx->gref = txq->grant_ref[id] = ref;
1705 tx->offset = segs[i].ds_addr & PAGE_MASK;
1706 KASSERT(tx->offset + segs[i].ds_len <= PAGE_SIZE,
1707 ("mbuf segment crosses a page boundary"));
1708 tx->flags = 0;
1709 if (i == 0) {
1710 /*
1711 * The first fragment has the entire packet
1712 * size, subsequent fragments have just the
1713 * fragment size. The backend works out the
1714 * true size of the first fragment by
1715 * subtracting the sizes of the other
1716 * fragments.
1717 */
1718 tx->size = m_head->m_pkthdr.len;
1719
1720 /*
1721 * The first fragment contains the checksum flags
1722 * and is optionally followed by extra data for
1723 * TSO etc.
1724 */
1725 /**
1726 * CSUM_TSO requires checksum offloading.
1727 * Some versions of FreeBSD fail to
1728 * set CSUM_TCP in the CSUM_TSO case,
1729 * so we have to test for CSUM_TSO
1730 * explicitly.
1731 */
1732 if (m_head->m_pkthdr.csum_flags
1733 & (CSUM_DELAY_DATA | CSUM_TSO)) {
1734 tx->flags |= (NETTXF_csum_blank
1735 | NETTXF_data_validated);
1736 }
1737 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1738 struct netif_extra_info *gso =
1739 (struct netif_extra_info *)
1740 RING_GET_REQUEST(&txq->ring,
1741 ++txq->ring.req_prod_pvt);
1742
1743 tx->flags |= NETTXF_extra_info;
1744
1745 gso->u.gso.size = m_head->m_pkthdr.tso_segsz;
1746 gso->u.gso.type =
1747 XEN_NETIF_GSO_TYPE_TCPV4;
1748 gso->u.gso.pad = 0;
1749 gso->u.gso.features = 0;
1750
1751 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
1752 gso->flags = 0;
1753 }
1754 } else {
1755 tx->size = segs[i].ds_len;
1756 }
1757 if (i != nfrags - 1)
1758 tx->flags |= NETTXF_more_data;
1759
1760 txq->ring.req_prod_pvt++;
1761 }
1762 bus_dmamap_sync(np->dma_tag, map, BUS_DMASYNC_PREWRITE);
1763 BPF_MTAP(ifp, m_head);
1764
1765 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1766 if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len);
1767 if (m_head->m_flags & M_MCAST)
1768 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1769
1770 xn_txeof(txq);
1771
1772 return (0);
1773 }
1774
1775 /* equivalent of network_open() in Linux */
1776 static void
1777 xn_ifinit_locked(struct netfront_info *np)
1778 {
1779 if_t ifp;
1780 int i;
1781 struct netfront_rxq *rxq;
1782
1783 XN_LOCK_ASSERT(np);
1784
1785 ifp = np->xn_ifp;
1786
1787 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING || !netfront_carrier_ok(np))
1788 return;
1789
1790 xn_stop(np);
1791
1792 for (i = 0; i < np->num_queues; i++) {
1793 rxq = &np->rxq[i];
1794 XN_RX_LOCK(rxq);
1795 xn_alloc_rx_buffers(rxq);
1796 rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
1797 if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
1798 xn_rxeof(rxq);
1799 XN_RX_UNLOCK(rxq);
1800 }
1801
1802 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1803 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1804 if_link_state_change(ifp, LINK_STATE_UP);
1805 }
1806
1807 static void
1808 xn_ifinit(void *xsc)
1809 {
1810 struct netfront_info *sc = xsc;
1811
1812 XN_LOCK(sc);
1813 xn_ifinit_locked(sc);
1814 XN_UNLOCK(sc);
1815 }
1816
1817 static int
1818 xn_ioctl(if_t ifp, u_long cmd, caddr_t data)
1819 {
1820 struct netfront_info *sc = if_getsoftc(ifp);
1821 struct ifreq *ifr = (struct ifreq *) data;
1822 device_t dev;
1823 #ifdef INET
1824 struct ifaddr *ifa = (struct ifaddr *)data;
1825 #endif
1826 int mask, error = 0, reinit;
1827
1828 dev = sc->xbdev;
1829
1830 switch(cmd) {
1831 case SIOCSIFADDR:
1832 #ifdef INET
1833 XN_LOCK(sc);
1834 if (ifa->ifa_addr->sa_family == AF_INET) {
1835 if_setflagbits(ifp, IFF_UP, 0);
1836 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1837 xn_ifinit_locked(sc);
1838 arp_ifinit(ifp, ifa);
1839 XN_UNLOCK(sc);
1840 } else {
1841 XN_UNLOCK(sc);
1842 #endif
1843 error = ether_ioctl(ifp, cmd, data);
1844 #ifdef INET
1845 }
1846 #endif
1847 break;
1848 case SIOCSIFMTU:
1849 if (if_getmtu(ifp) == ifr->ifr_mtu)
1850 break;
1851
1852 if_setmtu(ifp, ifr->ifr_mtu);
1853 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1854 xn_ifinit(sc);
1855 break;
1856 case SIOCSIFFLAGS:
1857 XN_LOCK(sc);
1858 if (if_getflags(ifp) & IFF_UP) {
1859 /*
1860 * If only the state of the PROMISC flag changed,
1861 * then just use the 'set promisc mode' command
1862 * instead of reinitializing the entire NIC. Doing
1863 * a full re-init means reloading the firmware and
1864 * waiting for it to start up, which may take a
1865 * second or two.
1866 */
1867 xn_ifinit_locked(sc);
1868 } else {
1869 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1870 xn_stop(sc);
1871 }
1872 }
1873 sc->xn_if_flags = if_getflags(ifp);
1874 XN_UNLOCK(sc);
1875 break;
1876 case SIOCSIFCAP:
1877 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1878 reinit = 0;
1879
1880 if (mask & IFCAP_TXCSUM) {
1881 if_togglecapenable(ifp, IFCAP_TXCSUM);
1882 if_togglehwassist(ifp, XN_CSUM_FEATURES);
1883 }
1884 if (mask & IFCAP_TSO4) {
1885 if_togglecapenable(ifp, IFCAP_TSO4);
1886 if_togglehwassist(ifp, CSUM_TSO);
1887 }
1888
1889 if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) {
1890 /* These Rx features require us to renegotiate. */
1891 reinit = 1;
1892
1893 if (mask & IFCAP_RXCSUM)
1894 if_togglecapenable(ifp, IFCAP_RXCSUM);
1895 if (mask & IFCAP_LRO)
1896 if_togglecapenable(ifp, IFCAP_LRO);
1897 }
1898
1899 if (reinit == 0)
1900 break;
1901
1902 /*
1903 * We must reset the interface so the backend picks up the
1904 * new features.
1905 */
1906 device_printf(sc->xbdev,
1907 "performing interface reset due to feature change\n");
1908 XN_LOCK(sc);
1909 netfront_carrier_off(sc);
1910 sc->xn_reset = true;
1911 /*
1912 * NB: the pending packet queue is not flushed, since
1913 * the interface should still support the old options.
1914 */
1915 XN_UNLOCK(sc);
1916 /*
1917 * Delete the xenstore nodes that export features.
1918 *
1919 * NB: There's a xenbus state called
1920 * "XenbusStateReconfiguring", which is what we should set
1921 * here. Sadly none of the backends know how to handle it,
1922 * and simply disconnect from the frontend, so we will just
1923 * switch back to XenbusStateInitialising in order to force
1924 * a reconnection.
1925 */
1926 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4");
1927 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload");
1928 xenbus_set_state(dev, XenbusStateClosing);
1929
1930 /*
1931 * Wait for the frontend to reconnect before returning
1932 * from the ioctl. 30s should be more than enough for any
1933 * sane backend to reconnect.
1934 */
1935 error = tsleep(sc, 0, "xn_rst", 30*hz);
1936 break;
1937 case SIOCADDMULTI:
1938 case SIOCDELMULTI:
1939 break;
1940 case SIOCSIFMEDIA:
1941 case SIOCGIFMEDIA:
1942 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1943 break;
1944 default:
1945 error = ether_ioctl(ifp, cmd, data);
1946 }
1947
1948 return (error);
1949 }
1950
1951 static void
1952 xn_stop(struct netfront_info *sc)
1953 {
1954 if_t ifp;
1955
1956 XN_LOCK_ASSERT(sc);
1957
1958 ifp = sc->xn_ifp;
1959
1960 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1961 if_link_state_change(ifp, LINK_STATE_DOWN);
1962 }
1963
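/*
 * After reconnecting, compact the RX buffers that survived the disconnect to
 * the front of the ring and re-grant their pages to the (possibly new)
 * backend.
 */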
1964 static void
1965 xn_rebuild_rx_bufs(struct netfront_rxq *rxq)
1966 {
1967 int requeue_idx, i;
1968 grant_ref_t ref;
1969 netif_rx_request_t *req;
1970
1971 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1972 struct mbuf *m;
1973 u_long pfn;
1974
1975 if (rxq->mbufs[i] == NULL)
1976 continue;
1977
1978 m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i);
1979 ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i);
1980
1981 req = RING_GET_REQUEST(&rxq->ring, requeue_idx);
1982 pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
1983
1984 gnttab_grant_foreign_access_ref(ref,
1985 xenbus_get_otherend_id(rxq->info->xbdev),
1986 pfn, 0);
1987
1988 req->gref = ref;
1989 req->id = requeue_idx;
1990
1991 requeue_idx++;
1992 }
1993
1994 rxq->ring.req_prod_pvt = requeue_idx;
1995 }
1996
1997 /* START of Xenolinux helper functions adapted to FreeBSD */
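/*
 * Connect (or reconnect) to the backend.  Checks that the backend supports
 * the copying receive mode we require, renegotiates the shared rings over
 * xenstore, releases any stale TX buffers, rebuilds the RX rings and then
 * turns the carrier back on and wakes up anyone sleeping on the softc.
 */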
static int
xn_connect(struct netfront_info *np)
{
	int i, error;
	u_int feature_rx_copy;
	struct netfront_rxq *rxq;
	struct netfront_txq *txq;

	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
	if (error != 0)
		feature_rx_copy = 0;

	/* We only support rx copy. */
	if (!feature_rx_copy)
		return (EPROTONOSUPPORT);

	/* Recovery procedure: */
	error = talk_to_backend(np->xbdev, np);
	if (error != 0)
		return (error);

	/* Step 1: Reinitialise variables. */
	xn_query_features(np);
	xn_configure_features(np);

	/* Step 2: Release TX buffer */
	for (i = 0; i < np->num_queues; i++) {
		txq = &np->txq[i];
		xn_release_tx_bufs(txq);
	}

	/* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */
	for (i = 0; i < np->num_queues; i++) {
		rxq = &np->rxq[i];
		xn_rebuild_rx_bufs(rxq);
	}

	/*
	 * Step 4: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netfront_carrier_on(np);
	wakeup(np);

	return (0);
}

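/*
 * Prod every queue pair: signal the backend event channel, reap any
 * completed TX slots and replenish the RX rings, so that work queued while
 * the connection was quiescent gets processed.
 */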
static void
xn_kick_rings(struct netfront_info *np)
{
	struct netfront_rxq *rxq;
	struct netfront_txq *txq;
	int i;

	for (i = 0; i < np->num_queues; i++) {
		txq = &np->txq[i];
		rxq = &np->rxq[i];
		xen_intr_signal(txq->xen_intr_handle);
		XN_TX_LOCK(txq);
		xn_txeof(txq);
		XN_TX_UNLOCK(txq);
		XN_RX_LOCK(rxq);
		xn_alloc_rx_buffers(rxq);
		XN_RX_UNLOCK(rxq);
	}
}

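/*
 * Read the feature nodes advertised by the backend in xenstore and adjust
 * the interface capabilities to match: feature-sg raises the TX fragment
 * limit, feature-gso-tcpv4 enables TSO4/LRO and feature-no-csum-offload
 * disables hardware checksumming.
 */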
static void
xn_query_features(struct netfront_info *np)
{
	int val;

	device_printf(np->xbdev, "backend features:");

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-sg", NULL, "%d", &val) != 0)
		val = 0;

	np->maxfrags = 1;
	if (val) {
		np->maxfrags = MAX_TX_REQ_FRAGS;
		printf(" feature-sg");
	}

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-gso-tcpv4", NULL, "%d", &val) != 0)
		val = 0;

	if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_TSO4 | IFCAP_LRO);
	if (val) {
		if_setcapabilitiesbit(np->xn_ifp, IFCAP_TSO4 | IFCAP_LRO, 0);
		printf(" feature-gso-tcp4");
	}

	/*
	 * HW CSUM offload is assumed to be available unless
	 * feature-no-csum-offload is set in xenstore.
	 */
	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-no-csum-offload", NULL, "%d", &val) != 0)
		val = 0;

	if_setcapabilitiesbit(np->xn_ifp, IFCAP_HWCSUM, 0);
	if (val) {
		if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_HWCSUM);
		printf(" feature-no-csum-offload");
	}

	printf("\n");
}

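/*
 * Apply the capabilities discovered by xn_query_features(), preserving as
 * much of the previously enabled set as the backend still supports.  LRO
 * state is freed and re-initialised per queue because the negotiated
 * options may have changed.
 */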
static int
xn_configure_features(struct netfront_info *np)
{
	int err, cap_enabled;
#if (defined(INET) || defined(INET6))
	int i;
#endif
	if_t ifp;

	ifp = np->xn_ifp;
	err = 0;

	if ((if_getcapenable(ifp) & if_getcapabilities(ifp)) ==
	    if_getcapenable(ifp)) {
		/* Current options are available, no need to do anything. */
		return (0);
	}

	/* Try to preserve as many options as possible. */
	cap_enabled = if_getcapenable(ifp);
	if_setcapenable(ifp, 0);
	if_sethwassist(ifp, 0);

#if (defined(INET) || defined(INET6))
	if ((cap_enabled & IFCAP_LRO) != 0)
		for (i = 0; i < np->num_queues; i++)
			tcp_lro_free(&np->rxq[i].lro);
	if (xn_enable_lro &&
	    (if_getcapabilities(ifp) & cap_enabled & IFCAP_LRO) != 0) {
		if_setcapenablebit(ifp, IFCAP_LRO, 0);
		for (i = 0; i < np->num_queues; i++) {
			err = tcp_lro_init(&np->rxq[i].lro);
			if (err != 0) {
				device_printf(np->xbdev,
				    "LRO initialization failed\n");
				if_setcapenablebit(ifp, 0, IFCAP_LRO);
				break;
			}
			np->rxq[i].lro.ifp = ifp;
		}
	}
	if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_TSO4) != 0) {
		if_setcapenablebit(ifp, IFCAP_TSO4, 0);
		if_sethwassistbits(ifp, CSUM_TSO, 0);
	}
#endif
	if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_TXCSUM) != 0) {
		if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
		if_sethwassistbits(ifp, XN_CSUM_FEATURES, 0);
	}
	if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_RXCSUM) != 0)
		if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);

	return (err);
}

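/*
 * Drain the per-queue buf_ring onto the shared TX ring.  Called with the
 * TX lock held.  Packets are put back (not dropped) when no ring slots are
 * available, and the queue is flagged full when the shared ring fills up.
 */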
static int
xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
{
	struct netfront_info *np;
	if_t ifp;
	struct buf_ring *br;
	int error, notify;

	np = txq->info;
	br = txq->br;
	ifp = np->xn_ifp;
	error = 0;

	XN_TX_LOCK_ASSERT(txq);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
	    !netfront_carrier_ok(np)) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error != 0)
			return (error);
	}

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (!xn_tx_slot_available(txq)) {
			drbr_putback(ifp, br, m);
			break;
		}

		error = xn_assemble_tx_request(txq, m);
		/* xn_assemble_tx_request always consumes the mbuf */
		if (error != 0) {
			drbr_advance(ifp, br);
			break;
		}

		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify);
		if (notify)
			xen_intr_signal(txq->xen_intr_handle);

		drbr_advance(ifp, br);
	}

	if (RING_FULL(&txq->ring))
		txq->full = true;

	return (0);
}

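/*
 * if_transmit method.  Select a TX queue from the mbuf's flowid (or from
 * the current CPU when no hash is present) and hand the packet to it,
 * deferring to the queue's taskqueue when the TX lock is contended.
 */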
static int
xn_txq_mq_start(if_t ifp, struct mbuf *m)
{
	struct netfront_info *np;
	struct netfront_txq *txq;
	int i, npairs, error;

	np = if_getsoftc(ifp);
	npairs = np->num_queues;

	if (!netfront_carrier_ok(np))
		return (ENOBUFS);

	KASSERT(npairs != 0, ("called with 0 available queues"));

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % npairs;
	else
		i = curcpu % npairs;

	txq = &np->txq[i];

	if (XN_TX_TRYLOCK(txq) != 0) {
		error = xn_txq_mq_start_locked(txq, m);
		XN_TX_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->br, m);
		taskqueue_enqueue(txq->tq, &txq->defrtask);
	}

	return (error);
}

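/*
 * if_qflush method: free everything queued in the per-queue buf_rings and
 * then flush the generic interface queue.
 */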
static void
xn_qflush(if_t ifp)
{
	struct netfront_info *np;
	struct netfront_txq *txq;
	struct mbuf *m;
	int i;

	np = if_getsoftc(ifp);

	for (i = 0; i < np->num_queues; i++) {
		txq = &np->txq[i];

		XN_TX_LOCK(txq);
		while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
			m_freem(m);
		XN_TX_UNLOCK(txq);
	}

	if_qflush(ifp);
}

/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
int
create_netdev(device_t dev)
{
	struct netfront_info *np;
	int err, cap_enabled;
	if_t ifp;

	np = device_get_softc(dev);

	np->xbdev = dev;

	mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);

	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

	err = xen_net_read_mac(dev, np->mac);
	if (err != 0)
		goto error;

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, np);
	if_initname(ifp, "xn", device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, xn_ioctl);

	if_settransmitfn(ifp, xn_txq_mq_start);
	if_setqflushfn(ifp, xn_qflush);

	if_setinitfn(ifp, xn_ifinit);

	if_sethwassist(ifp, XN_CSUM_FEATURES);
	/* Enable all supported features at device creation. */
	if_setcapabilities(ifp, IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO);
	cap_enabled = if_getcapabilities(ifp);
	if (!xn_enable_lro) {
		cap_enabled &= ~IFCAP_LRO;
	}
	if_setcapenable(ifp, cap_enabled);

	if_sethwtsomax(ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
	if_sethwtsomaxsegcount(ifp, MAX_TX_REQ_FRAGS);
	if_sethwtsomaxsegsize(ifp, PAGE_SIZE);

	ether_ifattach(ifp, np->mac);
	netfront_carrier_off(np);

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),		/* parent */
	    1, PAGE_SIZE,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    PAGE_SIZE * MAX_TX_REQ_FRAGS,	/* max request size */
	    MAX_TX_REQ_FRAGS,			/* max segments */
	    PAGE_SIZE,				/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &np->dma_tag);

	return (err);

error:
	KASSERT(err != 0, ("Error path with no error code specified"));
	return (err);
}

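/*
 * Newbus detach method: release the interface and all frontend resources.
 */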
static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return (0);
}

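/*
 * Undo create_netdev() and the backend connection: stop the interface,
 * disconnect all queues from the backend, detach and free the ifnet, and
 * release the queue arrays, media information and DMA tag.
 */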
static void
netif_free(struct netfront_info *np)
{

	XN_LOCK(np);
	xn_stop(np);
	XN_UNLOCK(np);
	netif_disconnect_backend(np);
	ether_ifdetach(np->xn_ifp);
	free(np->rxq, M_DEVBUF);
	free(np->txq, M_DEVBUF);
	if_free(np->xn_ifp);
	np->xn_ifp = NULL;
	ifmedia_removeall(&np->sc_media);
	bus_dma_tag_destroy(np->dma_tag);
}

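/*
 * Take the carrier down with every queue lock held, so no new work can be
 * queued, and then disconnect each RX and TX queue from the backend.
 */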
static void
netif_disconnect_backend(struct netfront_info *np)
{
	u_int i;

	for (i = 0; i < np->num_queues; i++) {
		XN_RX_LOCK(&np->rxq[i]);
		XN_TX_LOCK(&np->txq[i]);
	}
	netfront_carrier_off(np);
	for (i = 0; i < np->num_queues; i++) {
		XN_RX_UNLOCK(&np->rxq[i]);
		XN_TX_UNLOCK(&np->txq[i]);
	}

	for (i = 0; i < np->num_queues; i++) {
		disconnect_rxq(&np->rxq[i]);
		disconnect_txq(&np->txq[i]);
	}
}

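/*
 * Minimal ifmedia handlers: the paravirtualised link offers no selectable
 * media, so media changes are a no-op and the status is always reported as
 * active manual Ethernet.
 */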
static int
xn_ifmedia_upd(if_t ifp)
{

	return (0);
}

static void
xn_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netfront_probe),
	DEVMETHOD(device_attach,	netfront_attach),
	DEVMETHOD(device_detach,	netfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	netfront_suspend),
	DEVMETHOD(device_resume,	netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

	DEVMETHOD_END
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, NULL, NULL);