1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2004-2006 Kip Macy
5 * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/sockio.h>
36 #include <sys/limits.h>
37 #include <sys/mbuf.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/kernel.h>
41 #include <sys/socket.h>
42 #include <sys/sysctl.h>
43 #include <sys/taskqueue.h>
44
45 #include <net/if.h>
46 #include <net/if_var.h>
47 #include <net/if_arp.h>
48 #include <net/ethernet.h>
49 #include <net/if_media.h>
50 #include <net/bpf.h>
51 #include <net/if_types.h>
52
53 #include <netinet/in.h>
54 #include <netinet/ip.h>
55 #include <netinet/if_ether.h>
56 #include <netinet/tcp.h>
57 #include <netinet/tcp_lro.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61
62 #include <sys/bus.h>
63
64 #include <xen/xen-os.h>
65 #include <xen/hypervisor.h>
66 #include <xen/xen_intr.h>
67 #include <xen/gnttab.h>
68 #include <contrib/xen/memory.h>
69 #include <contrib/xen/io/netif.h>
70 #include <xen/xenbus/xenbusvar.h>
71
72 #include <machine/bus.h>
73
74 #include "xenbus_if.h"
75
76 /* Features supported by all backends. TSO and LRO can be negotiated */
77 #define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
78
79 #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
80 #define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE)
81
82 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
83
84 /*
85 * Should the driver do LRO on the RX end?
86 * This can be toggled on the fly, but the
87 * interface must be reset (down/up) for it
88 * to take effect.
89 */
90 static int xn_enable_lro = 1;
91 TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
92
93 /*
94 * Number of pairs of queues.
95 */
96 static unsigned long xn_num_queues = 4;
97 TUNABLE_ULONG("hw.xn.num_queues", &xn_num_queues);
98
99 /**
100 * \brief The maximum allowed data fragments in a single transmit
101 * request.
102 *
103 * This limit is imposed by the backend driver. We assume here that
104 * we are dealing with a Linux driver domain and have set our limit
105 * to mirror the Linux MAX_SKB_FRAGS constant.
106 */
107 #define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
108
109 #define RX_COPY_THRESHOLD 256
110
111 #define net_ratelimit() 0
112
113 struct netfront_rxq;
114 struct netfront_txq;
115 struct netfront_info;
116 struct netfront_rx_info;
117
118 static void xn_txeof(struct netfront_txq *);
119 static void xn_rxeof(struct netfront_rxq *);
120 static void xn_alloc_rx_buffers(struct netfront_rxq *);
121 static void xn_alloc_rx_buffers_callout(void *arg);
122
123 static void xn_release_rx_bufs(struct netfront_rxq *);
124 static void xn_release_tx_bufs(struct netfront_txq *);
125
126 static void xn_rxq_intr(struct netfront_rxq *);
127 static void xn_txq_intr(struct netfront_txq *);
128 static void xn_intr(void *);
129 static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *);
130 static int xn_ioctl(if_t, u_long, caddr_t);
131 static void xn_ifinit_locked(struct netfront_info *);
132 static void xn_ifinit(void *);
133 static void xn_stop(struct netfront_info *);
134 static void xn_query_features(struct netfront_info *np);
135 static int xn_configure_features(struct netfront_info *np);
136 static void netif_free(struct netfront_info *info);
137 static int netfront_detach(device_t dev);
138
139 static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *);
140 static int xn_txq_mq_start(if_t, struct mbuf *);
141
142 static int talk_to_backend(device_t dev, struct netfront_info *info);
143 static int create_netdev(device_t dev);
144 static void netif_disconnect_backend(struct netfront_info *info);
145 static int setup_device(device_t dev, struct netfront_info *info,
146 unsigned long);
147 static int xn_ifmedia_upd(if_t ifp);
148 static void xn_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
149
150 static int xn_connect(struct netfront_info *);
151 static void xn_kick_rings(struct netfront_info *);
152
153 static int xn_get_responses(struct netfront_rxq *,
154 struct netfront_rx_info *, RING_IDX, RING_IDX *,
155 struct mbuf **);
156
157 #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
158
159 #define INVALID_P2M_ENTRY (~0UL)
160 #define XN_QUEUE_NAME_LEN 8 /* xn{t,r}x_%u, allow for two digits */
161 struct netfront_rxq {
162 struct netfront_info *info;
163 u_int id;
164 char name[XN_QUEUE_NAME_LEN];
165 struct mtx lock;
166
167 int ring_ref;
168 netif_rx_front_ring_t ring;
169 xen_intr_handle_t xen_intr_handle;
170
171 grant_ref_t gref_head;
172 grant_ref_t grant_ref[NET_RX_RING_SIZE + 1];
173
174 struct mbuf *mbufs[NET_RX_RING_SIZE + 1];
175
176 struct lro_ctrl lro;
177
178 struct callout rx_refill;
179 };
180
181 struct netfront_txq {
182 struct netfront_info *info;
183 u_int id;
184 char name[XN_QUEUE_NAME_LEN];
185 struct mtx lock;
186
187 int ring_ref;
188 netif_tx_front_ring_t ring;
189 xen_intr_handle_t xen_intr_handle;
190
191 grant_ref_t gref_head;
192 grant_ref_t grant_ref[NET_TX_RING_SIZE + 1];
193
194 struct mbuf *mbufs[NET_TX_RING_SIZE + 1];
195 int mbufs_cnt;
196 struct buf_ring *br;
197
198 struct taskqueue *tq;
199 struct task defrtask;
200
201 bus_dma_segment_t segs[MAX_TX_REQ_FRAGS];
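/*
 * Per-packet TX bookkeeping carried on the mbuf chain as an m_tag:
 * the DMA map used to load the chain plus a reference count that is
 * bumped once per ring slot the packet occupies, so the chain is only
 * freed after the backend has responded to every slot.
 */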
202 struct mbuf_xennet {
203 struct m_tag tag;
204 bus_dma_tag_t dma_tag;
205 bus_dmamap_t dma_map;
206 struct netfront_txq *txq;
207 SLIST_ENTRY(mbuf_xennet) next;
208 u_int count;
209 } xennet_tag[NET_TX_RING_SIZE + 1];
210 SLIST_HEAD(, mbuf_xennet) tags;
211
212 bool full;
213 };
214
215 struct netfront_info {
216 if_t xn_ifp;
217
218 struct mtx sc_lock;
219
220 u_int num_queues;
221 struct netfront_rxq *rxq;
222 struct netfront_txq *txq;
223
224 u_int carrier;
225 u_int maxfrags;
226
227 device_t xbdev;
228 uint8_t mac[ETHER_ADDR_LEN];
229
230 int xn_if_flags;
231
232 struct ifmedia sc_media;
233
234 bus_dma_tag_t dma_tag;
235
236 bool xn_reset;
237 };
238
239 struct netfront_rx_info {
240 struct netif_rx_response rx;
241 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
242 };
243
244 #define XN_RX_LOCK(_q) mtx_lock(&(_q)->lock)
245 #define XN_RX_UNLOCK(_q) mtx_unlock(&(_q)->lock)
246
247 #define XN_TX_LOCK(_q) mtx_lock(&(_q)->lock)
248 #define XN_TX_TRYLOCK(_q) mtx_trylock(&(_q)->lock)
249 #define XN_TX_UNLOCK(_q) mtx_unlock(&(_q)->lock)
250
251 #define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock);
252 #define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock);
253
254 #define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED);
255 #define XN_RX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED);
256 #define XN_TX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED);
257
258 #define netfront_carrier_on(netif) ((netif)->carrier = 1)
259 #define netfront_carrier_off(netif) ((netif)->carrier = 0)
260 #define netfront_carrier_ok(netif) ((netif)->carrier)
261
262 /* Access functions for acquiring and freeing slots in the mbufs[] free lists. */
263
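/*
 * The mbufs[] arrays double as free lists: slot 0 is the list head and a
 * free slot stores the index of the next free slot, cast to a pointer.
 * Real mbuf pointers are always greater than the ring size, which is how
 * xn_release_tx_bufs() tells queued mbufs apart from free-list indices.
 */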
264 static inline void
265 add_id_to_freelist(struct mbuf **list, uintptr_t id)
266 {
267
268 KASSERT(id != 0,
269 ("%s: the head item (0) must always be free.", __func__));
270 list[id] = list[0];
271 list[0] = (struct mbuf *)id;
272 }
273
274 static inline unsigned short
275 get_id_from_freelist(struct mbuf **list)
276 {
277 uintptr_t id;
278
279 id = (uintptr_t)list[0];
280 KASSERT(id != 0,
281 ("%s: the head item (0) must always remain free.", __func__));
282 list[0] = list[id];
283 return (id);
284 }
285
286 static inline int
287 xn_rxidx(RING_IDX idx)
288 {
289
290 return idx & (NET_RX_RING_SIZE - 1);
291 }
292
293 static inline struct mbuf *
294 xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri)
295 {
296 int i;
297 struct mbuf *m;
298
299 i = xn_rxidx(ri);
300 m = rxq->mbufs[i];
301 rxq->mbufs[i] = NULL;
302 return (m);
303 }
304
305 static inline grant_ref_t
306 xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri)
307 {
308 int i = xn_rxidx(ri);
309 grant_ref_t ref = rxq->grant_ref[i];
310
311 KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
312 rxq->grant_ref[i] = GRANT_REF_INVALID;
313 return (ref);
314 }
315
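/*
 * m_tag cookie and type used to attach and locate the struct mbuf_xennet
 * TX state on an mbuf chain (see mbuf_grab() and mbuf_release()).
 */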
316 #define MTAG_COOKIE 1218492000
317 #define MTAG_XENNET 0
318
319 static void mbuf_grab(struct mbuf *m)
320 {
321 struct mbuf_xennet *ref;
322
323 ref = (struct mbuf_xennet *)m_tag_locate(m, MTAG_COOKIE,
324 MTAG_XENNET, NULL);
325 KASSERT(ref != NULL, ("Cannot find refcount"));
326 ref->count++;
327 }
328
329 static void mbuf_release(struct mbuf *m)
330 {
331 struct mbuf_xennet *ref;
332
333 ref = (struct mbuf_xennet *)m_tag_locate(m, MTAG_COOKIE,
334 MTAG_XENNET, NULL);
335 KASSERT(ref != NULL, ("Cannot find refcount"));
336 KASSERT(ref->count > 0, ("Invalid reference count"));
337
338 if (--ref->count == 0) {
339 /*
340 * Explicitly free the tag while we hold the tx queue lock.
341 * This ensures that the tag is deleted promptly in case
342 * something else is holding extra references to the mbuf chain,
343 * such as netmap.
344 */
345 m_tag_delete(m, &ref->tag);
346 m_freem(m);
347 }
348 }
349
350 static void tag_free(struct m_tag *t)
351 {
352 struct mbuf_xennet *ref = (struct mbuf_xennet *)t;
353
354 KASSERT(ref->count == 0, ("Free mbuf tag with pending refcnt"));
355 bus_dmamap_sync(ref->dma_tag, ref->dma_map, BUS_DMASYNC_POSTWRITE);
356 bus_dmamap_destroy(ref->dma_tag, ref->dma_map);
357 SLIST_INSERT_HEAD(&ref->txq->tags, ref, next);
358 }
359
360 #define IPRINTK(fmt, args...) \
361 printf("[XEN] " fmt, ##args)
362 #ifdef INVARIANTS
363 #define WPRINTK(fmt, args...) \
364 printf("[XEN] " fmt, ##args)
365 #else
366 #define WPRINTK(fmt, args...)
367 #endif
368 #ifdef DEBUG
369 #define DPRINTK(fmt, args...) \
370 printf("[XEN] %s: " fmt, __func__, ##args)
371 #else
372 #define DPRINTK(fmt, args...)
373 #endif
374
375 /**
376 * Read the 'mac' node at the given device's node in the store, and parse that
377 * as colon-separated octets, placing the result in the given mac array. mac
378 * must be a preallocated array of length ETHER_ADDR_LEN.
379 * Return 0 on success, or errno on error.
380 */
381 static int
382 xen_net_read_mac(device_t dev, uint8_t mac[])
383 {
384 int error, i;
385 char *s, *e, *macstr;
386 const char *path;
387
388 path = xenbus_get_node(dev);
389 error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
390 if (error == ENOENT) {
391 /*
392 * Deal with missing mac XenStore nodes on devices with
393 * HVM emulation (the 'ioemu' configuration attribute)
394 * enabled.
395 *
396 * The HVM emulator may execute in a stub device model
397 * domain which lacks the permission, only given to Dom0,
398 * to update the guest's XenStore tree. For this reason,
399 * the HVM emulator doesn't even attempt to write the
400 * front-side mac node, even when operating in Dom0.
401 * However, there should always be a mac listed in the
402 * backend tree. Fallback to this version if our query
403 * of the front side XenStore location doesn't find
404 * anything.
405 */
406 path = xenbus_get_otherend_path(dev);
407 error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
408 }
409 if (error != 0) {
410 xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
411 return (error);
412 }
413
414 s = macstr;
415 for (i = 0; i < ETHER_ADDR_LEN; i++) {
416 mac[i] = strtoul(s, &e, 16);
417 if (s == e || (e[0] != ':' && e[0] != 0)) {
418 free(macstr, M_XENBUS);
419 return (ENOENT);
420 }
421 s = &e[1];
422 }
423 free(macstr, M_XENBUS);
424 return (0);
425 }
426
427 /**
428 * Entry point to this code when a new device is created. Allocate the basic
429 * structures and the ring buffers for communication with the backend, and
430 * inform the backend of the appropriate details for those. Switch to
431 * Connected state.
432 */
433 static int
434 netfront_probe(device_t dev)
435 {
436
437 if (xen_pv_nics_disabled())
438 return (ENXIO);
439
440 if (!strcmp(xenbus_get_type(dev), "vif")) {
441 device_set_desc(dev, "Virtual Network Interface");
442 return (0);
443 }
444
445 return (ENXIO);
446 }
447
448 static int
449 netfront_attach(device_t dev)
450 {
451 int err;
452
453 err = create_netdev(dev);
454 if (err != 0) {
455 xenbus_dev_fatal(dev, err, "creating netdev");
456 return (err);
457 }
458
459 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
460 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
461 OID_AUTO, "enable_lro", CTLFLAG_RW,
462 &xn_enable_lro, 0, "Large Receive Offload");
463
464 SYSCTL_ADD_ULONG(device_get_sysctl_ctx(dev),
465 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
466 OID_AUTO, "num_queues", CTLFLAG_RD,
467 &xn_num_queues, "Number of pairs of queues");
468
469 return (0);
470 }
471
472 static int
473 netfront_suspend(device_t dev)
474 {
475 struct netfront_info *np = device_get_softc(dev);
476 u_int i;
477
478 for (i = 0; i < np->num_queues; i++) {
479 XN_RX_LOCK(&np->rxq[i]);
480 XN_TX_LOCK(&np->txq[i]);
481 }
482 netfront_carrier_off(np);
483 for (i = 0; i < np->num_queues; i++) {
484 XN_RX_UNLOCK(&np->rxq[i]);
485 XN_TX_UNLOCK(&np->txq[i]);
486 }
487 return (0);
488 }
489
490 /**
491 * We are reconnecting to the backend, due to a suspend/resume, or a backend
492 * driver restart. We tear down our netif structure and recreate it, but
493 * leave the device-layer structures intact so that this is transparent to the
494 * rest of the kernel.
495 */
496 static int
497 netfront_resume(device_t dev)
498 {
499 struct netfront_info *info = device_get_softc(dev);
500 u_int i;
501
502 if (xen_suspend_cancelled) {
503 for (i = 0; i < info->num_queues; i++) {
504 XN_RX_LOCK(&info->rxq[i]);
505 XN_TX_LOCK(&info->txq[i]);
506 }
507 netfront_carrier_on(info);
508 for (i = 0; i < info->num_queues; i++) {
509 XN_RX_UNLOCK(&info->rxq[i]);
510 XN_TX_UNLOCK(&info->txq[i]);
511 }
512 return (0);
513 }
514
515 netif_disconnect_backend(info);
516 return (0);
517 }
518
519 static int
520 write_queue_xenstore_keys(device_t dev,
521 struct netfront_rxq *rxq,
522 struct netfront_txq *txq,
523 struct xs_transaction *xst, bool hierarchy)
524 {
525 int err;
526 const char *message;
527 const char *node = xenbus_get_node(dev);
528 char *path;
529 size_t path_size;
530
531 KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids"));
532 /* Split event channel support is not yet there. */
533 KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle,
534 ("Split event channels are not supported"));
535
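/*
 * With multiple queues each queue's keys live in a "queue-%u"
 * subdirectory of the device node; with a single queue they are
 * written directly under the device node.
 */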
536 if (hierarchy) {
537 path_size = strlen(node) + 10;
538 path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
539 snprintf(path, path_size, "%s/queue-%u", node, rxq->id);
540 } else {
541 path_size = strlen(node) + 1;
542 path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
543 snprintf(path, path_size, "%s", node);
544 }
545
546 err = xs_printf(*xst, path, "tx-ring-ref","%u", txq->ring_ref);
547 if (err != 0) {
548 message = "writing tx ring-ref";
549 goto error;
550 }
551 err = xs_printf(*xst, path, "rx-ring-ref","%u", rxq->ring_ref);
552 if (err != 0) {
553 message = "writing rx ring-ref";
554 goto error;
555 }
556 err = xs_printf(*xst, path, "event-channel", "%u",
557 xen_intr_port(rxq->xen_intr_handle));
558 if (err != 0) {
559 message = "writing event-channel";
560 goto error;
561 }
562
563 free(path, M_DEVBUF);
564
565 return (0);
566
567 error:
568 free(path, M_DEVBUF);
569 xenbus_dev_fatal(dev, err, "%s", message);
570
571 return (err);
572 }
573
574 /* Common code used when first setting up, and when resuming. */
575 static int
576 talk_to_backend(device_t dev, struct netfront_info *info)
577 {
578 const char *message;
579 struct xs_transaction xst;
580 const char *node = xenbus_get_node(dev);
581 int err;
582 unsigned long num_queues, max_queues = 0;
583 unsigned int i;
584
585 err = xen_net_read_mac(dev, info->mac);
586 if (err != 0) {
587 xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
588 goto out;
589 }
590
591 err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev),
592 "multi-queue-max-queues", NULL, "%lu", &max_queues);
593 if (err != 0)
594 max_queues = 1;
595 num_queues = xn_num_queues;
596 if (num_queues > max_queues)
597 num_queues = max_queues;
598
599 err = setup_device(dev, info, num_queues);
600 if (err != 0) {
601 xenbus_dev_fatal(dev, err, "setup device");
602 goto out;
603 }
604
605 again:
606 err = xs_transaction_start(&xst);
607 if (err != 0) {
608 xenbus_dev_fatal(dev, err, "starting transaction");
609 goto free;
610 }
611
612 if (info->num_queues == 1) {
613 err = write_queue_xenstore_keys(dev, &info->rxq[0],
614 &info->txq[0], &xst, false);
615 if (err != 0)
616 goto abort_transaction_no_def_error;
617 } else {
618 err = xs_printf(xst, node, "multi-queue-num-queues",
619 "%u", info->num_queues);
620 if (err != 0) {
621 message = "writing multi-queue-num-queues";
622 goto abort_transaction;
623 }
624
625 for (i = 0; i < info->num_queues; i++) {
626 err = write_queue_xenstore_keys(dev, &info->rxq[i],
627 &info->txq[i], &xst, true);
628 if (err != 0)
629 goto abort_transaction_no_def_error;
630 }
631 }
632
633 err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
634 if (err != 0) {
635 message = "writing request-rx-copy";
636 goto abort_transaction;
637 }
638 err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
639 if (err != 0) {
640 message = "writing feature-rx-notify";
641 goto abort_transaction;
642 }
643 err = xs_printf(xst, node, "feature-sg", "%d", 1);
644 if (err != 0) {
645 message = "writing feature-sg";
646 goto abort_transaction;
647 }
648 if ((if_getcapenable(info->xn_ifp) & IFCAP_LRO) != 0) {
649 err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
650 if (err != 0) {
651 message = "writing feature-gso-tcpv4";
652 goto abort_transaction;
653 }
654 }
655 if ((if_getcapenable(info->xn_ifp) & IFCAP_RXCSUM) == 0) {
656 err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1);
657 if (err != 0) {
658 message = "writing feature-no-csum-offload";
659 goto abort_transaction;
660 }
661 }
662
663 err = xs_transaction_end(xst, 0);
664 if (err != 0) {
665 if (err == EAGAIN)
666 goto again;
667 xenbus_dev_fatal(dev, err, "completing transaction");
668 goto free;
669 }
670
671 return (0);
672
673 abort_transaction:
674 xenbus_dev_fatal(dev, err, "%s", message);
675 abort_transaction_no_def_error:
676 xs_transaction_end(xst, 1);
677 free:
678 netif_free(info);
679 out:
680 return (err);
681 }
682
683 static void
684 xn_rxq_intr(struct netfront_rxq *rxq)
685 {
686
687 XN_RX_LOCK(rxq);
688 xn_rxeof(rxq);
689 XN_RX_UNLOCK(rxq);
690 }
691
692 static void
693 xn_txq_start(struct netfront_txq *txq)
694 {
695 struct netfront_info *np = txq->info;
696 if_t ifp = np->xn_ifp;
697
698 XN_TX_LOCK_ASSERT(txq);
699 if (!drbr_empty(ifp, txq->br))
700 xn_txq_mq_start_locked(txq, NULL);
701 }
702
703 static void
704 xn_txq_intr(struct netfront_txq *txq)
705 {
706
707 XN_TX_LOCK(txq);
708 if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring))
709 xn_txeof(txq);
710 xn_txq_start(txq);
711 XN_TX_UNLOCK(txq);
712 }
713
714 static void
715 xn_txq_tq_deferred(void *xtxq, int pending)
716 {
717 struct netfront_txq *txq = xtxq;
718
719 XN_TX_LOCK(txq);
720 xn_txq_start(txq);
721 XN_TX_UNLOCK(txq);
722 }
723
724 static void
725 disconnect_rxq(struct netfront_rxq *rxq)
726 {
727
728 xn_release_rx_bufs(rxq);
729 gnttab_free_grant_references(rxq->gref_head);
730 if (rxq->ring_ref != GRANT_REF_INVALID) {
731 gnttab_end_foreign_access(rxq->ring_ref, NULL);
732 rxq->ring_ref = GRANT_REF_INVALID;
733 }
734 /*
735 * No split event channel support at the moment; the handle will
736 * be unbound in the TX path, so there is no need to call
737 * xen_intr_unbind() here, but we do want to reset the handle to 0.
738 */
739 rxq->xen_intr_handle = 0;
740 }
741
742 static void
743 destroy_rxq(struct netfront_rxq *rxq)
744 {
745
746 callout_drain(&rxq->rx_refill);
747 free(rxq->ring.sring, M_DEVBUF);
748 rxq->ring.sring = NULL;
749 }
750
751 static void
752 destroy_rxqs(struct netfront_info *np)
753 {
754 int i;
755
756 for (i = 0; i < np->num_queues; i++)
757 destroy_rxq(&np->rxq[i]);
758
759 free(np->rxq, M_DEVBUF);
760 np->rxq = NULL;
761 }
762
763 static int
764 setup_rxqs(device_t dev, struct netfront_info *info,
765 unsigned long num_queues)
766 {
767 int q, i;
768 int error;
769 netif_rx_sring_t *rxs;
770 struct netfront_rxq *rxq;
771
772 info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues,
773 M_DEVBUF, M_WAITOK|M_ZERO);
774
775 for (q = 0; q < num_queues; q++) {
776 rxq = &info->rxq[q];
777
778 rxq->id = q;
779 rxq->info = info;
780
781 rxq->gref_head = GNTTAB_LIST_END;
782 rxq->ring_ref = GRANT_REF_INVALID;
783 rxq->ring.sring = NULL;
784 snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q);
785 mtx_init(&rxq->lock, rxq->name, "netfront receive lock",
786 MTX_DEF);
787
788 for (i = 0; i <= NET_RX_RING_SIZE; i++) {
789 rxq->mbufs[i] = NULL;
790 rxq->grant_ref[i] = GRANT_REF_INVALID;
791 }
792
793 /* Start resources allocation */
794
795 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
796 &rxq->gref_head) != 0) {
797 device_printf(dev, "failed to allocate rx grant refs\n");
798 error = ENOMEM;
799 goto fail;
800 }
801
802 rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
803 M_WAITOK|M_ZERO);
804 SHARED_RING_INIT(rxs);
805 FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE);
806
807 error = xenbus_grant_ring(dev, virt_to_mfn(rxs),
808 &rxq->ring_ref);
809 if (error != 0) {
810 device_printf(dev, "failed to grant rx ring page\n");
811 goto fail_grant_ring;
812 }
813
814 callout_init(&rxq->rx_refill, 1);
815 }
816
817 return (0);
818
819 fail_grant_ring:
820 gnttab_free_grant_references(rxq->gref_head);
821 free(rxq->ring.sring, M_DEVBUF);
822 fail:
823 for (; q >= 0; q--) {
824 disconnect_rxq(&info->rxq[q]);
825 destroy_rxq(&info->rxq[q]);
826 }
827
828 free(info->rxq, M_DEVBUF);
829 return (error);
830 }
831
832 static void
833 disconnect_txq(struct netfront_txq *txq)
834 {
835
836 xn_release_tx_bufs(txq);
837 gnttab_free_grant_references(txq->gref_head);
838 if (txq->ring_ref != GRANT_REF_INVALID) {
839 gnttab_end_foreign_access(txq->ring_ref, NULL);
840 txq->ring_ref = GRANT_REF_INVALID;
841 }
842 xen_intr_unbind(&txq->xen_intr_handle);
843 }
844
845 static void
846 destroy_txq(struct netfront_txq *txq)
847 {
848 unsigned int i;
849
850 free(txq->ring.sring, M_DEVBUF);
851 txq->ring.sring = NULL;
852 buf_ring_free(txq->br, M_DEVBUF);
853 txq->br = NULL;
854 if (txq->tq) {
855 taskqueue_drain_all(txq->tq);
856 taskqueue_free(txq->tq);
857 txq->tq = NULL;
858 }
859
860 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
861 bus_dmamap_destroy(txq->info->dma_tag,
862 txq->xennet_tag[i].dma_map);
863 txq->xennet_tag[i].dma_map = NULL;
864 }
865 }
866
867 static void
868 destroy_txqs(struct netfront_info *np)
869 {
870 int i;
871
872 for (i = 0; i < np->num_queues; i++)
873 destroy_txq(&np->txq[i]);
874
875 free(np->txq, M_DEVBUF);
876 np->txq = NULL;
877 }
878
879 static int
880 setup_txqs(device_t dev, struct netfront_info *info,
881 unsigned long num_queues)
882 {
883 int q, i;
884 int error;
885 netif_tx_sring_t *txs;
886 struct netfront_txq *txq;
887
888 info->txq = malloc(sizeof(struct netfront_txq) * num_queues,
889 M_DEVBUF, M_WAITOK|M_ZERO);
890
891 for (q = 0; q < num_queues; q++) {
892 txq = &info->txq[q];
893
894 txq->id = q;
895 txq->info = info;
896
897 txq->gref_head = GNTTAB_LIST_END;
898 txq->ring_ref = GRANT_REF_INVALID;
899 txq->ring.sring = NULL;
900
901 snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q);
902
903 mtx_init(&txq->lock, txq->name, "netfront transmit lock",
904 MTX_DEF);
905 SLIST_INIT(&txq->tags);
906
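/*
 * Seed the TX free list: each slot initially stores the index of the
 * next slot (see add_id_to_freelist()); the final slot is terminated
 * with 0 below.
 */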
907 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
908 txq->mbufs[i] = (void *) ((u_long) i+1);
909 txq->grant_ref[i] = GRANT_REF_INVALID;
910 txq->xennet_tag[i].txq = txq;
911 txq->xennet_tag[i].dma_tag = info->dma_tag;
912 error = bus_dmamap_create(info->dma_tag, 0,
913 &txq->xennet_tag[i].dma_map);
914 if (error != 0) {
915 device_printf(dev,
916 "failed to allocate dma map\n");
917 goto fail;
918 }
919 m_tag_setup(&txq->xennet_tag[i].tag,
920 MTAG_COOKIE, MTAG_XENNET,
921 sizeof(txq->xennet_tag[i]) -
922 sizeof(txq->xennet_tag[i].tag));
923 txq->xennet_tag[i].tag.m_tag_free = &tag_free;
924 SLIST_INSERT_HEAD(&txq->tags, &txq->xennet_tag[i],
925 next);
926 }
927 txq->mbufs[NET_TX_RING_SIZE] = (void *)0;
928
929 /* Start resources allocation. */
930
931 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
932 &txq->gref_head) != 0) {
933 device_printf(dev, "failed to allocate tx grant refs\n");
934 error = ENOMEM;
935 goto fail;
936 }
937
938 txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
939 M_WAITOK|M_ZERO);
940 SHARED_RING_INIT(txs);
941 FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE);
942
943 error = xenbus_grant_ring(dev, virt_to_mfn(txs),
944 &txq->ring_ref);
945 if (error != 0) {
946 device_printf(dev, "failed to grant tx ring\n");
947 goto fail_grant_ring;
948 }
949
950 txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF,
951 M_WAITOK, &txq->lock);
952 TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq);
953
954 txq->tq = taskqueue_create(txq->name, M_WAITOK,
955 taskqueue_thread_enqueue, &txq->tq);
956
957 error = taskqueue_start_threads(&txq->tq, 1, PI_NET,
958 "%s txq %d", device_get_nameunit(dev), txq->id);
959 if (error != 0) {
960 device_printf(dev, "failed to start tx taskq %d\n",
961 txq->id);
962 goto fail_start_thread;
963 }
964
965 error = xen_intr_alloc_and_bind_local_port(dev,
966 xenbus_get_otherend_id(dev), /* filter */ NULL, xn_intr,
967 &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
968 &txq->xen_intr_handle);
969
970 if (error != 0) {
971 device_printf(dev, "xen_intr_alloc_and_bind_local_port failed\n");
972 goto fail_bind_port;
973 }
974 }
975
976 return (0);
977
978 fail_bind_port:
979 taskqueue_drain_all(txq->tq);
980 fail_start_thread:
981 buf_ring_free(txq->br, M_DEVBUF);
982 taskqueue_free(txq->tq);
983 gnttab_end_foreign_access(txq->ring_ref, NULL);
984 fail_grant_ring:
985 gnttab_free_grant_references(txq->gref_head);
986 free(txq->ring.sring, M_DEVBUF);
987 fail:
988 for (; q >= 0; q--) {
989 disconnect_txq(&info->txq[q]);
990 destroy_txq(&info->txq[q]);
991 }
992
993 free(info->txq, M_DEVBUF);
994 return (error);
995 }
996
997 static int
998 setup_device(device_t dev, struct netfront_info *info,
999 unsigned long num_queues)
1000 {
1001 int error;
1002 int q;
1003
1004 if (info->txq)
1005 destroy_txqs(info);
1006
1007 if (info->rxq)
1008 destroy_rxqs(info);
1009
1010 info->num_queues = 0;
1011
1012 error = setup_rxqs(dev, info, num_queues);
1013 if (error != 0)
1014 goto out;
1015 error = setup_txqs(dev, info, num_queues);
1016 if (error != 0)
1017 goto out;
1018
1019 info->num_queues = num_queues;
1020
1021 /* No split event channel at the moment. */
1022 for (q = 0; q < num_queues; q++)
1023 info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;
1024
1025 return (0);
1026
1027 out:
1028 KASSERT(error != 0, ("Error path taken without providing an error code"));
1029 return (error);
1030 }
1031
1032 #ifdef INET
1033 static u_int
1034 netfront_addr_cb(void *arg, struct ifaddr *a, u_int count)
1035 {
1036 arp_ifinit((if_t)arg, a);
1037 return (1);
1038 }
1039 /**
1040 * If this interface has an ipv4 address, send an arp for it. This
1041 * helps to get the network going again after migrating hosts.
1042 */
1043 static void
1044 netfront_send_fake_arp(device_t dev, struct netfront_info *info)
1045 {
1046 if_t ifp;
1047
1048 ifp = info->xn_ifp;
1049 if_foreach_addr_type(ifp, AF_INET, netfront_addr_cb, ifp);
1050 }
1051 #endif
1052
1053 /**
1054 * Callback received when the backend's state changes.
1055 */
1056 static void
1057 netfront_backend_changed(device_t dev, XenbusState newstate)
1058 {
1059 struct netfront_info *sc = device_get_softc(dev);
1060
1061 DPRINTK("newstate=%d\n", newstate);
1062
1063 CURVNET_SET(if_getvnet(sc->xn_ifp));
1064
1065 switch (newstate) {
1066 case XenbusStateInitialising:
1067 case XenbusStateInitialised:
1068 case XenbusStateUnknown:
1069 case XenbusStateReconfigured:
1070 case XenbusStateReconfiguring:
1071 break;
1072 case XenbusStateInitWait:
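/* Only attempt to connect while the frontend is still Initialising. */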
1073 if (xenbus_get_state(dev) != XenbusStateInitialising)
1074 break;
1075 if (xn_connect(sc) != 0)
1076 break;
1077 /* Switch to connected state before kicking the rings. */
1078 xenbus_set_state(sc->xbdev, XenbusStateConnected);
1079 xn_kick_rings(sc);
1080 break;
1081 case XenbusStateClosing:
1082 xenbus_set_state(dev, XenbusStateClosed);
1083 break;
1084 case XenbusStateClosed:
1085 if (sc->xn_reset) {
1086 netif_disconnect_backend(sc);
1087 xenbus_set_state(dev, XenbusStateInitialising);
1088 sc->xn_reset = false;
1089 }
1090 break;
1091 case XenbusStateConnected:
1092 #ifdef INET
1093 netfront_send_fake_arp(dev, sc);
1094 #endif
1095 break;
1096 }
1097
1098 CURVNET_RESTORE();
1099 }
1100
1101 /**
1102 * \brief Verify that there is sufficient space in the Tx ring
1103 * buffer for a maximally sized request to be enqueued.
1104 *
1105 * A transmit request requires a transmit descriptor for each packet
1106 * fragment, plus up to 2 entries for "options" (e.g. TSO).
1107 */
1108 static inline int
1109 xn_tx_slot_available(struct netfront_txq *txq)
1110 {
1111
1112 return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2));
1113 }
1114
1115 static void
1116 xn_release_tx_bufs(struct netfront_txq *txq)
1117 {
1118 int i;
1119
1120 for (i = 1; i <= NET_TX_RING_SIZE; i++) {
1121 struct mbuf *m;
1122
1123 m = txq->mbufs[i];
1124
1125 /*
1126 * We assume that no kernel addresses are
1127 * less than NET_TX_RING_SIZE. Any entry
1128 * in the table that is below this number
1129 * must be an index from free-list tracking.
1130 */
1131 if (((uintptr_t)m) <= NET_TX_RING_SIZE)
1132 continue;
1133 gnttab_end_foreign_access_ref(txq->grant_ref[i]);
1134 gnttab_release_grant_reference(&txq->gref_head,
1135 txq->grant_ref[i]);
1136 txq->grant_ref[i] = GRANT_REF_INVALID;
1137 add_id_to_freelist(txq->mbufs, i);
1138 txq->mbufs_cnt--;
1139 if (txq->mbufs_cnt < 0) {
1140 panic("%s: tx_chain_cnt must be >= 0", __func__);
1141 }
1142 mbuf_release(m);
1143 }
1144 }
1145
1146 static struct mbuf *
1147 xn_alloc_one_rx_buffer(struct netfront_rxq *rxq)
1148 {
1149 struct mbuf *m;
1150
1151 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1152 if (m == NULL)
1153 return NULL;
1154 m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;
1155
1156 return (m);
1157 }
1158
1159 static void
1160 xn_alloc_rx_buffers(struct netfront_rxq *rxq)
1161 {
1162 RING_IDX req_prod;
1163 int notify;
1164
1165 XN_RX_LOCK_ASSERT(rxq);
1166
1167 if (__predict_false(rxq->info->carrier == 0))
1168 return;
1169
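/*
 * Post fresh receive buffers until the ring is full, granting the
 * backend access to the page backing each mbuf.
 */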
1170 for (req_prod = rxq->ring.req_prod_pvt;
1171 req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE;
1172 req_prod++) {
1173 struct mbuf *m;
1174 unsigned short id;
1175 grant_ref_t ref;
1176 struct netif_rx_request *req;
1177 unsigned long pfn;
1178
1179 m = xn_alloc_one_rx_buffer(rxq);
1180 if (m == NULL)
1181 break;
1182
1183 id = xn_rxidx(req_prod);
1184
1185 KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
1186 rxq->mbufs[id] = m;
1187
1188 ref = gnttab_claim_grant_reference(&rxq->gref_head);
1189 KASSERT(ref != GNTTAB_LIST_END,
1190 ("reserved grant references exhausted"));
1191 rxq->grant_ref[id] = ref;
1192
1193 pfn = atop(vtophys(mtod(m, vm_offset_t)));
1194 req = RING_GET_REQUEST(&rxq->ring, req_prod);
1195
1196 gnttab_grant_foreign_access_ref(ref,
1197 xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
1198 req->id = id;
1199 req->gref = ref;
1200 }
1201
1202 rxq->ring.req_prod_pvt = req_prod;
1203
1204 /* Not enough requests? Try again later. */
1205 if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) {
1206 callout_reset_curcpu(&rxq->rx_refill, hz/10,
1207 xn_alloc_rx_buffers_callout, rxq);
1208 return;
1209 }
1210
1211 wmb(); /* barrier so backend sees requests */
1212
1213 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
1214 if (notify)
1215 xen_intr_signal(rxq->xen_intr_handle);
1216 }
1217
1218 static void xn_alloc_rx_buffers_callout(void *arg)
1219 {
1220 struct netfront_rxq *rxq;
1221
1222 rxq = (struct netfront_rxq *)arg;
1223 XN_RX_LOCK(rxq);
1224 xn_alloc_rx_buffers(rxq);
1225 XN_RX_UNLOCK(rxq);
1226 }
1227
1228 static void
1229 xn_release_rx_bufs(struct netfront_rxq *rxq)
1230 {
1231 int i, ref;
1232 struct mbuf *m;
1233
1234 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1235 m = rxq->mbufs[i];
1236
1237 if (m == NULL)
1238 continue;
1239
1240 ref = rxq->grant_ref[i];
1241 if (ref == GRANT_REF_INVALID)
1242 continue;
1243
1244 gnttab_end_foreign_access_ref(ref);
1245 gnttab_release_grant_reference(&rxq->gref_head, ref);
1246 rxq->mbufs[i] = NULL;
1247 rxq->grant_ref[i] = GRANT_REF_INVALID;
1248 m_freem(m);
1249 }
1250 }
1251
1252 static void
1253 xn_rxeof(struct netfront_rxq *rxq)
1254 {
1255 if_t ifp;
1256 struct netfront_info *np = rxq->info;
1257 #if (defined(INET) || defined(INET6))
1258 struct lro_ctrl *lro = &rxq->lro;
1259 #endif
1260 struct netfront_rx_info rinfo;
1261 struct netif_rx_response *rx = &rinfo.rx;
1262 struct netif_extra_info *extras = rinfo.extras;
1263 RING_IDX i, rp;
1264 struct mbuf *m;
1265 struct mbufq mbufq_rxq, mbufq_errq;
1266 int err, work_to_do;
1267
1268 XN_RX_LOCK_ASSERT(rxq);
1269
1270 if (!netfront_carrier_ok(np))
1271 return;
1272
1273 /* XXX: there should be some sane limit. */
1274 mbufq_init(&mbufq_errq, INT_MAX);
1275 mbufq_init(&mbufq_rxq, INT_MAX);
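/*
 * Good packets are collected on mbufq_rxq and handed to the stack (or
 * LRO) once the ring has been drained; mbufs from erroneous responses
 * are parked on mbufq_errq and freed in one go.
 */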
1276
1277 ifp = np->xn_ifp;
1278
1279 do {
1280 rp = rxq->ring.sring->rsp_prod;
1281 rmb(); /* Ensure we see queued responses up to 'rp'. */
1282
1283 i = rxq->ring.rsp_cons;
1284 while ((i != rp)) {
1285 memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx));
1286 memset(extras, 0, sizeof(rinfo.extras));
1287
1288 m = NULL;
1289 err = xn_get_responses(rxq, &rinfo, rp, &i, &m);
1290
1291 if (__predict_false(err)) {
1292 if (m)
1293 (void )mbufq_enqueue(&mbufq_errq, m);
1294 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1295 continue;
1296 }
1297
1298 m->m_pkthdr.rcvif = ifp;
1299 if (rx->flags & NETRXF_data_validated) {
1300 /*
1301 * According to mbuf(9) the correct way to tell
1302 * the stack that the checksum of an inbound
1303 * packet is correct, without it actually being
1304 * present (because the underlying interface
1305 * doesn't provide it), is to set the
1306 * CSUM_DATA_VALID and CSUM_PSEUDO_HDR flags,
1307 * and the csum_data field to 0xffff.
1308 */
1309 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
1310 | CSUM_PSEUDO_HDR);
1311 m->m_pkthdr.csum_data = 0xffff;
1312 }
1313 if ((rx->flags & NETRXF_extra_info) != 0 &&
1314 (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type ==
1315 XEN_NETIF_EXTRA_TYPE_GSO)) {
1316 m->m_pkthdr.tso_segsz =
1317 extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size;
1318 m->m_pkthdr.csum_flags |= CSUM_TSO;
1319 }
1320
1321 (void )mbufq_enqueue(&mbufq_rxq, m);
1322 }
1323
1324 rxq->ring.rsp_cons = i;
1325
1326 xn_alloc_rx_buffers(rxq);
1327
1328 RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
1329 } while (work_to_do);
1330
1331 mbufq_drain(&mbufq_errq);
1332 /*
1333 * Process all the mbufs after the remapping is complete.
1334 * Break the mbuf chain first though.
1335 */
1336 while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) {
1337 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1338 #if (defined(INET) || defined(INET6))
1339 /* Use LRO if possible */
1340 if ((if_getcapenable(ifp) & IFCAP_LRO) == 0 ||
1341 lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
1342 /*
1343 * If LRO fails, pass up to the stack
1344 * directly.
1345 */
1346 if_input(ifp, m);
1347 }
1348 #else
1349 if_input(ifp, m);
1350 #endif
1351 }
1352
1353 #if (defined(INET) || defined(INET6))
1354 /*
1355 * Flush any outstanding LRO work
1356 */
1357 tcp_lro_flush_all(lro);
1358 #endif
1359 }
1360
1361 static void
1362 xn_txeof(struct netfront_txq *txq)
1363 {
1364 RING_IDX i, prod;
1365 unsigned short id;
1366 if_t ifp;
1367 netif_tx_response_t *txr;
1368 struct mbuf *m;
1369 struct netfront_info *np = txq->info;
1370
1371 XN_TX_LOCK_ASSERT(txq);
1372
1373 if (!netfront_carrier_ok(np))
1374 return;
1375
1376 ifp = np->xn_ifp;
1377
1378 do {
1379 prod = txq->ring.sring->rsp_prod;
1380 rmb(); /* Ensure we see responses up to 'prod'. */
1381
1382 for (i = txq->ring.rsp_cons; i != prod; i++) {
1383 txr = RING_GET_RESPONSE(&txq->ring, i);
1384 if (txr->status == NETIF_RSP_NULL)
1385 continue;
1386
1387 if (txr->status != NETIF_RSP_OKAY) {
1388 printf("%s: WARNING: response is %d!\n",
1389 __func__, txr->status);
1390 }
1391 id = txr->id;
1392 m = txq->mbufs[id];
1393 KASSERT(m != NULL, ("mbuf not found in chain"));
1394 KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
1395 ("mbuf already on the free list, but we're "
1396 "trying to free it again!"));
1397 M_ASSERTVALID(m);
1398
1399 if (__predict_false(gnttab_query_foreign_access(
1400 txq->grant_ref[id]) != 0)) {
1401 panic("%s: grant id %u still in use by the "
1402 "backend", __func__, id);
1403 }
1404 gnttab_end_foreign_access_ref(txq->grant_ref[id]);
1405 gnttab_release_grant_reference(
1406 &txq->gref_head, txq->grant_ref[id]);
1407 txq->grant_ref[id] = GRANT_REF_INVALID;
1408
1409 txq->mbufs[id] = NULL;
1410 add_id_to_freelist(txq->mbufs, id);
1411 txq->mbufs_cnt--;
1412 mbuf_release(m);
1413 /* Only mark the txq active if we've freed up at least one slot to try */
1414 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1415 }
1416 txq->ring.rsp_cons = prod;
1417
1418 /*
1419 * Set a new event, then check for race with update of
1420 * tx_cons. Note that it is essential to schedule a
1421 * callback, no matter how few buffers are pending. Even if
1422 * there is space in the transmit ring, higher layers may
1423 * be blocked because too much data is outstanding: in such
1424 * cases notification from Xen is likely to be the only kick
1425 * that we'll get.
1426 */
1427 txq->ring.sring->rsp_event =
1428 prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1;
1429
1430 mb();
1431 } while (prod != txq->ring.sring->rsp_prod);
1432
1433 if (txq->full &&
1434 ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
1435 txq->full = false;
1436 xn_txq_start(txq);
1437 }
1438 }
1439
1440 static void
1441 xn_intr(void *xsc)
1442 {
1443 struct netfront_txq *txq = xsc;
1444 struct netfront_info *np = txq->info;
1445 struct netfront_rxq *rxq = &np->rxq[txq->id];
1446
1447 /* kick both tx and rx */
1448 xn_rxq_intr(rxq);
1449 xn_txq_intr(txq);
1450 }
1451
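/*
 * Hand an unconsumed RX slot (mbuf and grant reference) back to the
 * backend as a fresh receive request.
 */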
1452 static void
1453 xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m,
1454 grant_ref_t ref)
1455 {
1456 int new = xn_rxidx(rxq->ring.req_prod_pvt);
1457
1458 KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
1459 rxq->mbufs[new] = m;
1460 rxq->grant_ref[new] = ref;
1461 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
1462 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
1463 rxq->ring.req_prod_pvt++;
1464 }
1465
1466 static int
1467 xn_get_extras(struct netfront_rxq *rxq,
1468 struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
1469 {
1470 struct netif_extra_info *extra;
1471
1472 int err = 0;
1473
1474 do {
1475 struct mbuf *m;
1476 grant_ref_t ref;
1477
1478 if (__predict_false(*cons + 1 == rp)) {
1479 err = EINVAL;
1480 break;
1481 }
1482
1483 extra = (struct netif_extra_info *)
1484 RING_GET_RESPONSE(&rxq->ring, ++(*cons));
1485
1486 if (__predict_false(!extra->type ||
1487 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1488 err = EINVAL;
1489 } else {
1490 memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
1491 }
1492
1493 m = xn_get_rx_mbuf(rxq, *cons);
1494 ref = xn_get_rx_ref(rxq, *cons);
1495 xn_move_rx_slot(rxq, m, ref);
1496 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1497
1498 return err;
1499 }
1500
1501 static int
1502 xn_get_responses(struct netfront_rxq *rxq,
1503 struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
1504 struct mbuf **list)
1505 {
1506 struct netif_rx_response *rx = &rinfo->rx;
1507 struct netif_extra_info *extras = rinfo->extras;
1508 struct mbuf *m, *m0, *m_prev;
1509 grant_ref_t ref = xn_get_rx_ref(rxq, *cons);
1510 int frags = 1;
1511 int err = 0;
1512 u_long ret __diagused;
1513
1514 m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons);
1515
1516 if (rx->flags & NETRXF_extra_info) {
1517 err = xn_get_extras(rxq, extras, rp, cons);
1518 }
1519
1520 if (m0 != NULL) {
1521 m0->m_pkthdr.len = 0;
1522 m0->m_next = NULL;
1523 }
1524
1525 for (;;) {
1526 #if 0
1527 DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
1528 rx->status, rx->offset, frags);
1529 #endif
1530 if (__predict_false(rx->status < 0 ||
1531 rx->offset + rx->status > PAGE_SIZE)) {
1532 xn_move_rx_slot(rxq, m, ref);
1533 if (m0 == m)
1534 m0 = NULL;
1535 m = NULL;
1536 err = EINVAL;
1537 goto next_skip_queue;
1538 }
1539
1540 /*
1541 * This definitely indicates a bug, either in this driver or in
1542 * the backend driver. In the future this should flag the bad
1543 * situation to the system controller so it can reboot the backend.
1544 */
1545 if (ref == GRANT_REF_INVALID) {
1546 printf("%s: Bad rx response id %d.\n", __func__, rx->id);
1547 err = EINVAL;
1548 goto next;
1549 }
1550
1551 ret = gnttab_end_foreign_access_ref(ref);
1552 KASSERT(ret, ("Unable to end access to grant references"));
1553
1554 gnttab_release_grant_reference(&rxq->gref_head, ref);
1555
1556 next:
1557 if (m == NULL)
1558 break;
1559
1560 m->m_len = rx->status;
1561 m->m_data += rx->offset;
1562 m0->m_pkthdr.len += rx->status;
1563
1564 next_skip_queue:
1565 if (!(rx->flags & NETRXF_more_data))
1566 break;
1567
1568 if (*cons + frags == rp) {
1569 if (net_ratelimit())
1570 WPRINTK("Need more frags\n");
1571 err = ENOENT;
1572 printf("%s: cons %u frags %u rp %u, not enough frags\n",
1573 __func__, *cons, frags, rp);
1574 break;
1575 }
1576 /*
1577 * Note that m can be NULL, if rx->status < 0 or if
1578 * rx->offset + rx->status > PAGE_SIZE above.
1579 */
1580 m_prev = m;
1581
1582 rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags);
1583 m = xn_get_rx_mbuf(rxq, *cons + frags);
1584
1585 /*
1586 * m_prev == NULL can happen if rx->status < 0 or if
1587 * rx->offset + rx->status > PAGE_SIZE above.
1588 */
1589 if (m_prev != NULL)
1590 m_prev->m_next = m;
1591
1592 /*
1593 * m0 can be NULL if rx->status < 0 or if rx->offset +
1594 * rx->status > PAGE_SIZE above.
1595 */
1596 if (m0 == NULL)
1597 m0 = m;
1598 m->m_next = NULL;
1599 ref = xn_get_rx_ref(rxq, *cons + frags);
1600 frags++;
1601 }
1602 *list = m0;
1603 *cons += frags;
1604
1605 return (err);
1606 }
1607
1608 /**
1609 * Given an mbuf chain, make sure we have enough room and then push
1610 * it onto the transmit ring.
1611 */
1612 static int
1613 xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
1614 {
1615 struct netfront_info *np = txq->info;
1616 if_t ifp = np->xn_ifp;
1617 int otherend_id, error, nfrags;
1618 bus_dma_segment_t *segs = txq->segs;
1619 struct mbuf_xennet *tag;
1620 bus_dmamap_t map;
1621 unsigned int i;
1622
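/*
 * Grab a free xennet tag (and its DMA map) and load the mbuf chain.
 * If the chain maps to more fragments than the backend accepts,
 * defragment it once and retry before dropping the packet.
 */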
1623 KASSERT(!SLIST_EMPTY(&txq->tags), ("no tags available"));
1624 tag = SLIST_FIRST(&txq->tags);
1625 SLIST_REMOVE_HEAD(&txq->tags, next);
1626 KASSERT(tag->count == 0, ("tag already in-use"));
1627 map = tag->dma_map;
1628 error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs,
1629 &nfrags, 0);
1630 if (error == EFBIG || nfrags > np->maxfrags) {
1631 struct mbuf *m;
1632
1633 bus_dmamap_unload(np->dma_tag, map);
1634 m = m_defrag(m_head, M_NOWAIT);
1635 if (!m) {
1636 /*
1637 * Defrag failed, so free the mbuf and
1638 * therefore drop the packet.
1639 */
1640 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1641 m_freem(m_head);
1642 return (EMSGSIZE);
1643 }
1644 m_head = m;
1645 error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs,
1646 &nfrags, 0);
1647 if (error != 0 || nfrags > np->maxfrags) {
1648 bus_dmamap_unload(np->dma_tag, map);
1649 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1650 m_freem(m_head);
1651 return (error ?: EFBIG);
1652 }
1653 } else if (error != 0) {
1654 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1655 m_freem(m_head);
1656 return (error);
1657 }
1658
1659 /**
1660 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
1661 * of mbufs longer than Linux can handle. Make sure we don't
1662 * pass a too-long chain over to the other side by dropping the
1663 * packet. It doesn't look like there is currently a way to
1664 * tell the TCP stack to generate a shorter chain of packets.
1665 */
1666 if (nfrags > MAX_TX_REQ_FRAGS) {
1667 #ifdef DEBUG
1668 printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
1669 "won't be able to handle it, dropping\n",
1670 __func__, nfrags, MAX_TX_REQ_FRAGS);
1671 #endif
1672 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1673 bus_dmamap_unload(np->dma_tag, map);
1674 m_freem(m_head);
1675 return (EMSGSIZE);
1676 }
1677
1678 /*
1679 * This check should be redundant. We've already verified that we
1680 * have enough slots in the ring to handle a packet of maximum
1681 * size, and that our packet is less than the maximum size. Keep
1682 * it in here as an assert for now just to make certain that
1683 * chain_cnt is accurate.
1684 */
1685 KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE,
1686 ("%s: chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
1687 "(%d)!", __func__, (int) txq->mbufs_cnt,
1688 (int) nfrags, (int) NET_TX_RING_SIZE));
1689
1690 /*
1691 * Start packing the mbufs in this chain into
1692 * the fragment pointers. Stop when we run out
1693 * of fragments or hit the end of the mbuf chain.
1694 */
1695 otherend_id = xenbus_get_otherend_id(np->xbdev);
1696 m_tag_prepend(m_head, &tag->tag);
1697 for (i = 0; i < nfrags; i++) {
1698 netif_tx_request_t *tx;
1699 uintptr_t id;
1700 grant_ref_t ref;
1701 u_long mfn; /* XXX Wrong type? */
1702
1703 tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
1704 id = get_id_from_freelist(txq->mbufs);
1705 if (id == 0)
1706 panic("%s: was allocated the freelist head!\n",
1707 __func__);
1708 txq->mbufs_cnt++;
1709 if (txq->mbufs_cnt > NET_TX_RING_SIZE)
1710 panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
1711 __func__);
1712 mbuf_grab(m_head);
1713 txq->mbufs[id] = m_head;
1714 tx->id = id;
1715 ref = gnttab_claim_grant_reference(&txq->gref_head);
1716 KASSERT((short)ref >= 0, ("Negative ref"));
1717 mfn = atop(segs[i].ds_addr);
1718 gnttab_grant_foreign_access_ref(ref, otherend_id,
1719 mfn, GNTMAP_readonly);
1720 tx->gref = txq->grant_ref[id] = ref;
1721 tx->offset = segs[i].ds_addr & PAGE_MASK;
1722 KASSERT(tx->offset + segs[i].ds_len <= PAGE_SIZE,
1723 ("mbuf segment crosses a page boundary"));
1724 tx->flags = 0;
1725 if (i == 0) {
1726 /*
1727 * The first fragment has the entire packet
1728 * size, subsequent fragments have just the
1729 * fragment size. The backend works out the
1730 * true size of the first fragment by
1731 * subtracting the sizes of the other
1732 * fragments.
1733 */
1734 tx->size = m_head->m_pkthdr.len;
1735
1736 /*
1737 * The first fragment contains the checksum flags
1738 * and is optionally followed by extra data for
1739 * TSO etc.
1740 */
1741 /**
1742 * CSUM_TSO requires checksum offloading.
1743 * Some versions of FreeBSD fail to
1744 * set CSUM_TCP in the CSUM_TSO case,
1745 * so we have to test for CSUM_TSO
1746 * explicitly.
1747 */
1748 if (m_head->m_pkthdr.csum_flags
1749 & (CSUM_DELAY_DATA | CSUM_TSO)) {
1750 tx->flags |= (NETTXF_csum_blank
1751 | NETTXF_data_validated);
1752 }
1753 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1754 struct netif_extra_info *gso =
1755 (struct netif_extra_info *)
1756 RING_GET_REQUEST(&txq->ring,
1757 ++txq->ring.req_prod_pvt);
1758
1759 tx->flags |= NETTXF_extra_info;
1760
1761 gso->u.gso.size = m_head->m_pkthdr.tso_segsz;
1762 gso->u.gso.type =
1763 XEN_NETIF_GSO_TYPE_TCPV4;
1764 gso->u.gso.pad = 0;
1765 gso->u.gso.features = 0;
1766
1767 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
1768 gso->flags = 0;
1769 }
1770 } else {
1771 tx->size = segs[i].ds_len;
1772 }
1773 if (i != nfrags - 1)
1774 tx->flags |= NETTXF_more_data;
1775
1776 txq->ring.req_prod_pvt++;
1777 }
1778 bus_dmamap_sync(np->dma_tag, map, BUS_DMASYNC_PREWRITE);
1779 BPF_MTAP(ifp, m_head);
1780
1781 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1782 if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len);
1783 if (m_head->m_flags & M_MCAST)
1784 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1785
1786 xn_txeof(txq);
1787
1788 return (0);
1789 }
1790
1791 /* equivalent of network_open() in Linux */
1792 static void
1793 xn_ifinit_locked(struct netfront_info *np)
1794 {
1795 if_t ifp;
1796 int i;
1797 struct netfront_rxq *rxq;
1798
1799 XN_LOCK_ASSERT(np);
1800
1801 ifp = np->xn_ifp;
1802
1803 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING || !netfront_carrier_ok(np))
1804 return;
1805
1806 xn_stop(np);
1807
1808 for (i = 0; i < np->num_queues; i++) {
1809 rxq = &np->rxq[i];
1810 XN_RX_LOCK(rxq);
1811 xn_alloc_rx_buffers(rxq);
1812 rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
1813 if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
1814 xn_rxeof(rxq);
1815 XN_RX_UNLOCK(rxq);
1816 }
1817
1818 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1819 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1820 if_link_state_change(ifp, LINK_STATE_UP);
1821 }
1822
1823 static void
1824 xn_ifinit(void *xsc)
1825 {
1826 struct netfront_info *sc = xsc;
1827
1828 XN_LOCK(sc);
1829 xn_ifinit_locked(sc);
1830 XN_UNLOCK(sc);
1831 }
1832
1833 static int
1834 xn_ioctl(if_t ifp, u_long cmd, caddr_t data)
1835 {
1836 struct netfront_info *sc = if_getsoftc(ifp);
1837 struct ifreq *ifr = (struct ifreq *) data;
1838 device_t dev;
1839 #ifdef INET
1840 struct ifaddr *ifa = (struct ifaddr *)data;
1841 #endif
1842 int mask, error = 0, reinit;
1843
1844 dev = sc->xbdev;
1845
1846 switch(cmd) {
1847 case SIOCSIFADDR:
1848 #ifdef INET
1849 XN_LOCK(sc);
1850 if (ifa->ifa_addr->sa_family == AF_INET) {
1851 if_setflagbits(ifp, IFF_UP, 0);
1852 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1853 xn_ifinit_locked(sc);
1854 arp_ifinit(ifp, ifa);
1855 XN_UNLOCK(sc);
1856 } else {
1857 XN_UNLOCK(sc);
1858 #endif
1859 error = ether_ioctl(ifp, cmd, data);
1860 #ifdef INET
1861 }
1862 #endif
1863 break;
1864 case SIOCSIFMTU:
1865 if (if_getmtu(ifp) == ifr->ifr_mtu)
1866 break;
1867
1868 if_setmtu(ifp, ifr->ifr_mtu);
1869 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1870 xn_ifinit(sc);
1871 break;
1872 case SIOCSIFFLAGS:
1873 XN_LOCK(sc);
1874 if (if_getflags(ifp) & IFF_UP) {
1875 /*
1876 * If only the state of the PROMISC flag changed,
1877 * then just use the 'set promisc mode' command
1878 * instead of reinitializing the entire NIC. Doing
1879 * a full re-init means reloading the firmware and
1880 * waiting for it to start up, which may take a
1881 * second or two.
1882 */
1883 xn_ifinit_locked(sc);
1884 } else {
1885 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1886 xn_stop(sc);
1887 }
1888 }
1889 sc->xn_if_flags = if_getflags(ifp);
1890 XN_UNLOCK(sc);
1891 break;
1892 case SIOCSIFCAP:
1893 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1894 reinit = 0;
1895
1896 if (mask & IFCAP_TXCSUM) {
1897 if_togglecapenable(ifp, IFCAP_TXCSUM);
1898 if_togglehwassist(ifp, XN_CSUM_FEATURES);
1899 }
1900 if (mask & IFCAP_TSO4) {
1901 if_togglecapenable(ifp, IFCAP_TSO4);
1902 if_togglehwassist(ifp, CSUM_TSO);
1903 }
1904
1905 if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) {
1906 /* These Rx features require us to renegotiate. */
1907 reinit = 1;
1908
1909 if (mask & IFCAP_RXCSUM)
1910 if_togglecapenable(ifp, IFCAP_RXCSUM);
1911 if (mask & IFCAP_LRO)
1912 if_togglecapenable(ifp, IFCAP_LRO);
1913 }
1914
1915 if (reinit == 0)
1916 break;
1917
1918 /*
1919 * We must reset the interface so the backend picks up the
1920 * new features.
1921 */
1922 device_printf(sc->xbdev,
1923 "performing interface reset due to feature change\n");
1924 XN_LOCK(sc);
1925 netfront_carrier_off(sc);
1926 sc->xn_reset = true;
1927 /*
1928 * NB: the pending packet queue is not flushed, since
1929 * the interface should still support the old options.
1930 */
1931 XN_UNLOCK(sc);
1932 /*
1933 * Delete the xenstore nodes that export features.
1934 *
1935 * NB: There's a xenbus state called
1936 * "XenbusStateReconfiguring", which is what we should set
1937 * here. Sadly none of the backends know how to handle it,
1938 * and simply disconnect from the frontend, so we will just
1939 * switch back to XenbusStateInitialising in order to force
1940 * a reconnection.
1941 */
1942 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4");
1943 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload");
1944 xenbus_set_state(dev, XenbusStateClosing);
1945
1946 /*
1947 * Wait for the frontend to reconnect before returning
1948 * from the ioctl. 30s should be more than enough for any
1949 * sane backend to reconnect.
1950 */
1951 error = tsleep(sc, 0, "xn_rst", 30*hz);
1952 break;
1953 case SIOCADDMULTI:
1954 case SIOCDELMULTI:
1955 break;
1956 case SIOCSIFMEDIA:
1957 case SIOCGIFMEDIA:
1958 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1959 break;
1960 default:
1961 error = ether_ioctl(ifp, cmd, data);
1962 }
1963
1964 return (error);
1965 }
1966
1967 static void
1968 xn_stop(struct netfront_info *sc)
1969 {
1970 if_t ifp;
1971
1972 XN_LOCK_ASSERT(sc);
1973
1974 ifp = sc->xn_ifp;
1975
1976 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1977 if_link_state_change(ifp, LINK_STATE_DOWN);
1978 }
1979
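/*
 * After a reconnect, re-post any RX mbufs the frontend still holds:
 * compact them to the start of the ring and re-grant their pages to
 * the backend.
 */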
1980 static void
1981 xn_rebuild_rx_bufs(struct netfront_rxq *rxq)
1982 {
1983 int requeue_idx, i;
1984 grant_ref_t ref;
1985 netif_rx_request_t *req;
1986
1987 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1988 struct mbuf *m;
1989 u_long pfn;
1990
1991 if (rxq->mbufs[i] == NULL)
1992 continue;
1993
1994 m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i);
1995 ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i);
1996
1997 req = RING_GET_REQUEST(&rxq->ring, requeue_idx);
1998 pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
1999
2000 gnttab_grant_foreign_access_ref(ref,
2001 xenbus_get_otherend_id(rxq->info->xbdev),
2002 pfn, 0);
2003
2004 req->gref = ref;
2005 req->id = requeue_idx;
2006
2007 requeue_idx++;
2008 }
2009
2010 rxq->ring.req_prod_pvt = requeue_idx;
2011 }
2012
2013 /* START of Xenolinux helper functions adapted to FreeBSD */
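/*
 * (Re)connect to the backend: verify that it supports the mandatory
 * rx-copy mode, renegotiate the rings via xenstore, release stale TX
 * buffers and rebuild the RX ring before turning the carrier back on.
 */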
2014 static int
2015 xn_connect(struct netfront_info *np)
2016 {
2017 int i, error;
2018 u_int feature_rx_copy;
2019 struct netfront_rxq *rxq;
2020 struct netfront_txq *txq;
2021
2022 error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
2023 "feature-rx-copy", NULL, "%u", &feature_rx_copy);
2024 if (error != 0)
2025 feature_rx_copy = 0;
2026
2027 /* We only support rx copy. */
2028 if (!feature_rx_copy)
2029 return (EPROTONOSUPPORT);
2030
2031 /* Recovery procedure: */
2032 error = talk_to_backend(np->xbdev, np);
2033 if (error != 0)
2034 return (error);
2035
2036 /* Step 1: Reinitialise variables. */
2037 xn_query_features(np);
2038 xn_configure_features(np);
2039
2040 /* Step 2: Release TX buffers. */
2041 for (i = 0; i < np->num_queues; i++) {
2042 txq = &np->txq[i];
2043 xn_release_tx_bufs(txq);
2044 }
2045
2046 /* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */
2047 for (i = 0; i < np->num_queues; i++) {
2048 rxq = &np->rxq[i];
2049 xn_rebuild_rx_bufs(rxq);
2050 }
2051
2052 /* Step 4: All public and private state should now be sane. Get
2053 * ready to start sending and receiving packets and give the driver
2054 * domain a kick because we've probably just requeued some
2055 * packets.
2056 */
2057 netfront_carrier_on(np);
2058 wakeup(np);
2059
2060 return (0);
2061 }
2062
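/*
 * Prod every queue pair: signal the TX event channel so the backend
 * revisits the ring, reap any completed transmits and top up the RX
 * buffer pool.
 */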
2063 static void
2064 xn_kick_rings(struct netfront_info *np)
2065 {
2066 struct netfront_rxq *rxq;
2067 struct netfront_txq *txq;
2068 int i;
2069
2070 for (i = 0; i < np->num_queues; i++) {
2071 txq = &np->txq[i];
2072 rxq = &np->rxq[i];
2073 xen_intr_signal(txq->xen_intr_handle);
2074 XN_TX_LOCK(txq);
2075 xn_txeof(txq);
2076 XN_TX_UNLOCK(txq);
2077 XN_RX_LOCK(rxq);
2078 xn_alloc_rx_buffers(rxq);
2079 XN_RX_UNLOCK(rxq);
2080 }
2081 }
2082
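/*
 * Read the backend's feature announcements from xenstore and translate
 * them into interface capabilities: scatter/gather permits multi-fragment
 * transmit requests, feature-gso-tcpv4 enables TSO4/LRO, and checksum
 * offload is assumed available unless feature-no-csum-offload is set.
 */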
2083 static void
2084 xn_query_features(struct netfront_info *np)
2085 {
2086 int val;
2087
2088 device_printf(np->xbdev, "backend features:");
2089
2090 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
2091 "feature-sg", NULL, "%d", &val) != 0)
2092 val = 0;
2093
2094 np->maxfrags = 1;
2095 if (val) {
2096 np->maxfrags = MAX_TX_REQ_FRAGS;
2097 printf(" feature-sg");
2098 }
2099
2100 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
2101 "feature-gso-tcpv4", NULL, "%d", &val) != 0)
2102 val = 0;
2103
2104 if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_TSO4 | IFCAP_LRO);
2105 if (val) {
2106 if_setcapabilitiesbit(np->xn_ifp, IFCAP_TSO4 | IFCAP_LRO, 0);
2107 printf(" feature-gso-tcp4");
2108 }
2109
2110 /*
2111 * HW CSUM offload is assumed to be available unless
2112 * feature-no-csum-offload is set in xenstore.
2113 */
2114 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
2115 "feature-no-csum-offload", NULL, "%d", &val) != 0)
2116 val = 0;
2117
2118 if_setcapabilitiesbit(np->xn_ifp, IFCAP_HWCSUM, 0);
2119 if (val) {
2120 if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_HWCSUM);
2121 printf(" feature-no-csum-offload");
2122 }
2123
2124 printf("\n");
2125 }
2126
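/*
 * Bring the enabled capability set in line with what the interface
 * currently advertises, preserving as many previously enabled options as
 * possible.  Per-queue LRO state is freed and reinitialized whenever LRO
 * is (re)enabled.
 */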
2127 static int
2128 xn_configure_features(struct netfront_info *np)
2129 {
2130 int err, cap_enabled;
2131 #if (defined(INET) || defined(INET6))
2132 int i;
2133 #endif
2134 if_t ifp;
2135
2136 ifp = np->xn_ifp;
2137 err = 0;
2138
2139 if ((if_getcapenable(ifp) & if_getcapabilities(ifp)) == if_getcapenable(ifp)) {
2140 /* Current options are available, no need to do anything. */
2141 return (0);
2142 }
2143
2144 /* Try to preserve as many options as possible. */
2145 cap_enabled = if_getcapenable(ifp);
2146 if_setcapenable(ifp, 0);
2147 if_sethwassist(ifp, 0);
2148
2149 #if (defined(INET) || defined(INET6))
2150 if ((cap_enabled & IFCAP_LRO) != 0)
2151 for (i = 0; i < np->num_queues; i++)
2152 tcp_lro_free(&np->rxq[i].lro);
2153 if (xn_enable_lro &&
2154 (if_getcapabilities(ifp) & cap_enabled & IFCAP_LRO) != 0) {
2155 if_setcapenablebit(ifp, IFCAP_LRO, 0);
2156 for (i = 0; i < np->num_queues; i++) {
2157 err = tcp_lro_init(&np->rxq[i].lro);
2158 if (err != 0) {
2159 device_printf(np->xbdev,
2160 "LRO initialization failed\n");
2161 if_setcapenablebit(ifp, 0, IFCAP_LRO);
2162 break;
2163 }
2164 np->rxq[i].lro.ifp = ifp;
2165 }
2166 }
2167 if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_TSO4) != 0) {
2168 if_setcapenablebit(ifp, IFCAP_TSO4, 0);
2169 if_sethwassistbits(ifp, CSUM_TSO, 0);
2170 }
2171 #endif
2172 if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_TXCSUM) != 0) {
2173 if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
2174 if_sethwassistbits(ifp, XN_CSUM_FEATURES, 0);
2175 }
2176 if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_RXCSUM) != 0)
2177 if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
2178
2179 return (err);
2180 }
2181
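/*
 * Drain the queue's buf_ring while transmit slots are available, pushing
 * the assembled requests to the backend and signalling the event channel
 * when the ring macros request a notification.  A new mbuf, if any, is
 * enqueued first; the queue is flagged full once the ring fills up.
 */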
2182 static int
2183 xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
2184 {
2185 struct netfront_info *np;
2186 if_t ifp;
2187 struct buf_ring *br;
2188 int error, notify;
2189
2190 np = txq->info;
2191 br = txq->br;
2192 ifp = np->xn_ifp;
2193 error = 0;
2194
2195 XN_TX_LOCK_ASSERT(txq);
2196
2197 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
2198 !netfront_carrier_ok(np)) {
2199 if (m != NULL)
2200 error = drbr_enqueue(ifp, br, m);
2201 return (error);
2202 }
2203
2204 if (m != NULL) {
2205 error = drbr_enqueue(ifp, br, m);
2206 if (error != 0)
2207 return (error);
2208 }
2209
2210 while ((m = drbr_peek(ifp, br)) != NULL) {
2211 if (!xn_tx_slot_available(txq)) {
2212 drbr_putback(ifp, br, m);
2213 break;
2214 }
2215
2216 error = xn_assemble_tx_request(txq, m);
2217 /* xn_assemble_tx_request always consumes the mbuf. */
2218 if (error != 0) {
2219 drbr_advance(ifp, br);
2220 break;
2221 }
2222
2223 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify);
2224 if (notify)
2225 xen_intr_signal(txq->xen_intr_handle);
2226
2227 drbr_advance(ifp, br);
2228 }
2229
2230 if (RING_FULL(&txq->ring))
2231 txq->full = true;
2232
2233 return (0);
2234 }
2235
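/*
 * if_transmit entry point: select a TX queue from the mbuf's flowid (or
 * from the current CPU when no hash is present), transmit directly if the
 * queue lock is uncontended, and otherwise enqueue the mbuf and defer to
 * the queue's deferred-start task.
 */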
2236 static int
2237 xn_txq_mq_start(if_t ifp, struct mbuf *m)
2238 {
2239 struct netfront_info *np;
2240 struct netfront_txq *txq;
2241 int i, npairs, error;
2242
2243 np = if_getsoftc(ifp);
2244 npairs = np->num_queues;
2245
2246 if (!netfront_carrier_ok(np))
2247 return (ENOBUFS);
2248
2249 KASSERT(npairs != 0, ("called with 0 available queues"));
2250
2251 /* check if flowid is set */
2252 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2253 i = m->m_pkthdr.flowid % npairs;
2254 else
2255 i = curcpu % npairs;
2256
2257 txq = &np->txq[i];
2258
2259 if (XN_TX_TRYLOCK(txq) != 0) {
2260 error = xn_txq_mq_start_locked(txq, m);
2261 XN_TX_UNLOCK(txq);
2262 } else {
2263 error = drbr_enqueue(ifp, txq->br, m);
2264 taskqueue_enqueue(txq->tq, &txq->defrtask);
2265 }
2266
2267 return (error);
2268 }
2269
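/*
 * Discard every mbuf still queued on the per-queue buf_rings, then flush
 * the generic interface send queue.
 */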
2270 static void
2271 xn_qflush(if_t ifp)
2272 {
2273 struct netfront_info *np;
2274 struct netfront_txq *txq;
2275 struct mbuf *m;
2276 int i;
2277
2278 np = if_getsoftc(ifp);
2279
2280 for (i = 0; i < np->num_queues; i++) {
2281 txq = &np->txq[i];
2282
2283 XN_TX_LOCK(txq);
2284 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
2285 m_freem(m);
2286 XN_TX_UNLOCK(txq);
2287 }
2288
2289 if_qflush(ifp);
2290 }
2291
2292 /**
2293 * Create a network device.
2294 * @param dev Newbus device representing this virtual NIC.
2295 */
2296 int
2297 create_netdev(device_t dev)
2298 {
2299 struct netfront_info *np;
2300 int err, cap_enabled;
2301 if_t ifp;
2302
2303 np = device_get_softc(dev);
2304
2305 np->xbdev = dev;
2306
2307 mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);
2308
2309 ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
2310 ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
2311 ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);
2312
2313 err = xen_net_read_mac(dev, np->mac);
2314 if (err != 0)
2315 goto error;
2316
2317 /* Set up ifnet structure */
2318 ifp = np->xn_ifp = if_alloc(IFT_ETHER);
2319 if_setsoftc(ifp, np);
2320 if_initname(ifp, "xn", device_get_unit(dev));
2321 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2322 if_setioctlfn(ifp, xn_ioctl);
2323
2324 if_settransmitfn(ifp, xn_txq_mq_start);
2325 if_setqflushfn(ifp, xn_qflush);
2326
2327 if_setinitfn(ifp, xn_ifinit);
2328
2329 if_sethwassist(ifp, XN_CSUM_FEATURES);
2330 /* Enable all supported features at device creation. */
2331 if_setcapabilities(ifp, IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO);
2332 cap_enabled = if_getcapabilities(ifp);
2333 if (!xn_enable_lro) {
2334 cap_enabled &= ~IFCAP_LRO;
2335 }
2336 if_setcapenable(ifp, cap_enabled);
2337
2338 if_sethwtsomax(ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2339 if_sethwtsomaxsegcount(ifp, MAX_TX_REQ_FRAGS);
2340 if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
2341
2342 ether_ifattach(ifp, np->mac);
2343 netfront_carrier_off(np);
2344
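/*
 * The TX DMA tag mirrors the ring limits: at most MAX_TX_REQ_FRAGS
 * segments per request, each segment bounded by and no larger than a
 * page, since a grant reference covers a single page frame.
 */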
2345 err = bus_dma_tag_create(
2346 bus_get_dma_tag(dev), /* parent */
2347 1, PAGE_SIZE, /* algnmnt, boundary */
2348 BUS_SPACE_MAXADDR, /* lowaddr */
2349 BUS_SPACE_MAXADDR, /* highaddr */
2350 NULL, NULL, /* filter, filterarg */
2351 PAGE_SIZE * MAX_TX_REQ_FRAGS, /* max request size */
2352 MAX_TX_REQ_FRAGS, /* max segments */
2353 PAGE_SIZE, /* maxsegsize */
2354 BUS_DMA_ALLOCNOW, /* flags */
2355 NULL, NULL, /* lockfunc, lockarg */
2356 &np->dma_tag);
2357
2358 return (err);
2359
2360 error:
2361 KASSERT(err != 0, ("Error path with no error code specified"));
2362 return (err);
2363 }
2364
2365 static int
2366 netfront_detach(device_t dev)
2367 {
2368 struct netfront_info *info = device_get_softc(dev);
2369
2370 DPRINTK("%s\n", xenbus_get_node(dev));
2371
2372 netif_free(info);
2373
2374 return (0);
2375 }
2376
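/*
 * Final teardown: stop the interface, disconnect every queue from the
 * backend, detach from the network stack and release the queue arrays,
 * ifnet, media list and DMA tag.
 */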
2377 static void
2378 netif_free(struct netfront_info *np)
2379 {
2380
2381 XN_LOCK(np);
2382 xn_stop(np);
2383 XN_UNLOCK(np);
2384 netif_disconnect_backend(np);
2385 ether_ifdetach(np->xn_ifp);
2386 free(np->rxq, M_DEVBUF);
2387 free(np->txq, M_DEVBUF);
2388 if_free(np->xn_ifp);
2389 np->xn_ifp = NULL;
2390 ifmedia_removeall(&np->sc_media);
2391 bus_dma_tag_destroy(np->dma_tag);
2392 }
2393
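/*
 * Drop the carrier while holding every queue lock so that no queue is in
 * the middle of a transaction, then tear down each queue's connection to
 * the backend.
 */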
2394 static void
2395 netif_disconnect_backend(struct netfront_info *np)
2396 {
2397 u_int i;
2398
2399 for (i = 0; i < np->num_queues; i++) {
2400 XN_RX_LOCK(&np->rxq[i]);
2401 XN_TX_LOCK(&np->txq[i]);
2402 }
2403 netfront_carrier_off(np);
2404 for (i = 0; i < np->num_queues; i++) {
2405 XN_RX_UNLOCK(&np->rxq[i]);
2406 XN_TX_UNLOCK(&np->txq[i]);
2407 }
2408
2409 for (i = 0; i < np->num_queues; i++) {
2410 disconnect_rxq(&np->rxq[i]);
2411 disconnect_txq(&np->txq[i]);
2412 }
2413 }
2414
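/*
 * The paravirtualized device has no physical media to select or sense,
 * so the media-change handler is a no-op and the status handler always
 * reports an active manual Ethernet link.
 */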
2415 static int
2416 xn_ifmedia_upd(if_t ifp)
2417 {
2418
2419 return (0);
2420 }
2421
2422 static void
2423 xn_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
2424 {
2425
2426 ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
2427 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2428 }
2429
2430 /* ** Driver registration ** */
2431 static device_method_t netfront_methods[] = {
2432 /* Device interface */
2433 DEVMETHOD(device_probe, netfront_probe),
2434 DEVMETHOD(device_attach, netfront_attach),
2435 DEVMETHOD(device_detach, netfront_detach),
2436 DEVMETHOD(device_shutdown, bus_generic_shutdown),
2437 DEVMETHOD(device_suspend, netfront_suspend),
2438 DEVMETHOD(device_resume, netfront_resume),
2439
2440 /* Xenbus interface */
2441 DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),
2442
2443 DEVMETHOD_END
2444 };
2445
2446 static driver_t netfront_driver = {
2447 "xn",
2448 netfront_methods,
2449 sizeof(struct netfront_info),
2450 };
2451
2452 DRIVER_MODULE(netfront, xenbusb_front, netfront_driver, NULL, NULL);
2453