Lines Matching +full:network +full:- +full:oriented
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2017 - 2019 Cambridge Greys Limited
4 * Copyright (C) 2011 - 2014 Cisco Systems Inc
5 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
11 #define pr_fmt(fmt) "uml-vector: " fmt
38 * Adapted from network devices with the following major changes:
39 * All transports are static - simplifies the code significantly
43 * Configuration is no longer positional - L2TPv3 and GRE require up to
49 #define DRIVER_NAME "uml-vector"
113 spin_lock(&vp->rx_queue->head_lock); in vector_reset_stats()
114 vp->estats.rx_queue_max = 0; in vector_reset_stats()
115 vp->estats.rx_queue_running_average = 0; in vector_reset_stats()
116 vp->estats.rx_encaps_errors = 0; in vector_reset_stats()
117 vp->estats.sg_ok = 0; in vector_reset_stats()
118 vp->estats.sg_linearized = 0; in vector_reset_stats()
119 spin_unlock(&vp->rx_queue->head_lock); in vector_reset_stats()
125 spin_lock(&vp->tx_queue->head_lock); in vector_reset_stats()
126 vp->estats.tx_timeout_count = 0; in vector_reset_stats()
127 vp->estats.tx_restart_queue = 0; in vector_reset_stats()
128 vp->estats.tx_kicks = 0; in vector_reset_stats()
129 vp->estats.tx_flow_control_xon = 0; in vector_reset_stats()
130 vp->estats.tx_flow_control_xoff = 0; in vector_reset_stats()
131 vp->estats.tx_queue_max = 0; in vector_reset_stats()
132 vp->estats.tx_queue_running_average = 0; in vector_reset_stats()
133 spin_unlock(&vp->tx_queue->head_lock); in vector_reset_stats()
143 if ((result < (1 << 16) - 1) && (result >= 576)) in get_mtu()
217 return -EINVAL; in get_transport_options()
241 /* A mini-buffer for packet drop read
242 * All of our supported transports are datagram oriented and we always
255 * maximum enqueue/dequeue-at-once capacity if possible. Called by
261 qi->head = in vector_advancehead()
262 (qi->head + advance) in vector_advancehead()
263 % qi->max_depth; in vector_advancehead()
266 atomic_sub(advance, &qi->queue_depth); in vector_advancehead()
267 return atomic_read(&qi->queue_depth); in vector_advancehead()
277 qi->tail = in vector_advancetail()
278 (qi->tail + advance) in vector_advancetail()
279 % qi->max_depth; in vector_advancetail()
280 atomic_add(advance, &qi->queue_depth); in vector_advancetail()
281 return atomic_read(&qi->queue_depth); in vector_advancetail()
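(Editor's note: the two helpers above advance the consumer (head) and producer (tail) indices modulo max_depth while tracking occupancy in an atomic counter. A minimal standalone sketch of the same ring arithmetic, with hypothetical names, not taken from vector_kern.c:)

#include <linux/atomic.h>

/* Illustrative sketch only, not part of the driver: ring-index helpers
 * mirroring vector_advancehead()/vector_advancetail() above.
 */
struct ring {
        int head;               /* next slot to consume */
        int tail;               /* next free slot to fill */
        int max_depth;          /* number of slots */
        atomic_t depth;         /* occupied slots */
};

static int ring_advance_head(struct ring *r, int advance)
{
        r->head = (r->head + advance) % r->max_depth;
        atomic_sub(advance, &r->depth);
        return atomic_read(&r->depth);
}

static int ring_advance_tail(struct ring *r, int advance)
{
        r->tail = (r->tail + advance) % r->max_depth;
        atomic_add(advance, &r->depth);
        return atomic_read(&r->depth);
}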
292 nr_frags = skb_shinfo(skb)->nr_frags; in prep_msg()
297 if (vp->header_size > 0) { in prep_msg()
298 iov[iov_index].iov_len = vp->header_size; in prep_msg()
299 vp->form_header(iov[iov_index].iov_base, skb, vp); in prep_msg()
302 iov[iov_index].iov_base = skb->data; in prep_msg()
304 iov[iov_index].iov_len = skb->len - skb->data_len; in prep_msg()
305 vp->estats.sg_ok++; in prep_msg()
307 iov[iov_index].iov_len = skb->len; in prep_msg()
310 skb_frag = &skb_shinfo(skb)->frags[frag]; in prep_msg()
317 return -1; in prep_msg()
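(Editor's note: prep_msg() above maps the optional encapsulation header, the skb linear area, and each paged fragment into consecutive iovec entries. A hedged sketch of the same idea using standard skb accessors; the helper name is hypothetical and this is not the driver's code:)

#include <linux/skbuff.h>
#include <linux/uio.h>

/* Illustrative sketch, not prep_msg(): map an skb's linear data and paged
 * frags into an iovec array, returning the number of entries used.
 */
static int skb_to_iov(struct sk_buff *skb, struct iovec *iov, int max_iov)
{
        int n = 0, i;

        if (n == max_iov)
                return -1;
        iov[n].iov_base = skb->data;
        iov[n].iov_len = skb_headlen(skb);      /* linear part only */
        n++;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                if (n == max_iov)
                        return -1;
                iov[n].iov_base = skb_frag_address(frag);
                iov[n].iov_len = skb_frag_size(frag);
                n++;
        }
        return n;
}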
327 struct vector_private *vp = netdev_priv(qi->dev); in vector_enqueue()
330 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in vector_enqueue()
333 spin_lock(&qi->tail_lock); in vector_enqueue()
334 queue_depth = atomic_read(&qi->queue_depth); in vector_enqueue()
337 packet_len = skb->len; in vector_enqueue()
339 if (queue_depth < qi->max_depth) { in vector_enqueue()
341 *(qi->skbuff_vector + qi->tail) = skb; in vector_enqueue()
342 mmsg_vector += qi->tail; in vector_enqueue()
346 mmsg_vector->msg_hdr.msg_iov in vector_enqueue()
350 mmsg_vector->msg_hdr.msg_iovlen = iov_count; in vector_enqueue()
351 mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr; in vector_enqueue()
352 mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size; in vector_enqueue()
357 spin_unlock(&qi->tail_lock); in vector_enqueue()
360 qi->dev->stats.tx_dropped++; in vector_enqueue()
362 packet_len = skb->len; in vector_enqueue()
364 netdev_completed_queue(qi->dev, 1, packet_len); in vector_enqueue()
366 spin_unlock(&qi->tail_lock); in vector_enqueue()
376 for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) { in consume_vector_skbs()
377 skb = *(qi->skbuff_vector + skb_index); in consume_vector_skbs()
381 bytes_compl += skb->len; in consume_vector_skbs()
382 *(qi->skbuff_vector + skb_index) = NULL; in consume_vector_skbs()
385 qi->dev->stats.tx_bytes += bytes_compl; in consume_vector_skbs()
386 qi->dev->stats.tx_packets += count; in consume_vector_skbs()
387 netdev_completed_queue(qi->dev, count, bytes_compl); in consume_vector_skbs()
400 struct vector_private *vp = netdev_priv(qi->dev); in vector_send()
404 if (spin_trylock(&qi->head_lock)) { in vector_send()
406 while (atomic_read(&qi->queue_depth) > 0) { in vector_send()
408 send_len = atomic_read(&qi->queue_depth); in vector_send()
409 send_from = qi->mmsg_vector; in vector_send()
410 send_from += qi->head; in vector_send()
412 if (send_len + qi->head > qi->max_depth) in vector_send()
413 send_len = qi->max_depth - qi->head; in vector_send()
417 vp->fds->tx_fd, in vector_send()
422 vp->in_write_poll = in vector_send()
428 * them all TX-ed and blame the network. in vector_send()
432 netdev_err(vp->dev, "sendmmsg err=%i\n", in vector_send()
434 vp->in_error = true; in vector_send()
443 if (result > vp->estats.tx_queue_max) in vector_send()
444 vp->estats.tx_queue_max = result; in vector_send()
445 vp->estats.tx_queue_running_average = in vector_send()
446 (vp->estats.tx_queue_running_average + result) >> 1; in vector_send()
448 netif_wake_queue(qi->dev); in vector_send()
453 vp->estats.tx_restart_queue++; in vector_send()
457 spin_unlock(&qi->head_lock); in vector_send()
459 return atomic_read(&qi->queue_depth); in vector_send()
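(Editor's note: vector_send() drains a contiguous chunk of the ring, clipped at max_depth - head so a batch never wraps, with one sendmmsg call, then advances the head by however many messages the kernel accepted. A hedged userspace sketch of the same batching pattern, assuming an already-connected datagram socket; not the UML helper functions:)

#define _GNU_SOURCE
#include <sys/socket.h>

/* Illustrative userspace sketch (not the driver): drain a ring of mmsghdrs
 * with one sendmmsg() call per contiguous chunk.
 */
static int ring_send(int fd, struct mmsghdr *ring, int max_depth,
                     int *head, int depth)
{
        while (depth > 0) {
                int chunk = depth;
                int sent;

                if (*head + chunk > max_depth)
                        chunk = max_depth - *head;      /* stop at the wrap point */

                sent = sendmmsg(fd, ring + *head, chunk, 0);
                if (sent < 0)
                        return -1;      /* caller inspects errno (e.g. EAGAIN) */

                *head = (*head + sent) % max_depth;
                depth -= sent;
                if (sent < chunk)
                        break;          /* short send - retry later */
        }
        return depth;                   /* messages still queued */
}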
470 struct vector_private *vp = netdev_priv(qi->dev); in destroy_queue()
475 /* deallocate any skbuffs - we rely on any unused to be in destroy_queue()
478 if (qi->skbuff_vector != NULL) { in destroy_queue()
479 for (i = 0; i < qi->max_depth; i++) { in destroy_queue()
480 if (*(qi->skbuff_vector + i) != NULL) in destroy_queue()
481 dev_kfree_skb_any(*(qi->skbuff_vector + i)); in destroy_queue()
483 kfree(qi->skbuff_vector); in destroy_queue()
486 if (qi->mmsg_vector != NULL) { in destroy_queue()
487 mmsg_vector = qi->mmsg_vector; in destroy_queue()
488 for (i = 0; i < qi->max_depth; i++) { in destroy_queue()
489 iov = mmsg_vector->msg_hdr.msg_iov; in destroy_queue()
491 if ((vp->header_size > 0) && in destroy_queue()
492 (iov->iov_base != NULL)) in destroy_queue()
493 kfree(iov->iov_base); in destroy_queue()
498 kfree(qi->mmsg_vector); in destroy_queue()
520 result->max_depth = max_size; in create_queue()
521 result->dev = vp->dev; in create_queue()
522 result->mmsg_vector = kmalloc( in create_queue()
524 if (result->mmsg_vector == NULL) in create_queue()
526 result->skbuff_vector = kmalloc( in create_queue()
528 if (result->skbuff_vector == NULL) in create_queue()
533 mmsg_vector = result->mmsg_vector; in create_queue()
535 /* Clear all pointers - we use non-NULL as marking on in create_queue()
538 *(result->skbuff_vector + i) = NULL; in create_queue()
539 mmsg_vector->msg_hdr.msg_iov = NULL; in create_queue()
542 mmsg_vector = result->mmsg_vector; in create_queue()
543 result->max_iov_frags = num_extra_frags; in create_queue()
545 if (vp->header_size > 0) in create_queue()
557 mmsg_vector->msg_hdr.msg_iov = iov; in create_queue()
558 mmsg_vector->msg_hdr.msg_iovlen = 1; in create_queue()
559 mmsg_vector->msg_hdr.msg_control = NULL; in create_queue()
560 mmsg_vector->msg_hdr.msg_controllen = 0; in create_queue()
561 mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT; in create_queue()
562 mmsg_vector->msg_hdr.msg_name = NULL; in create_queue()
563 mmsg_vector->msg_hdr.msg_namelen = 0; in create_queue()
564 if (vp->header_size > 0) { in create_queue()
565 iov->iov_base = kmalloc(header_size, GFP_KERNEL); in create_queue()
566 if (iov->iov_base == NULL) in create_queue()
568 iov->iov_len = header_size; in create_queue()
569 mmsg_vector->msg_hdr.msg_iovlen = 2; in create_queue()
572 iov->iov_base = NULL; in create_queue()
573 iov->iov_len = 0; in create_queue()
576 spin_lock_init(&result->head_lock); in create_queue()
577 spin_lock_init(&result->tail_lock); in create_queue()
578 atomic_set(&result->queue_depth, 0); in create_queue()
579 result->head = 0; in create_queue()
580 result->tail = 0; in create_queue()
583 kfree(result->mmsg_vector); in create_queue()
595 * happens in-line. While we can try using the return code of
605 int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN; in prep_skb()
608 struct iovec *iov = msg->msg_iov; in prep_skb()
612 if (vp->req_size <= linear) in prep_skb()
615 len = vp->req_size; in prep_skb()
618 len - vp->max_packet, in prep_skb()
623 if (vp->header_size > 0) in prep_skb()
630 skb_reserve(result, vp->headroom); in prep_skb()
631 result->dev = vp->dev; in prep_skb()
632 skb_put(result, vp->max_packet); in prep_skb()
633 result->data_len = len - vp->max_packet; in prep_skb()
634 result->len += len - vp->max_packet; in prep_skb()
636 result->ip_summed = CHECKSUM_NONE; in prep_skb()
637 iov[iov_index].iov_base = result->data; in prep_skb()
638 iov[iov_index].iov_len = vp->max_packet; in prep_skb()
641 nr_frags = skb_shinfo(result)->nr_frags; in prep_skb()
643 skb_frag = &skb_shinfo(result)->frags[frag]; in prep_skb()
652 msg->msg_iovlen = iov_index; in prep_skb()
657 /* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */
661 struct vector_private *vp = netdev_priv(qi->dev); in prep_queue_for_rx()
662 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in prep_queue_for_rx()
663 void **skbuff_vector = qi->skbuff_vector; in prep_queue_for_rx()
666 queue_depth = atomic_read(&qi->queue_depth); in prep_queue_for_rx()
675 qi->head = qi->tail = 0; in prep_queue_for_rx()
678 /* it is OK if allocation fails - recvmmsg with NULL data in in prep_queue_for_rx()
683 *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr); in prep_queue_for_rx()
687 atomic_set(&qi->queue_depth, 0); in prep_queue_for_rx()
698 if (device->unit == n) in find_device()
717 return -EINVAL; in vector_parse()
730 return -EINVAL; in vector_parse()
756 return -ENOMEM; in vector_config()
764 return -EINVAL; in vector_config()
778 return -1; in vector_id()
794 return -ENODEV; in vector_remove()
795 dev = vec_d->dev; in vector_remove()
797 if (vp->fds != NULL) in vector_remove()
798 return -EBUSY; in vector_remove()
800 platform_device_unregister(&vec_d->pdev); in vector_remove()
805 * There is no shared per-transport initialization code, so
821 struct net_device *netdev = device->dev; in vector_device_release()
823 list_del(&device->list); in vector_device_release()
828 /* Bog standard recv using recvmsg - not used normally unless the user
848 if (vp->header_size > 0) { in vector_legacy_rx()
849 iov[0].iov_base = vp->header_rxbuffer; in vector_legacy_rx()
850 iov[0].iov_len = vp->header_size; in vector_legacy_rx()
862 vp->dev->stats.rx_dropped++; in vector_legacy_rx()
865 pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0); in vector_legacy_rx()
867 vp->in_error = true; in vector_legacy_rx()
872 if (pkt_len > vp->header_size) { in vector_legacy_rx()
873 if (vp->header_size > 0) { in vector_legacy_rx()
874 header_check = vp->verify_header( in vector_legacy_rx()
875 vp->header_rxbuffer, skb, vp); in vector_legacy_rx()
878 vp->dev->stats.rx_dropped++; in vector_legacy_rx()
879 vp->estats.rx_encaps_errors++; in vector_legacy_rx()
883 vp->estats.rx_csum_offload_good++; in vector_legacy_rx()
884 skb->ip_summed = CHECKSUM_UNNECESSARY; in vector_legacy_rx()
887 pskb_trim(skb, pkt_len - vp->rx_header_size); in vector_legacy_rx()
888 skb->protocol = eth_type_trans(skb, skb->dev); in vector_legacy_rx()
889 vp->dev->stats.rx_bytes += skb->len; in vector_legacy_rx()
890 vp->dev->stats.rx_packets++; in vector_legacy_rx()
891 napi_gro_receive(&vp->napi, skb); in vector_legacy_rx()
911 iov[0].iov_base = vp->header_txbuffer; in writev_tx()
918 vp->fds->tx_fd, in writev_tx()
926 netif_trans_update(vp->dev); in writev_tx()
927 netif_wake_queue(vp->dev); in writev_tx()
930 vp->dev->stats.tx_bytes += skb->len; in writev_tx()
931 vp->dev->stats.tx_packets++; in writev_tx()
933 vp->dev->stats.tx_dropped++; in writev_tx()
938 vp->dev->stats.tx_dropped++; in writev_tx()
941 vp->in_error = true; in writev_tx()
953 struct vector_queue *qi = vp->rx_queue; in vector_mmsg_rx()
955 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in vector_mmsg_rx()
956 void **skbuff_vector = qi->skbuff_vector; in vector_mmsg_rx()
965 /* Fire the Lazy Gun - get as many packets as we can in one go. */ in vector_mmsg_rx()
967 if (budget > qi->max_depth) in vector_mmsg_rx()
968 budget = qi->max_depth; in vector_mmsg_rx()
971 vp->fds->rx_fd, qi->mmsg_vector, budget, 0); in vector_mmsg_rx()
974 vp->in_error = true; in vector_mmsg_rx()
984 atomic_add(packet_count, &qi->queue_depth); in vector_mmsg_rx()
988 if (mmsg_vector->msg_len > vp->header_size) { in vector_mmsg_rx()
989 if (vp->header_size > 0) { in vector_mmsg_rx()
990 header_check = vp->verify_header( in vector_mmsg_rx()
991 mmsg_vector->msg_hdr.msg_iov->iov_base, in vector_mmsg_rx()
996 /* Overlay header failed to verify - discard. in vector_mmsg_rx()
1002 vp->estats.rx_encaps_errors++; in vector_mmsg_rx()
1006 vp->estats.rx_csum_offload_good++; in vector_mmsg_rx()
1007 skb->ip_summed = CHECKSUM_UNNECESSARY; in vector_mmsg_rx()
1011 mmsg_vector->msg_len - vp->rx_header_size); in vector_mmsg_rx()
1012 skb->protocol = eth_type_trans(skb, skb->dev); in vector_mmsg_rx()
1015 * The interrupt loop is non-reentrant. in vector_mmsg_rx()
1017 vp->dev->stats.rx_bytes += skb->len; in vector_mmsg_rx()
1018 vp->dev->stats.rx_packets++; in vector_mmsg_rx()
1019 napi_gro_receive(&vp->napi, skb); in vector_mmsg_rx()
1021 /* Overlay header too short to do anything - discard. in vector_mmsg_rx()
1034 if (vp->estats.rx_queue_max < packet_count) in vector_mmsg_rx()
1035 vp->estats.rx_queue_max = packet_count; in vector_mmsg_rx()
1036 vp->estats.rx_queue_running_average = in vector_mmsg_rx()
1037 (vp->estats.rx_queue_running_average + packet_count) >> 1; in vector_mmsg_rx()
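(Editor's note: the RX path above posts a ring of pre-allocated sk_buffs via prep_queue_for_rx(), pulls a whole burst with a single recvmmsg call, and then uses each mmsghdr's msg_len to verify the overlay header and trim the skb. A hedged userspace sketch of the burst-receive pattern; names are hypothetical and this is not uml_vector_recvmmsg_vector():)

#define _GNU_SOURCE
#include <sys/socket.h>

/* Illustrative userspace sketch: receive a burst of datagrams into
 * pre-posted buffers and count those longer than the encapsulation header.
 */
static int rx_burst(int fd, struct mmsghdr *vec, unsigned int budget,
                    unsigned int hdr_size)
{
        int got = recvmmsg(fd, vec, budget, MSG_DONTWAIT, NULL);
        int i, good = 0;

        if (got < 0)
                return -1;              /* EAGAIN just means nothing pending */

        for (i = 0; i < got; i++) {
                /* vec[i].msg_len is the datagram length; packets no longer
                 * than the header would be discarded, as in the driver above.
                 */
                if (vec[i].msg_len > hdr_size)
                        good++;
        }
        return good;
}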
1047 if (vp->in_error) { in vector_net_start_xmit()
1048 deactivate_fd(vp->fds->rx_fd, vp->rx_irq); in vector_net_start_xmit()
1049 if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0)) in vector_net_start_xmit()
1050 deactivate_fd(vp->fds->tx_fd, vp->tx_irq); in vector_net_start_xmit()
1054 if ((vp->options & VECTOR_TX) == 0) { in vector_net_start_xmit()
1063 netdev_sent_queue(vp->dev, skb->len); in vector_net_start_xmit()
1064 queue_depth = vector_enqueue(vp->tx_queue, skb); in vector_net_start_xmit()
1066 if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) { in vector_net_start_xmit()
1067 mod_timer(&vp->tl, vp->coalesce); in vector_net_start_xmit()
1070 queue_depth = vector_send(vp->tx_queue); in vector_net_start_xmit()
1072 napi_schedule(&vp->napi); in vector_net_start_xmit()
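(Editor's note: the transmit path pairs netdev_sent_queue() at enqueue time with netdev_completed_queue() once buffers are consumed (see consume_vector_skbs() and the tx_dropped path above); byte-queue limits require every queued byte to be completed exactly once. A minimal sketch of that pairing with hypothetical helper names:)

#include <linux/netdevice.h>

/* Illustrative sketch of the BQL accounting used above (hypothetical helpers). */
static void my_queue_for_tx(struct net_device *dev, struct sk_buff *skb)
{
        netdev_sent_queue(dev, skb->len);       /* account bytes when queued */
        /* ... place skb on the driver's ring ... */
}

static void my_tx_complete(struct net_device *dev, unsigned int pkts,
                           unsigned int bytes)
{
        /* account the same bytes exactly once when the host consumed them */
        netdev_completed_queue(dev, pkts, bytes);
}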
1085 napi_schedule(&vp->napi); in vector_rx_interrupt()
1098 * -EAGAIN or -ENOBUFS from sendmmsg. Otherwise in vector_tx_interrupt()
1104 napi_schedule(&vp->napi); in vector_tx_interrupt()
1116 timer_delete(&vp->tl); in vector_net_close()
1118 vp->opened = false; in vector_net_close()
1120 if (vp->fds == NULL) in vector_net_close()
1124 if (vp->rx_irq > 0) { in vector_net_close()
1125 um_free_irq(vp->rx_irq, dev); in vector_net_close()
1126 vp->rx_irq = 0; in vector_net_close()
1128 if (vp->tx_irq > 0) { in vector_net_close()
1129 um_free_irq(vp->tx_irq, dev); in vector_net_close()
1130 vp->tx_irq = 0; in vector_net_close()
1132 napi_disable(&vp->napi); in vector_net_close()
1133 netif_napi_del(&vp->napi); in vector_net_close()
1134 if (vp->fds->rx_fd > 0) { in vector_net_close()
1135 if (vp->bpf) in vector_net_close()
1136 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_close()
1137 os_close_file(vp->fds->rx_fd); in vector_net_close()
1138 vp->fds->rx_fd = -1; in vector_net_close()
1140 if (vp->fds->tx_fd > 0) { in vector_net_close()
1141 os_close_file(vp->fds->tx_fd); in vector_net_close()
1142 vp->fds->tx_fd = -1; in vector_net_close()
1144 if (vp->bpf != NULL) in vector_net_close()
1145 kfree(vp->bpf->filter); in vector_net_close()
1146 kfree(vp->bpf); in vector_net_close()
1147 vp->bpf = NULL; in vector_net_close()
1148 kfree(vp->fds->remote_addr); in vector_net_close()
1149 kfree(vp->transport_data); in vector_net_close()
1150 kfree(vp->header_rxbuffer); in vector_net_close()
1151 kfree(vp->header_txbuffer); in vector_net_close()
1152 if (vp->rx_queue != NULL) in vector_net_close()
1153 destroy_queue(vp->rx_queue); in vector_net_close()
1154 if (vp->tx_queue != NULL) in vector_net_close()
1155 destroy_queue(vp->tx_queue); in vector_net_close()
1156 kfree(vp->fds); in vector_net_close()
1157 vp->fds = NULL; in vector_net_close()
1158 vp->in_error = false; in vector_net_close()
1169 if ((vp->options & VECTOR_TX) != 0) in vector_poll()
1170 tx_enqueued = (vector_send(vp->tx_queue) > 0); in vector_poll()
1171 spin_lock(&vp->rx_queue->head_lock); in vector_poll()
1172 if ((vp->options & VECTOR_RX) > 0) in vector_poll()
1179 spin_unlock(&vp->rx_queue->head_lock); in vector_poll()
1194 netdev_reset_queue(vp->dev); in vector_reset_tx()
1195 netif_start_queue(vp->dev); in vector_reset_tx()
1196 netif_wake_queue(vp->dev); in vector_reset_tx()
1202 int err = -EINVAL; in vector_net_open()
1205 if (vp->opened) in vector_net_open()
1206 return -ENXIO; in vector_net_open()
1207 vp->opened = true; in vector_net_open()
1209 vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed)); in vector_net_open()
1211 vp->fds = uml_vector_user_open(vp->unit, vp->parsed); in vector_net_open()
1213 if (vp->fds == NULL) in vector_net_open()
1219 if ((vp->options & VECTOR_RX) > 0) { in vector_net_open()
1220 vp->rx_queue = create_queue( in vector_net_open()
1222 get_depth(vp->parsed), in vector_net_open()
1223 vp->rx_header_size, in vector_net_open()
1226 atomic_set(&vp->rx_queue->queue_depth, get_depth(vp->parsed)); in vector_net_open()
1228 vp->header_rxbuffer = kmalloc( in vector_net_open()
1229 vp->rx_header_size, in vector_net_open()
1232 if (vp->header_rxbuffer == NULL) in vector_net_open()
1235 if ((vp->options & VECTOR_TX) > 0) { in vector_net_open()
1236 vp->tx_queue = create_queue( in vector_net_open()
1238 get_depth(vp->parsed), in vector_net_open()
1239 vp->header_size, in vector_net_open()
1243 vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL); in vector_net_open()
1244 if (vp->header_txbuffer == NULL) in vector_net_open()
1248 netif_napi_add_weight(vp->dev, &vp->napi, vector_poll, in vector_net_open()
1249 get_depth(vp->parsed)); in vector_net_open()
1250 napi_enable(&vp->napi); in vector_net_open()
1254 irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd, in vector_net_open()
1256 IRQF_SHARED, dev->name, dev); in vector_net_open()
1259 err = -ENETUNREACH; in vector_net_open()
1262 vp->rx_irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1263 dev->irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1266 /* WRITE IRQ - we need it only if we have vector TX */ in vector_net_open()
1267 if ((vp->options & VECTOR_TX) > 0) { in vector_net_open()
1269 irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd, in vector_net_open()
1271 IRQF_SHARED, dev->name, dev); in vector_net_open()
1275 err = -ENETUNREACH; in vector_net_open()
1278 vp->tx_irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1282 if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { in vector_net_open()
1283 if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) in vector_net_open()
1284 vp->options |= VECTOR_BPF; in vector_net_open()
1286 if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL)) in vector_net_open()
1287 vp->bpf = uml_vector_default_bpf(dev->dev_addr); in vector_net_open()
1289 if (vp->bpf != NULL) in vector_net_open()
1290 uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_open()
1295 /* clear buffer - it can happen that the host side of the interface in vector_net_open()
1300 napi_schedule(&vp->napi); in vector_net_open()
1302 vdevice = find_device(vp->unit); in vector_net_open()
1303 vdevice->opened = 1; in vector_net_open()
1305 if ((vp->options & VECTOR_TX) != 0) in vector_net_open()
1306 add_timer(&vp->tl); in vector_net_open()
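(Editor's note: vector_net_open() above optionally attaches a classic BPF program, either the default filter or one loaded via ethtool flash, to the receive socket. A hedged userspace sketch of attaching a trivial accept-everything sock_fprog with SO_ATTACH_FILTER; purely illustrative, not uml_vector_attach_bpf():)

#include <sys/socket.h>
#include <linux/filter.h>

/* Illustrative sketch: attach a trivial "accept all" classic BPF program. */
static int attach_accept_all(int fd)
{
        struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xffffffff),  /* return whole packet */
        };
        struct sock_fprog prog = {
                .len = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };

        return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                          &prog, sizeof(prog));
}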
1316 /* TODO: we can do some BPF games here */ in vector_net_set_multicast_list()
1324 vp->estats.tx_timeout_count++; in vector_net_tx_timeout()
1326 schedule_work(&vp->reset_tx); in vector_net_tx_timeout()
1345 /* All new frame buffers will be GRO-sized */ in vector_set_features()
1346 vp->req_size = 65536; in vector_set_features()
1349 vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN; in vector_set_features()
1356 disable_irq(dev->irq); in vector_net_poll_controller()
1357 vector_rx_interrupt(dev->irq, dev); in vector_net_poll_controller()
1358 enable_irq(dev->irq); in vector_net_poll_controller()
1365 strscpy(info->driver, DRIVER_NAME); in vector_net_get_drvinfo()
1376 if (!(vp->options & VECTOR_BPF_FLASH)) { in vector_net_load_bpf_flash()
1377 netdev_err(dev, "loading firmware not permitted: %s\n", efl->data); in vector_net_load_bpf_flash()
1378 return -1; in vector_net_load_bpf_flash()
1381 if (vp->bpf != NULL) { in vector_net_load_bpf_flash()
1382 if (vp->opened) in vector_net_load_bpf_flash()
1383 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_load_bpf_flash()
1384 kfree(vp->bpf->filter); in vector_net_load_bpf_flash()
1385 vp->bpf->filter = NULL; in vector_net_load_bpf_flash()
1387 vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC); in vector_net_load_bpf_flash()
1388 if (vp->bpf == NULL) { in vector_net_load_bpf_flash()
1394 vdevice = find_device(vp->unit); in vector_net_load_bpf_flash()
1396 if (request_firmware(&fw, efl->data, &vdevice->pdev.dev)) in vector_net_load_bpf_flash()
1399 vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC); in vector_net_load_bpf_flash()
1400 if (!vp->bpf->filter) in vector_net_load_bpf_flash()
1403 vp->bpf->len = fw->size / sizeof(struct sock_filter); in vector_net_load_bpf_flash()
1406 if (vp->opened) in vector_net_load_bpf_flash()
1407 result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_load_bpf_flash()
1415 if (vp->bpf != NULL) in vector_net_load_bpf_flash()
1416 kfree(vp->bpf->filter); in vector_net_load_bpf_flash()
1417 kfree(vp->bpf); in vector_net_load_bpf_flash()
1418 vp->bpf = NULL; in vector_net_load_bpf_flash()
1419 return -1; in vector_net_load_bpf_flash()
1429 ring->rx_max_pending = vp->rx_queue->max_depth; in vector_get_ringparam()
1430 ring->tx_max_pending = vp->tx_queue->max_depth; in vector_get_ringparam()
1431 ring->rx_pending = vp->rx_queue->max_depth; in vector_get_ringparam()
1432 ring->tx_pending = vp->tx_queue->max_depth; in vector_get_ringparam()
1458 return -EOPNOTSUPP; in vector_get_sset_count()
1474 spin_lock(&vp->tx_queue->head_lock); in vector_get_ethtool_stats()
1475 spin_lock(&vp->rx_queue->head_lock); in vector_get_ethtool_stats()
1476 memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats)); in vector_get_ethtool_stats()
1477 spin_unlock(&vp->rx_queue->head_lock); in vector_get_ethtool_stats()
1478 spin_unlock(&vp->tx_queue->head_lock); in vector_get_ethtool_stats()
1488 ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ; in vector_get_coalesce()
1499 vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000; in vector_set_coalesce()
1500 if (vp->coalesce == 0) in vector_set_coalesce()
1501 vp->coalesce = 1; in vector_set_coalesce()
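(Editor's note: the coalesce setter above converts ethtool's tx_coalesce_usecs into timer jiffies and clamps the result to at least one tick. A standalone illustration of the same arithmetic, assuming HZ == 100; not taken from the driver:)

/* Illustrative arithmetic only: usecs to timer ticks, minimum one tick. */
#define EXAMPLE_HZ 100

static unsigned long coalesce_usecs_to_ticks(unsigned long usecs)
{
        unsigned long ticks = (usecs * EXAMPLE_HZ) / 1000000;

        return ticks ? ticks : 1;
}
/* coalesce_usecs_to_ticks(20000) == 2; coalesce_usecs_to_ticks(1) == 1 */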
1539 vp->estats.tx_kicks++; in vector_timer_expire()
1540 napi_schedule(&vp->napi); in vector_timer_expire()
1600 dev->mtu = get_mtu(def); in vector_eth_configure()
1602 INIT_LIST_HEAD(&device->list); in vector_eth_configure()
1603 device->unit = n; in vector_eth_configure()
1609 snprintf(dev->name, sizeof(dev->name), "vec%d", n); in vector_eth_configure()
1618 device->pdev.id = n; in vector_eth_configure()
1619 device->pdev.name = DRIVER_NAME; in vector_eth_configure()
1620 device->pdev.dev.release = vector_device_release; in vector_eth_configure()
1621 dev_set_drvdata(&device->pdev.dev, device); in vector_eth_configure()
1622 if (platform_device_register(&device->pdev)) in vector_eth_configure()
1624 SET_NETDEV_DEV(dev, &device->pdev.dev); in vector_eth_configure()
1626 device->dev = dev; in vector_eth_configure()
1628 INIT_LIST_HEAD(&vp->list); in vector_eth_configure()
1629 vp->dev = dev; in vector_eth_configure()
1630 vp->unit = n; in vector_eth_configure()
1631 vp->options = get_transport_options(def); in vector_eth_configure()
1632 vp->parsed = def; in vector_eth_configure()
1633 vp->max_packet = get_mtu(def) + ETH_HEADER_OTHER; in vector_eth_configure()
1635 * TODO - we need to calculate headroom so that ip header in vector_eth_configure()
1638 vp->headroom = get_headroom(def); in vector_eth_configure()
1639 vp->coalesce = 2; in vector_eth_configure()
1640 vp->req_size = get_req_size(def); in vector_eth_configure()
1642 dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST); in vector_eth_configure()
1643 INIT_WORK(&vp->reset_tx, vector_reset_tx); in vector_eth_configure()
1645 timer_setup(&vp->tl, vector_timer_expire, 0); in vector_eth_configure()
1648 dev->netdev_ops = &vector_netdev_ops; in vector_eth_configure()
1649 dev->ethtool_ops = &vector_net_ethtool_ops; in vector_eth_configure()
1650 dev->watchdog_timeo = (HZ >> 1); in vector_eth_configure()
1651 /* primary IRQ - fixme */ in vector_eth_configure()
1652 dev->irq = 0; /* we will adjust this once opened */ in vector_eth_configure()
1661 list_add(&device->list, &vector_devices); in vector_eth_configure()
1689 parsed = uml_parse_vector_ifspec(def->arguments); in vector_init()
1691 vector_eth_configure(def->unit, parsed); in vector_init()
1714 INIT_LIST_HEAD(&new->list); in vector_setup()
1715 new->unit = n; in vector_setup()
1716 new->arguments = str; in vector_setup()
1717 list_add_tail(&new->list, &vec_cmd_line); in vector_setup()
1723 "vec[0-9]+:<option>=<value>,<option>=<value>\n"
1724 " Configure a vector io network device.\n\n"