Lines Matching full:vp
71 static int vector_mmsg_rx(struct vector_private *vp, int budget);
104 static void vector_reset_stats(struct vector_private *vp)
112 spin_lock(&vp->rx_queue->head_lock);
113 vp->estats.rx_queue_max = 0;
114 vp->estats.rx_queue_running_average = 0;
115 vp->estats.rx_encaps_errors = 0;
116 vp->estats.sg_ok = 0;
117 vp->estats.sg_linearized = 0;
118 spin_unlock(&vp->rx_queue->head_lock);
124 spin_lock(&vp->tx_queue->head_lock);
125 vp->estats.tx_timeout_count = 0;
126 vp->estats.tx_restart_queue = 0;
127 vp->estats.tx_kicks = 0;
128 vp->estats.tx_flow_control_xon = 0;
129 vp->estats.tx_flow_control_xoff = 0;
130 vp->estats.tx_queue_max = 0;
131 vp->estats.tx_queue_running_average = 0;
132 spin_unlock(&vp->tx_queue->head_lock);
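These hits appear to come from the UML vector network driver (arch/um/drivers/vector_kern.c). The reset path zeroes the RX-side counters under the RX queue's head_lock and the TX-side counters under the TX queue's head_lock, which matches the locking the ethtool stats copy takes further down (the hits around lines 1473-1477), so counters are never cleared while a snapshot is being read. A minimal, abridged sketch of that pattern, assuming the vector_private and vector_queue layouts implied by the hits:

    static void vector_reset_stats(struct vector_private *vp)
    {
            /* counters updated on the RX path are cleared under the RX head_lock */
            spin_lock(&vp->rx_queue->head_lock);
            vp->estats.rx_queue_max = 0;
            vp->estats.rx_queue_running_average = 0;
            vp->estats.rx_encaps_errors = 0;
            spin_unlock(&vp->rx_queue->head_lock);

            /* counters updated on the TX path are cleared under the TX head_lock */
            spin_lock(&vp->tx_queue->head_lock);
            vp->estats.tx_queue_max = 0;
            vp->estats.tx_queue_running_average = 0;
            vp->estats.tx_kicks = 0;
            spin_unlock(&vp->tx_queue->head_lock);
    }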
283 static int prep_msg(struct vector_private *vp,
296 if (vp->header_size > 0) {
297 iov[iov_index].iov_len = vp->header_size;
298 vp->form_header(iov[iov_index].iov_base, skb, vp);
304 vp->estats.sg_ok++;
326 struct vector_private *vp = netdev_priv(qi->dev);
343 vp,
350 mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
351 mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
399 struct vector_private *vp = netdev_priv(qi->dev);
416 vp->fds->tx_fd,
421 vp->in_write_poll =
431 netdev_err(vp->dev, "sendmmsg err=%i\n",
433 vp->in_error = true;
442 if (result > vp->estats.tx_queue_max)
443 vp->estats.tx_queue_max = result;
444 vp->estats.tx_queue_running_average =
445 (vp->estats.tx_queue_running_average + result) >> 1;
452 vp->estats.tx_restart_queue++;
469 struct vector_private *vp = netdev_priv(qi->dev);
490 if ((vp->header_size > 0) &&
506 struct vector_private *vp,
520 result->dev = vp->dev;
544 if (vp->header_size > 0)
563 if (vp->header_size > 0) {
601 struct vector_private *vp,
604 int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
611 if (vp->req_size <= linear)
614 len = vp->req_size;
617 len - vp->max_packet,
622 if (vp->header_size > 0)
629 skb_reserve(result, vp->headroom);
630 result->dev = vp->dev;
631 skb_put(result, vp->max_packet);
632 result->data_len = len - vp->max_packet;
633 result->len += len - vp->max_packet;
637 iov[iov_index].iov_len = vp->max_packet;
660 struct vector_private *vp = netdev_priv(qi->dev);
682 *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
789 struct vector_private *vp;
795 vp = netdev_priv(dev);
796 if (vp->fds != NULL)
831 static int vector_legacy_rx(struct vector_private *vp)
847 if (vp->header_size > 0) {
848 iov[0].iov_base = vp->header_rxbuffer;
849 iov[0].iov_len = vp->header_size;
852 skb = prep_skb(vp, &hdr);
861 vp->dev->stats.rx_dropped++;
864 pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
866 vp->in_error = true;
871 if (pkt_len > vp->header_size) {
872 if (vp->header_size > 0) {
873 header_check = vp->verify_header(
874 vp->header_rxbuffer, skb, vp);
877 vp->dev->stats.rx_dropped++;
878 vp->estats.rx_encaps_errors++;
882 vp->estats.rx_csum_offload_good++;
886 pskb_trim(skb, pkt_len - vp->rx_header_size);
888 vp->dev->stats.rx_bytes += skb->len;
889 vp->dev->stats.rx_packets++;
890 napi_gro_receive(&vp->napi, skb);
905 static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
910 iov[0].iov_base = vp->header_txbuffer;
911 iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
917 vp->fds->tx_fd,
925 netif_trans_update(vp->dev);
926 netif_wake_queue(vp->dev);
929 vp->dev->stats.tx_bytes += skb->len;
930 vp->dev->stats.tx_packets++;
932 vp->dev->stats.tx_dropped++;
937 vp->dev->stats.tx_dropped++;
940 vp->in_error = true;
949 static int vector_mmsg_rx(struct vector_private *vp, int budget)
952 struct vector_queue *qi = vp->rx_queue;
970 vp->fds->rx_fd, qi->mmsg_vector, budget, 0);
973 vp->in_error = true;
987 if (mmsg_vector->msg_len > vp->header_size) {
988 if (vp->header_size > 0) {
989 header_check = vp->verify_header(
992 vp
1001 vp->estats.rx_encaps_errors++;
1005 vp->estats.rx_csum_offload_good++;
1010 mmsg_vector->msg_len - vp->rx_header_size);
1016 vp->dev->stats.rx_bytes += skb->len;
1017 vp->dev->stats.rx_packets++;
1018 napi_gro_receive(&vp->napi, skb);
1033 if (vp->estats.rx_queue_max < packet_count)
1034 vp->estats.rx_queue_max = packet_count;
1035 vp->estats.rx_queue_running_average =
1036 (vp->estats.rx_queue_running_average + packet_count) >> 1;
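Both queue statistics use the same halving running average: each new batch carries half the weight and the previous average the other half, so history decays geometrically without a division or a sample buffer. A small worked example of the update rule shown above (running_avg is a hypothetical helper; the driver open-codes the expression):

    /* avg' = (avg + sample) >> 1: starting at avg = 10, bursts of 20, 20, 20
     * move the average 10 -> 15 -> 17 -> 18, converging toward the new rate.
     */
    static inline unsigned int running_avg(unsigned int avg, unsigned int sample)
    {
            return (avg + sample) >> 1;
    }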
1043 struct vector_private *vp = netdev_priv(dev);
1046 if (vp->in_error) {
1047 deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
1048 if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
1049 deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
1053 if ((vp->options & VECTOR_TX) == 0) {
1054 writev_tx(vp, skb);
1062 netdev_sent_queue(vp->dev, skb->len);
1063 queue_depth = vector_enqueue(vp->tx_queue, skb);
1065 if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
1066 mod_timer(&vp->tl, vp->coalesce);
1069 queue_depth = vector_send(vp->tx_queue);
1071 napi_schedule(&vp->napi);
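On the transmit path the hits show the batching decision: the skb is BQL-accounted and enqueued, and if the queue still has room and the stack signals more packets via netdev_xmit_more(), sending is deferred by re-arming the coalesce timer; otherwise the batch is flushed with vector_send() and NAPI is scheduled to reap completions. A condensed sketch of that control flow (error handling and the non-vector writev_tx fallback omitted), assuming the helpers named in the hits:

    netdev_sent_queue(vp->dev, skb->len);                /* BQL accounting */
    queue_depth = vector_enqueue(vp->tx_queue, skb);

    if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
            /* more packets coming: batch them and let the timer kick sendmmsg */
            mod_timer(&vp->tl, vp->coalesce);
            return NETDEV_TX_OK;
    }

    /* queue full or end of burst: flush now and poll for completions */
    queue_depth = vector_send(vp->tx_queue);
    napi_schedule(&vp->napi);
    return NETDEV_TX_OK;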
1080 struct vector_private *vp = netdev_priv(dev);
1084 napi_schedule(&vp->napi);
1092 struct vector_private *vp = netdev_priv(dev);
1103 napi_schedule(&vp->napi);
1112 struct vector_private *vp = netdev_priv(dev);
1115 del_timer(&vp->tl);
1117 vp->opened = false;
1119 if (vp->fds == NULL)
1123 if (vp->rx_irq > 0) {
1124 um_free_irq(vp->rx_irq, dev);
1125 vp->rx_irq = 0;
1127 if (vp->tx_irq > 0) {
1128 um_free_irq(vp->tx_irq, dev);
1129 vp->tx_irq = 0;
1131 napi_disable(&vp->napi);
1132 netif_napi_del(&vp->napi);
1133 if (vp->fds->rx_fd > 0) {
1134 if (vp->bpf)
1135 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
1136 os_close_file(vp->fds->rx_fd);
1137 vp->fds->rx_fd = -1;
1139 if (vp->fds->tx_fd > 0) {
1140 os_close_file(vp->fds->tx_fd);
1141 vp->fds->tx_fd = -1;
1143 if (vp->bpf != NULL)
1144 kfree(vp->bpf->filter);
1145 kfree(vp->bpf);
1146 vp->bpf = NULL;
1147 kfree(vp->fds->remote_addr);
1148 kfree(vp->transport_data);
1149 kfree(vp->header_rxbuffer);
1150 kfree(vp->header_txbuffer);
1151 if (vp->rx_queue != NULL)
1152 destroy_queue(vp->rx_queue);
1153 if (vp->tx_queue != NULL)
1154 destroy_queue(vp->tx_queue);
1155 kfree(vp->fds);
1156 vp->fds = NULL;
1157 vp->in_error = false;
1163 struct vector_private *vp = container_of(napi, struct vector_private, napi);
1168 if ((vp->options & VECTOR_TX) != 0)
1169 tx_enqueued = (vector_send(vp->tx_queue) > 0);
1170 spin_lock(&vp->rx_queue->head_lock);
1171 if ((vp->options & VECTOR_RX) > 0)
1172 err = vector_mmsg_rx(vp, budget);
1174 err = vector_legacy_rx(vp);
1178 spin_unlock(&vp->rx_queue->head_lock);
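vector_poll() first drains the TX queue when VECTOR_TX is enabled, then receives under the RX queue's head_lock, choosing between the vectored recvmmsg path (vector_mmsg_rx) and the single-message legacy path (vector_legacy_rx) based on the VECTOR_RX option. A condensed sketch of the poll body, assuming the option flags and helpers shown in the hits:

    if ((vp->options & VECTOR_TX) != 0)
            tx_enqueued = (vector_send(vp->tx_queue) > 0);

    spin_lock(&vp->rx_queue->head_lock);
    if ((vp->options & VECTOR_RX) > 0)
            err = vector_mmsg_rx(vp, budget);    /* recvmmsg: many frames per syscall */
    else
            err = vector_legacy_rx(vp);          /* recvmsg: one frame at a time */
    spin_unlock(&vp->rx_queue->head_lock);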
1191 struct vector_private *vp =
1193 netdev_reset_queue(vp->dev);
1194 netif_start_queue(vp->dev);
1195 netif_wake_queue(vp->dev);
1200 struct vector_private *vp = netdev_priv(dev);
1204 if (vp->opened)
1206 vp->opened = true;
1208 vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
1210 vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
1212 if (vp->fds == NULL)
1215 if (build_transport_data(vp) < 0)
1218 if ((vp->options & VECTOR_RX) > 0) {
1219 vp->rx_queue = create_queue(
1220 vp,
1221 get_depth(vp->parsed),
1222 vp->rx_header_size,
1225 atomic_set(&vp->rx_queue->queue_depth, get_depth(vp->parsed));
1227 vp->header_rxbuffer = kmalloc(
1228 vp->rx_header_size,
1231 if (vp->header_rxbuffer == NULL)
1234 if ((vp->options & VECTOR_TX) > 0) {
1235 vp->tx_queue = create_queue(
1236 vp,
1237 get_depth(vp->parsed),
1238 vp->header_size,
1242 vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
1243 if (vp->header_txbuffer == NULL)
1247 netif_napi_add_weight(vp->dev, &vp->napi, vector_poll,
1248 get_depth(vp->parsed));
1249 napi_enable(&vp->napi);
1253 irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
1261 vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
1266 if ((vp->options & VECTOR_TX) > 0) {
1268 irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
1277 vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
1281 if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
1282 if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
1283 vp->options |= VECTOR_BPF;
1285 if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
1286 vp->bpf = uml_vector_default_bpf(dev->dev_addr);
1288 if (vp->bpf != NULL)
1289 uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
1292 vector_reset_stats(vp);
1299 napi_schedule(&vp->napi);
1301 vdevice = find_device(vp->unit);
1304 if ((vp->options & VECTOR_TX) != 0)
1305 add_timer(&vp->tl);
1321 struct vector_private *vp = netdev_priv(dev);
1323 vp->estats.tx_timeout_count++;
1325 schedule_work(&vp->reset_tx);
1338 struct vector_private *vp = netdev_priv(dev);
1345 vp->req_size = 65536;
1348 vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
1370 struct vector_private *vp = netdev_priv(dev);
1375 if (!(vp->options & VECTOR_BPF_FLASH)) {
1380 if (vp->bpf != NULL) {
1381 if (vp->opened)
1382 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
1383 kfree(vp->bpf->filter);
1384 vp->bpf->filter = NULL;
1386 vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
1387 if (vp->bpf == NULL) {
1393 vdevice = find_device(vp->unit);
1398 vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
1399 if (!vp->bpf->filter)
1402 vp->bpf->len = fw->size / sizeof(struct sock_filter);
1405 if (vp->opened)
1406 result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
1414 if (vp->bpf != NULL)
1415 kfree(vp->bpf->filter);
1416 kfree(vp->bpf);
1417 vp->bpf = NULL;
1426 struct vector_private *vp = netdev_priv(netdev);
1428 ring->rx_max_pending = vp->rx_queue->max_depth;
1429 ring->tx_max_pending = vp->tx_queue->max_depth;
1430 ring->rx_pending = vp->rx_queue->max_depth;
1431 ring->tx_pending = vp->tx_queue->max_depth;
1465 struct vector_private *vp = netdev_priv(dev);
1473 spin_lock(&vp->tx_queue->head_lock);
1474 spin_lock(&vp->rx_queue->head_lock);
1475 memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
1476 spin_unlock(&vp->rx_queue->head_lock);
1477 spin_unlock(&vp->tx_queue->head_lock);
1485 struct vector_private *vp = netdev_priv(netdev);
1487 ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
1496 struct vector_private *vp = netdev_priv(netdev);
1498 vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
1499 if (vp->coalesce == 0)
1500 vp->coalesce = 1;
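The coalesce interval is stored in jiffies, so the ethtool handlers convert to and from microseconds using HZ; because the set path rounds down and then clamps to a minimum of one jiffy, any tx-usecs value below one tick (anything under 10000 us at HZ=100, or under 4000 us at HZ=250) effectively becomes one jiffy. A worked example of the conversion, following the expressions shown above:

    /* get: jiffies -> usecs   e.g. coalesce = 2, HZ = 100  ->  20000 us        */
    ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;

    /* set: usecs -> jiffies   e.g. 5000 us, HZ = 100  ->  0, clamped to 1      */
    vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
    if (vp->coalesce == 0)
            vp->coalesce = 1;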
1536 struct vector_private *vp = from_timer(vp, t, tl);
1538 vp->estats.tx_kicks++;
1539 napi_schedule(&vp->napi);
1551 struct vector_private *vp;
1578 vp = netdev_priv(dev);
1595 *vp = ((struct vector_private)
1597 .list = LIST_HEAD_INIT(vp->list),
1626 INIT_WORK(&vp->reset_tx, vector_reset_tx);
1628 timer_setup(&vp->tl, vector_timer_expire, 0);