Lines matching "num", "-", "rxq" in drivers/net/tap.c

1 // SPDX-License-Identifier: GPL-2.0-only
37 return q->flags & TAP_VNET_BE ? false :
43 int s = !!(q->flags & TAP_VNET_BE);
46 return -EFAULT;
56 return -EFAULT;
59 q->flags |= TAP_VNET_BE;
61 q->flags &= ~TAP_VNET_BE;
73 return -EINVAL;
78 return -EINVAL;
84 return q->flags & TAP_VNET_LE ||
126 return rcu_dereference(dev->rx_handler_data);
136 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
137 * q->vlan becomes inaccessible. When the files gets closed,
149 int err = -EINVAL;
153 if (q->enabled)
157 rcu_assign_pointer(tap->taps[tap->numvtaps], q);
158 q->queue_index = tap->numvtaps;
159 q->enabled = true;
161 tap->numvtaps++;
170 if (tap->numqueues == MAX_TAP_QUEUES)
171 return -EBUSY;
173 rcu_assign_pointer(q->tap, tap);
174 rcu_assign_pointer(tap->taps[tap->numvtaps], q);
175 sock_hold(&q->sk);
177 q->file = file;
178 q->queue_index = tap->numvtaps;
179 q->enabled = true;
180 file->private_data = q;
181 list_add_tail(&q->next, &tap->queue_list);
183 tap->numvtaps++;
184 tap->numqueues++;
195 if (!q->enabled)
196 return -EINVAL;
198 tap = rtnl_dereference(q->tap);
201 int index = q->queue_index;
202 BUG_ON(index >= tap->numvtaps);
203 nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
204 nq->queue_index = index;
206 rcu_assign_pointer(tap->taps[index], nq);
207 RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
208 q->enabled = false;
210 tap->numvtaps--;
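
The queue-disable fragments above (source lines 195-210) keep the taps[] array dense: the last attached queue is copied into the slot being vacated, the old tail slot is cleared, and the moved queue learns its new index, so an index lookup never hits a hole. Below is a minimal userspace sketch of that swap-with-last pattern, with hypothetical names (queue_table, disable_slot) and plain pointers standing in for the RCU-protected ones; it is an illustration of the pattern, not the kernel code itself.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_QUEUES 8

struct queue {
	int id;
	size_t index;			/* position in the dense slots[] array */
};

struct queue_table {
	struct queue *slots[MAX_QUEUES];
	size_t num;			/* occupied slots, packed at the front */
};

/* Disable q by moving the current last entry into q's slot, mirroring the
 * swap-with-last compaction in the fragments above (no RCU, no locking).
 */
static void disable_slot(struct queue_table *t, struct queue *q)
{
	size_t index = q->index;
	struct queue *last;

	assert(index < t->num);
	last = t->slots[t->num - 1];
	last->index = index;		/* moved queue learns its new position */
	t->slots[index] = last;		/* fill the vacated slot */
	t->slots[t->num - 1] = NULL;	/* clear the old tail slot */
	t->num--;
}

int main(void)
{
	struct queue a = { .id = 0, .index = 0 };
	struct queue b = { .id = 1, .index = 1 };
	struct queue c = { .id = 2, .index = 2 };
	struct queue_table t = { .slots = { &a, &b, &c }, .num = 3 };

	disable_slot(&t, &a);		/* c moves into slot 0 */
	printf("slot 0 holds id %d, %zu queues left\n", t.slots[0]->id, t.num);
	return 0;
}
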
229 tap = rtnl_dereference(q->tap);
232 if (q->enabled)
235 tap->numqueues--;
236 RCU_INIT_POINTER(q->tap, NULL);
237 sock_put(&q->sk);
238 list_del_init(&q->next);
244 sock_put(&q->sk);
248 * Select a queue based on the rxq of the device on which this packet
251 * Cache vlan->numvtaps since it can become zero during the execution
260 * and validate that the result isn't NULL - in case we are
263 int numvtaps = READ_ONCE(tap->numvtaps);
264 __u32 rxq;
273 rxq = skb_get_hash(skb);
274 if (rxq) {
275 queue = rcu_dereference(tap->taps[rxq % numvtaps]);
280 rxq = skb_get_rx_queue(skb);
282 while (unlikely(rxq >= numvtaps))
283 rxq -= numvtaps;
285 queue = rcu_dereference(tap->taps[rxq]);
290 queue = rcu_dereference(tap->taps[0]);
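
The comment and code above (source lines 248-290) explain how a queue is picked for an incoming packet: numvtaps is read once because it can drop to zero while the lookup runs, the flow hash is reduced modulo numvtaps when available, the recorded rx queue is folded into range otherwise, and everything else lands on queue 0; the caller must still tolerate a NULL result because the array can change under RCU. A simplified, self-contained sketch of just that selection policy follows, with a hypothetical pick_queue() returning an index (or -1 when nothing is attached) instead of an RCU-protected pointer; the "rx queue not recorded" case is folded into the rx_queue argument here.

#include <stdint.h>
#include <stdio.h>

/* Pick a queue index for a packet, mirroring the policy in the fragments
 * above: prefer the flow hash, fall back to the recorded rx queue, then to
 * queue 0.  Returns -1 when no queue is attached.
 */
static int pick_queue(uint32_t numvtaps, uint32_t flow_hash, uint32_t rx_queue)
{
	if (!numvtaps)			/* snapshot can be zero: bail out */
		return -1;
	if (numvtaps == 1)		/* single queue: nothing to spread */
		return 0;
	if (flow_hash)			/* spread flows by their hash */
		return flow_hash % numvtaps;
	while (rx_queue >= numvtaps)	/* fold the device rx queue into range */
		rx_queue -= numvtaps;
	return rx_queue;
}

int main(void)
{
	printf("%d\n", pick_queue(4, 0x9e3779b9u, 0));	/* hash % 4 == 1 */
	printf("%d\n", pick_queue(4, 0, 6));		/* rx queue 6 -> 2 */
	printf("%d\n", pick_queue(0, 1, 1));		/* nothing attached */
	return 0;
}
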
305 list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
306 list_del_init(&q->next);
307 RCU_INIT_POINTER(q->tap, NULL);
308 if (q->enabled)
309 tap->numvtaps--;
310 tap->numqueues--;
311 sock_put(&q->sk);
313 BUG_ON(tap->numvtaps);
314 BUG_ON(tap->numqueues);
316 tap->numvtaps = MAX_TAP_QUEUES;
323 struct net_device *dev = skb->dev;
343 if (q->flags & IFF_VNET_HDR)
344 features |= tap->tap_features;
355 if (ptr_ring_produce(&q->ring, skb)) {
365 if (ptr_ring_produce(&q->ring, skb)) {
378 if (skb->ip_summed == CHECKSUM_PARTIAL &&
384 if (ptr_ring_produce(&q->ring, skb)) {
391 wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
396 if (tap->count_rx_dropped)
397 tap->count_rx_dropped(tap);
408 if (tap_major->major == major)
417 int retval = -ENOMEM;
423 retval = -EINVAL;
427 spin_lock(&tap_major->minor_lock);
428 retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
430 tap->minor = retval;
431 } else if (retval == -ENOSPC) {
432 netdev_err(tap->dev, "Too many tap devices\n");
433 retval = -EINVAL;
435 spin_unlock(&tap_major->minor_lock);
453 spin_lock(&tap_major->minor_lock);
454 if (tap->minor) {
455 idr_remove(&tap_major->minor_idr, tap->minor);
456 tap->minor = 0;
458 spin_unlock(&tap_major->minor_lock);
478 spin_lock(&tap_major->minor_lock);
479 tap = idr_find(&tap_major->minor_idr, minor);
481 dev = tap->dev;
484 spin_unlock(&tap_major->minor_lock);
496 !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
508 ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
513 struct net *net = current->nsproxy->net_ns;
516 int err = -ENODEV;
523 err = -ENOMEM;
528 if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
529 sk_free(&q->sk);
533 init_waitqueue_head(&q->sock.wq.wait);
534 q->sock.type = SOCK_RAW;
535 q->sock.state = SS_CONNECTED;
536 q->sock.file = file;
537 q->sock.ops = &tap_socket_ops;
538 sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
539 q->sk.sk_write_space = tap_sock_write_space;
540 q->sk.sk_destruct = tap_sock_destruct;
541 q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
542 q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
551 if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
552 sock_set_flag(&q->sk, SOCK_ZEROCOPY);
561 file->f_mode |= FMODE_NOWAIT;
563 dev_put(tap->dev);
569 sock_put(&q->sk);
572 dev_put(tap->dev);
580 struct tap_queue *q = file->private_data;
587 struct tap_queue *q = file->private_data;
594 poll_wait(file, &q->sock.wq.wait, wait);
596 if (!ptr_ring_empty(&q->ring))
599 if (sock_writeable(&q->sk) ||
600 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
601 sock_writeable(&q->sk)))
618 if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
619 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
620 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
627 skb->data_len = len - linear;
628 skb->len += len - linear;
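
The clamp above (source lines 618-628) grows the linear part of the skb whenever the remainder would not fit in the paged area, which is bounded by MAX_SKB_FRAGS fragments of at most PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER bytes each. With common defaults (MAX_SKB_FRAGS = 17, PAGE_SIZE = 4096, PAGE_ALLOC_COSTLY_ORDER = 3), all of which are build- or config-dependent assumptions here, that bound works out to 17 * 32768 = 557056 bytes. A tiny sketch of the same arithmetic:

#include <stddef.h>
#include <stdio.h>

/* Assumed defaults; all three are kernel build/config dependent. */
#define SKETCH_MAX_SKB_FRAGS	17
#define SKETCH_PAGE_SIZE	4096UL
#define SKETCH_COSTLY_ORDER	3

int main(void)
{
	size_t frag_cap = SKETCH_MAX_SKB_FRAGS *
			  (SKETCH_PAGE_SIZE << SKETCH_COSTLY_ORDER);
	size_t len = 600000, linear = 128;

	/* Mirror the clamp: if the paged part would overflow the frag
	 * capacity, grow the linear part until the remainder fits.
	 */
	if (len - linear > frag_cap)
		linear = len - frag_cap;

	printf("frag capacity %zu, linear grows to %zu\n", frag_cap, linear);
	return 0;
}
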
654 if (q->flags & IFF_VNET_HDR) {
655 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
657 err = -EINVAL;
660 len -= vnet_hdr_len;
662 err = -EFAULT;
665 iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
673 err = -EINVAL;
678 err = -EINVAL;
682 if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
707 skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
724 skb->protocol = eth_hdr(skb)->h_proto;
727 tap = rcu_dereference(q->tap);
733 skb->dev = tap->dev;
748 if (eth_type_vlan(skb->protocol) &&
749 vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
757 uarg->ops->complete(NULL, uarg, false);
769 tap = rcu_dereference(q->tap);
770 if (tap && tap->count_tx_dropped)
771 tap->count_tx_dropped(tap);
779 struct file *file = iocb->ki_filp;
780 struct tap_queue *q = file->private_data;
783 if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
799 if (q->flags & IFF_VNET_HDR) {
803 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
805 return -EINVAL;
814 return -EFAULT;
816 iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
819 total += skb->len;
826 veth.h_vlan_proto = skb->vlan_proto;
842 skb->len - vlan_offset);
865 prepare_to_wait(sk_sleep(&q->sk), &wait,
869 skb = ptr_ring_consume(&q->ring);
873 ret = -EAGAIN;
877 ret = -ERESTARTSYS;
884 finish_wait(sk_sleep(&q->sk), &wait);
899 struct file *file = iocb->ki_filp;
900 struct tap_queue *q = file->private_data;
904 if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
910 iocb->ki_pos = ret;
919 tap = rtnl_dereference(q->tap);
921 dev_hold(tap->dev);
928 dev_put(tap->dev);
933 struct tap_queue *q = file->private_data;
939 return -EINVAL;
946 ret = -EINVAL;
958 tap = rtnl_dereference(q->tap);
960 return -ENOLINK;
962 features = tap->dev->features;
987 * user-space will not receive TSO frames.
998 tap->tap_features = feature_mask;
999 if (tap->update_features)
1000 tap->update_features(tap, features);
1011 struct tap_queue *q = file->private_data;
1025 if (get_user(u, &ifr->ifr_flags))
1026 return -EFAULT;
1030 ret = -EINVAL;
1032 q->flags = (q->flags & ~TAP_IFFEATURES) | u;
1041 return -ENOLINK;
1045 u = q->flags;
1046 if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
1047 put_user(u, &ifr->ifr_flags))
1048 ret = -EFAULT;
1054 if (get_user(u, &ifr->ifr_flags))
1055 return -EFAULT;
1063 return -EFAULT;
1068 return -EFAULT;
1070 return -EINVAL;
1072 q->sk.sk_sndbuf = s;
1076 s = q->vnet_hdr_sz;
1078 return -EFAULT;
1083 return -EFAULT;
1085 return -EINVAL;
1087 q->vnet_hdr_sz = s;
1091 s = !!(q->flags & TAP_VNET_LE);
1093 return -EFAULT;
1098 return -EFAULT;
1100 q->flags |= TAP_VNET_LE;
1102 q->flags &= ~TAP_VNET_LE;
1116 return -EINVAL;
1128 return -ENOLINK;
1131 dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
1132 if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
1133 copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
1134 ret = -EFAULT;
1140 if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
1141 return -EFAULT;
1146 return -ENOLINK;
1148 ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
1154 return -EINVAL;
1171 struct tun_xdp_hdr *hdr = xdp->data_hard_start;
1172 struct virtio_net_hdr *gso = &hdr->gso;
1173 int buflen = hdr->buflen;
1179 if (unlikely(xdp->data_end - xdp->data < ETH_HLEN)) {
1180 err = -EINVAL;
1184 if (q->flags & IFF_VNET_HDR)
1185 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
1187 skb = build_skb(xdp->data_hard_start, buflen);
1189 err = -ENOMEM;
1193 skb_reserve(skb, xdp->data - xdp->data_hard_start);
1194 skb_put(skb, xdp->data_end - xdp->data);
1198 skb->protocol = eth_hdr(skb)->h_proto;
1207 if (eth_type_vlan(skb->protocol) &&
1208 vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
1212 tap = rcu_dereference(q->tap);
1214 skb->dev = tap->dev;
1228 tap = rcu_dereference(q->tap);
1229 if (tap && tap->count_tx_dropped)
1230 tap->count_tx_dropped(tap);
1239 struct tun_msg_ctl *ctl = m->msg_control;
1243 if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
1244 ctl && ctl->type == TUN_MSG_PTR) {
1245 for (i = 0; i < ctl->num; i++) {
1246 xdp = &((struct xdp_buff *)ctl->ptr)[i];
1252 return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
1253 m->msg_flags & MSG_DONTWAIT);
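
The sendmsg fragments above (source lines 1239-1253) show two ingest paths: a control block of type TUN_MSG_PTR carries ctl->num pre-built XDP buffers that are consumed one by one, while anything else falls through to the copying path driven by the message iterator (tap_get_user in the fragment). A reduced userspace sketch of that dispatch shape follows, with hypothetical names (msg_ctl, consume_batch, consume_iter) standing in for the control handling, the per-buffer XDP consumer, and the iov_iter path.

#include <stddef.h>
#include <stdio.h>

enum ctl_type { CTL_NONE, CTL_PTR };

struct msg_ctl {
	enum ctl_type type;
	size_t num;		/* number of buffers when type == CTL_PTR */
	void **bufs;		/* the pre-built buffer array */
};

static void consume_batch(void *buf)       { printf("batched buffer %p\n", buf); }
static void consume_iter(const char *data) { printf("copied payload: %s\n", data); }

/* Dispatch like the fragment above: a PTR-typed control means "take this
 * batch of pre-built buffers", otherwise fall back to copying from the
 * iterator (modelled here as a plain string).
 */
static int sendmsg_sketch(const struct msg_ctl *ctl, const char *iter_data)
{
	if (ctl && ctl->type == CTL_PTR) {
		for (size_t i = 0; i < ctl->num; i++)
			consume_batch(ctl->bufs[i]);
		return 0;
	}
	consume_iter(iter_data);
	return 0;
}

int main(void)
{
	int a, b;
	void *bufs[] = { &a, &b };
	const struct msg_ctl ctl = { .type = CTL_PTR, .num = 2, .bufs = bufs };

	sendmsg_sketch(&ctl, NULL);	/* batched XDP-style path */
	sendmsg_sketch(NULL, "hello");	/* iterator/copy path */
	return 0;
}
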
1260 struct sk_buff *skb = m->msg_control;
1264 return -EINVAL;
1266 ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
1268 m->msg_flags |= MSG_TRUNC;
1278 return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
1295 if (file->f_op != &tap_fops)
1296 return ERR_PTR(-EINVAL);
1297 q = file->private_data;
1299 return ERR_PTR(-EBADFD);
1300 return &q->sock;
1308 if (file->f_op != &tap_fops)
1309 return ERR_PTR(-EINVAL);
1310 q = file->private_data;
1312 return ERR_PTR(-EBADFD);
1313 return &q->ring;
1319 struct net_device *dev = tap->dev;
1322 int n = tap->numqueues;
1327 return -ENOMEM;
1329 list_for_each_entry(q, &tap->queue_list, next)
1330 rings[i++] = &q->ring;
1333 dev->tx_queue_len, GFP_KERNEL,
1347 return -ENOMEM;
1349 tap_major->major = MAJOR(major);
1351 idr_init(&tap_major->minor_idr);
1352 spin_lock_init(&tap_major->minor_lock);
1354 tap_major->device_name = device_name;
1356 list_add_tail_rcu(&tap_major->next, &major_list);
1370 tap_cdev->owner = module;
1397 if (tap_major->major == MAJOR(major)) {
1398 idr_destroy(&tap_major->minor_idr);
1399 list_del_rcu(&tap_major->next);