Lines matching: sp, disabled, ports (full-text search over net/ipv4/udp.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
33 * Fred Van Kempen : Net2e support for sk->broadcast.
63 * bound-to-device socket
77 #include <linux/bpf-cgroup.h>
138 return sk->sk_prot->h.udp_table ? : sock_net(sk)->ipv4.udp_table; in udp_get_table_prot()
149 sk_for_each(sk2, &hslot->head) { in udp_lib_lport_inuse()
152 (bitmap || udp_sk(sk2)->udp_port_hash == num) && in udp_lib_lport_inuse()
153 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse()
154 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse()
155 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse()
157 if (sk2->sk_reuseport && sk->sk_reuseport && in udp_lib_lport_inuse()
158 !rcu_access_pointer(sk->sk_reuseport_cb) && in udp_lib_lport_inuse()
165 __set_bit(udp_sk(sk2)->udp_port_hash >> log, in udp_lib_lport_inuse()
185 spin_lock(&hslot2->lock); in udp_lib_lport_inuse2()
186 udp_portaddr_for_each_entry(sk2, &hslot2->head) { in udp_lib_lport_inuse2()
189 (udp_sk(sk2)->udp_port_hash == num) && in udp_lib_lport_inuse2()
190 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse2()
191 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse2()
192 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse2()
194 if (sk2->sk_reuseport && sk->sk_reuseport && in udp_lib_lport_inuse2()
195 !rcu_access_pointer(sk->sk_reuseport_cb) && in udp_lib_lport_inuse2()
204 spin_unlock(&hslot2->lock); in udp_lib_lport_inuse2()
214 sk_for_each(sk2, &hslot->head) { in udp_reuseport_add_sock()
217 sk2->sk_family == sk->sk_family && in udp_reuseport_add_sock()
219 (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) && in udp_reuseport_add_sock()
220 (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_reuseport_add_sock()
221 sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) && in udp_reuseport_add_sock()
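The reuseport path above only lets a socket join an existing group when family, port hash, bound device, and owning UID all line up. As a minimal userspace sketch (not from this file; port 9000 is arbitrary), two UDP sockets can share one port if both set SO_REUSEPORT before bind():

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Demo: two sockets sharing UDP port 9000 via SO_REUSEPORT. */
static int bound_reuseport_socket(void)
{
    struct sockaddr_in addr;
    int one = 1, fd;

    fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0 ||
        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
        return -1;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = htons(9000);
    return bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ? -1 : fd;
}

int main(void)
{
    int a = bound_reuseport_socket();
    int b = bound_reuseport_socket();   /* joins the same reuseport group */

    printf("fds: %d %d\n", a, b);
    return (a < 0 || b < 0) ? 1 : 0;
}

Incoming datagrams to port 9000 are then balanced across the group by the reuseport selection seen in the lookup paths further down.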
232 * udp_lib_get_port - UDP/UDP-Lite port lookup for IPv4 and IPv6 in udp_lib_get_port()
236 * @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
245 int error = -EADDRINUSE; in udp_lib_get_port()
254 remaining = (high - low) + 1; in udp_lib_get_port()
261 rand = (rand | 1) * (udptable->mask + 1); in udp_lib_get_port()
262 last = first + udptable->mask + 1; in udp_lib_get_port()
266 spin_lock_bh(&hslot->lock); in udp_lib_get_port()
268 udptable->log); in udp_lib_get_port()
278 !test_bit(snum >> udptable->log, bitmap) && in udp_lib_get_port()
283 spin_unlock_bh(&hslot->lock); in udp_lib_get_port()
289 spin_lock_bh(&hslot->lock); in udp_lib_get_port()
290 if (hslot->count > 10) { in udp_lib_get_port()
292 unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; in udp_lib_get_port()
294 slot2 &= udptable->mask; in udp_lib_get_port()
295 hash2_nulladdr &= udptable->mask; in udp_lib_get_port()
298 if (hslot->count < hslot2->count) in udp_lib_get_port()
317 inet_sk(sk)->inet_num = snum; in udp_lib_get_port()
318 udp_sk(sk)->udp_port_hash = snum; in udp_lib_get_port()
319 udp_sk(sk)->udp_portaddr_hash ^= snum; in udp_lib_get_port()
321 if (sk->sk_reuseport && in udp_lib_get_port()
323 inet_sk(sk)->inet_num = 0; in udp_lib_get_port()
324 udp_sk(sk)->udp_port_hash = 0; in udp_lib_get_port()
325 udp_sk(sk)->udp_portaddr_hash ^= snum; in udp_lib_get_port()
331 sk_add_node_rcu(sk, &hslot->head); in udp_lib_get_port()
332 hslot->count++; in udp_lib_get_port()
333 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in udp_lib_get_port()
335 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_get_port()
336 spin_lock(&hslot2->lock); in udp_lib_get_port()
337 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && in udp_lib_get_port()
338 sk->sk_family == AF_INET6) in udp_lib_get_port()
339 hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_get_port()
340 &hslot2->head); in udp_lib_get_port()
342 hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_get_port()
343 &hslot2->head); in udp_lib_get_port()
344 hslot2->count++; in udp_lib_get_port()
345 spin_unlock(&hslot2->lock); in udp_lib_get_port()
350 spin_unlock_bh(&hslot->lock); in udp_lib_get_port()
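When bind() passes port 0, the randomized scan above (first/last over udptable->mask buckets, a bitmap of in-use ports per slot) picks a free port inside the local range. A userspace sketch that lets the kernel choose and reads the result back with getsockname():

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
    struct sockaddr_in addr;
    socklen_t alen = sizeof(addr);
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0;      /* snum == 0: kernel picks an ephemeral port */

    if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
        getsockname(fd, (struct sockaddr *)&addr, &alen))
        return 1;
    printf("kernel chose port %u\n", ntohs(addr.sin_port));
    return 0;
}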
361 ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); in udp_v4_get_port()
364 udp_sk(sk)->udp_portaddr_hash = hash2_partial; in udp_v4_get_port()
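udp_v4_get_port() deliberately hashes (net, saddr) with port 0, so the `udp_portaddr_hash ^= snum` lines above can fold the chosen port in with one XOR instead of rehashing. A toy model of that split; mix() is a stand-in 32-bit mixer, not the kernel's actual hash:

#include <stdint.h>
#include <stdio.h>

/* Stand-in mixer for ipv4_portaddr_hash(); any good 32-bit hash works. */
static uint32_t mix(uint32_t x)
{
    x ^= x >> 16; x *= 0x7feb352d;
    x ^= x >> 15; x *= 0x846ca68b;
    return x ^ (x >> 16);
}

int main(void)
{
    uint32_t saddr = 0x7f000001;     /* 127.0.0.1 */
    uint32_t partial = mix(saddr);   /* port unknown yet, treated as 0 */
    uint32_t final = partial ^ 9000; /* fold the port in once it is known */

    printf("partial=%08x final=%08x\n", partial, final);
    return 0;
}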
378 udp_sk(sk)->udp_port_hash != hnum || in compute_score()
380 return -1; in compute_score()
382 if (sk->sk_rcv_saddr != daddr) in compute_score()
383 return -1; in compute_score()
385 score = (sk->sk_family == PF_INET) ? 2 : 1; in compute_score()
388 if (inet->inet_daddr) { in compute_score()
389 if (inet->inet_daddr != saddr) in compute_score()
390 return -1; in compute_score()
394 if (inet->inet_dport) { in compute_score()
395 if (inet->inet_dport != sport) in compute_score()
396 return -1; in compute_score()
400 dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, in compute_score()
403 return -1; in compute_score()
404 if (sk->sk_bound_dev_if) in compute_score()
407 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) in compute_score()
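compute_score() returns -1 on a non-match and otherwise rewards more specific sockets: exact family, connected daddr/dport, a bound device, and a matching CPU. A standalone model of the same "highest score wins" selection, with a hypothetical fake_sk record standing in for struct sock:

#include <stdio.h>

struct fake_sk {                   /* hypothetical stand-in for struct sock */
    unsigned int daddr, dport;     /* 0 means wildcard/unconnected */
    int bound_dev_if;
};

static int score(const struct fake_sk *sk, unsigned int saddr,
                 unsigned int sport, int dif)
{
    int s = 2;                     /* base score, as for PF_INET */

    if (sk->daddr) {
        if (sk->daddr != saddr)
            return -1;
        s += 4;
    }
    if (sk->dport) {
        if (sk->dport != sport)
            return -1;
        s += 4;
    }
    if (sk->bound_dev_if) {
        if (sk->bound_dev_if != dif)
            return -1;
        s += 4;
    }
    return s;
}

int main(void)
{
    struct fake_sk wild = { 0, 0, 0 }, conn = { 0x0a000001, 53, 2 };

    /* A connected socket outranks a wildcard one for the same packet. */
    printf("wild=%d conn=%d\n", score(&wild, 0x0a000001, 53, 2),
           score(&conn, 0x0a000001, 53, 2));
    return 0;
}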
423 * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port)
448 unsigned int slot = udp_hashfn(net, hnum, udptable->mask); in udp4_lib_lookup1()
449 struct udp_hslot *hslot = &udptable->hash[slot]; in udp4_lib_lookup1()
453 sk_for_each_rcu(sk, &hslot->head) { in udp4_lib_lookup1()
479 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { in udp4_lib_lookup2()
490 if (sk->sk_state == TCP_ESTABLISHED) { in udp4_lib_lookup2()
548 const __portpair ports = INET_COMBINED_PORTS(sport, hnum); in udp4_lib_lookup4() local
556 slot = hash4 & udptable->mask; in udp4_lib_lookup4()
557 hslot4 = &udptable->hash4[slot]; in udp4_lib_lookup4()
562 udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) { in udp4_lib_lookup4()
564 if (inet_match(net, sk, acookie, ports, dif, sdif)) in udp4_lib_lookup4()
584 hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash); in udp_rehash4()
586 udp_sk(sk)->udp_lrpa_hash = newhash4; in udp_rehash4()
589 spin_lock_bh(&hslot4->lock); in udp_rehash4()
590 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node); in udp_rehash4()
591 hslot4->count--; in udp_rehash4()
592 spin_unlock_bh(&hslot4->lock); in udp_rehash4()
594 spin_lock_bh(&nhslot4->lock); in udp_rehash4()
595 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node, in udp_rehash4()
596 &nhslot4->nulls_head); in udp_rehash4()
597 nhslot4->count++; in udp_rehash4()
598 spin_unlock_bh(&nhslot4->lock); in udp_rehash4()
607 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_unhash4()
608 hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash); in udp_unhash4()
610 spin_lock(&hslot4->lock); in udp_unhash4()
611 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node); in udp_unhash4()
612 hslot4->count--; in udp_unhash4()
613 spin_unlock(&hslot4->lock); in udp_unhash4()
615 spin_lock(&hslot2->lock); in udp_unhash4()
617 spin_unlock(&hslot2->lock); in udp_unhash4()
627 /* Connected udp socket can re-connect to another remote address, which in udp_lib_hash4()
633 udptable = net->ipv4.udp_table; in udp_lib_hash4()
634 hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash); in udp_lib_hash4()
635 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_hash4()
637 udp_sk(sk)->udp_lrpa_hash = hash; in udp_lib_hash4()
639 spin_lock_bh(&hslot->lock); in udp_lib_hash4()
640 if (rcu_access_pointer(sk->sk_reuseport_cb)) in udp_lib_hash4()
643 spin_lock(&hslot4->lock); in udp_lib_hash4()
644 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node, in udp_lib_hash4()
645 &hslot4->nulls_head); in udp_lib_hash4()
646 hslot4->count++; in udp_lib_hash4()
647 spin_unlock(&hslot4->lock); in udp_lib_hash4()
649 spin_lock(&hslot2->lock); in udp_lib_hash4()
651 spin_unlock(&hslot2->lock); in udp_lib_hash4()
653 spin_unlock_bh(&hslot->lock); in udp_lib_hash4()
663 if (sk_unhashed(sk) || sk->sk_rcv_saddr == htonl(INADDR_ANY)) in udp4_hash4()
666 hash = udp_ehashfn(net, sk->sk_rcv_saddr, sk->sk_num, in udp4_hash4()
667 sk->sk_daddr, sk->sk_dport); in udp4_hash4()
675 * harder than this. -DaveM
696 /* Lookup connected or non-wildcard socket */ in __udp4_lib_lookup()
700 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED) in __udp4_lib_lookup()
705 udptable == net->ipv4.udp_table) { in __udp4_lib_lookup()
715 /* Got non-wildcard socket or error on first lookup */ in __udp4_lib_lookup()
732 * 3. rehash operation updating _secondary and four-tuple_ hashes in __udp4_lib_lookup()
752 return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport, in __udp4_lib_lookup_skb()
753 iph->daddr, dport, inet_iif(skb), in __udp4_lib_lookup_skb()
760 const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation]; in udp4_lib_lookup_skb()
761 const struct iphdr *iph = (struct iphdr *)(skb->data + offset); in udp4_lib_lookup_skb()
762 struct net *net = dev_net(skb->dev); in udp4_lib_lookup_skb()
767 return __udp4_lib_lookup(net, iph->saddr, sport, in udp4_lib_lookup_skb()
768 iph->daddr, dport, iif, in udp4_lib_lookup_skb()
769 sdif, net->ipv4.udp_table, NULL); in udp4_lib_lookup_skb()
782 dif, 0, net->ipv4.udp_table, NULL); in udp4_lib_lookup()
783 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) in udp4_lib_lookup()
798 udp_sk(sk)->udp_port_hash != hnum || in __udp_is_mcast_sock()
799 (inet->inet_daddr && inet->inet_daddr != rmt_addr) || in __udp_is_mcast_sock()
800 (inet->inet_dport != rmt_port && inet->inet_dport) || in __udp_is_mcast_sock()
801 (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || in __udp_is_mcast_sock()
803 !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) in __udp_is_mcast_sock()
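__udp_is_mcast_sock() filters the delivery candidates for a multicast datagram. The state it matches against is what userspace sets up with bind() plus IP_ADD_MEMBERSHIP; a minimal receiver sketch (group 239.0.0.1 and port 9000 are arbitrary):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
    struct sockaddr_in addr;
    struct ip_mreq mreq;
    char buf[2048];
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = htons(9000);
    if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
        return 1;

    mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
    if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)))
        return 1;

    ssize_t n = recv(fd, buf, sizeof(buf), 0);
    printf("got %zd bytes\n", n);
    return 0;
}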
830 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
844 handler = encap->err_handler; in __udp4_lib_err_encap_no_sk()
849 return -ENOENT; in __udp4_lib_err_encap_no_sk()
856 * different destination ports on endpoints, in this case we won't be able to
860 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
887 skb_set_transport_header(skb, iph->ihl << 2); in __udp4_lib_err_encap()
892 lookup = READ_ONCE(up->encap_err_lookup); in __udp4_lib_err_encap()
899 sk = __udp4_lib_lookup(net, iph->daddr, uh->source, in __udp4_lib_err_encap()
900 iph->saddr, uh->dest, skb->dev->ifindex, 0, in __udp4_lib_err_encap()
905 lookup = READ_ONCE(up->encap_err_lookup); in __udp4_lib_err_encap()
934 const struct iphdr *iph = (const struct iphdr *)skb->data; in __udp4_lib_err()
935 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); in __udp4_lib_err()
936 const int type = icmp_hdr(skb)->type; in __udp4_lib_err()
937 const int code = icmp_hdr(skb)->code; in __udp4_lib_err()
942 struct net *net = dev_net(skb->dev); in __udp4_lib_err()
944 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, in __udp4_lib_err()
945 iph->saddr, uh->source, skb->dev->ifindex, in __udp4_lib_err()
948 if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) { in __udp4_lib_err()
956 sk = ERR_PTR(-ENOENT); in __udp4_lib_err()
984 if (READ_ONCE(inet->pmtudisc) != IP_PMTUDISC_DONT) { in __udp4_lib_err()
1008 if (udp_sk(sk)->encap_err_rcv) in __udp4_lib_err()
1009 udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, info, in __udp4_lib_err()
1014 if (!harderr || sk->sk_state != TCP_ESTABLISHED) in __udp4_lib_err()
1017 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); in __udp4_lib_err()
1019 sk->sk_err = err; in __udp4_lib_err()
1027 return __udp4_lib_err(skb, info, dev_net(skb->dev)->ipv4.udp_table); in udp_err()
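For unconnected sockets the ICMP error above is only delivered if the application opted in; ip_icmp_error() queues it on the socket error queue. A userspace sketch of the receiving side (assumed flow: enable IP_RECVERR, sendto() an unreachable destination, then drain MSG_ERRQUEUE):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

/* Sketch: read one queued ICMP error back from the error queue. */
static int read_icmp_error(int fd)
{
    char cbuf[512], data[64];
    struct iovec iov = { data, sizeof(data) };
    struct msghdr msg = { 0 };
    struct cmsghdr *cm;

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = cbuf;
    msg.msg_controllen = sizeof(cbuf);

    if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
        return -1;
    for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
        if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
            struct sock_extended_err *ee = (void *)CMSG_DATA(cm);

            printf("icmp err: errno=%u type=%u code=%u\n",
                   ee->ee_errno, ee->ee_type, ee->ee_code);
        }
    return 0;
}

int main(void)
{
    int one = 1, fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one)))
        return 1;
    /* ... sendto() an unreachable destination, wait, then: */
    return read_icmp_error(fd) ? 1 : 0;
}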
1037 if (up->pending) { in udp_flush_pending_frames()
1038 up->len = 0; in udp_flush_pending_frames()
1039 WRITE_ONCE(up->pending, 0); in udp_flush_pending_frames()
1046 * udp4_hwcsum - handle outgoing HW checksumming
1047 * @skb: sk_buff containing the filled-in UDP header
1056 int len = skb->len - offset; in udp4_hwcsum()
1064 skb->csum_start = skb_transport_header(skb) - skb->head; in udp4_hwcsum()
1065 skb->csum_offset = offsetof(struct udphdr, check); in udp4_hwcsum()
1066 uh->check = ~csum_tcpudp_magic(src, dst, len, in udp4_hwcsum()
1072 * HW-checksum won't work as there are two or more in udp4_hwcsum()
1077 csum = csum_add(csum, frags->csum); in udp4_hwcsum()
1078 hlen -= frags->len; in udp4_hwcsum()
1082 skb->ip_summed = CHECKSUM_NONE; in udp4_hwcsum()
1084 uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); in udp4_hwcsum()
1085 if (uh->check == 0) in udp4_hwcsum()
1086 uh->check = CSUM_MANGLED_0; in udp4_hwcsum()
1100 uh->check = 0; in udp_set_csum()
1102 uh->check = ~udp_v4_check(len, saddr, daddr, 0); in udp_set_csum()
1103 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in udp_set_csum()
1104 uh->check = 0; in udp_set_csum()
1105 uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb)); in udp_set_csum()
1106 if (uh->check == 0) in udp_set_csum()
1107 uh->check = CSUM_MANGLED_0; in udp_set_csum()
1109 skb->ip_summed = CHECKSUM_PARTIAL; in udp_set_csum()
1110 skb->csum_start = skb_transport_header(skb) - skb->head; in udp_set_csum()
1111 skb->csum_offset = offsetof(struct udphdr, check); in udp_set_csum()
1112 uh->check = ~udp_v4_check(len, saddr, daddr, 0); in udp_set_csum()
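Both paths above produce the RFC 768 checksum over a pseudo-header plus the UDP header and payload, with an all-zero result flipped to 0xFFFF (CSUM_MANGLED_0), since zero means "no checksum" for UDP over IPv4. A self-contained reference computation, independent of the kernel helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* One's-complement sum over a buffer, folding carries as we go. */
static uint32_t csum_add(uint32_t sum, const void *p, size_t len)
{
    const uint8_t *b = p;

    while (len > 1) {
        sum += (b[0] << 8) | b[1];
        b += 2;
        len -= 2;
    }
    if (len)
        sum += b[0] << 8;          /* odd trailing byte, zero-padded */
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
                              const uint8_t *udp, uint16_t len)
{
    uint8_t ph[12];                /* RFC 768 pseudo-header */
    uint32_t sum;

    memcpy(ph, &saddr, 4);         /* addresses already in network order */
    memcpy(ph + 4, &daddr, 4);
    ph[8] = 0;
    ph[9] = 17;                    /* IPPROTO_UDP */
    ph[10] = len >> 8;
    ph[11] = len & 0xff;

    sum = csum_add(0, ph, sizeof(ph));
    sum = csum_add(sum, udp, len);
    sum = ~sum & 0xffff;
    return sum ? sum : 0xffff;     /* 0 is reserved: mangle it */
}

int main(void)
{
    /* 8-byte datagram: sport 9000, dport 53, len 8, check field zeroed */
    uint8_t dgram[8] = { 0x23, 0x28, 0x00, 0x35, 0x00, 0x08, 0, 0 };

    printf("check=%04x\n", udp4_checksum(htonl(0x7f000001),
                                         htonl(0x7f000001), dgram, 8));
    return 0;
}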
1120 struct sock *sk = skb->sk; in udp_send_skb()
1126 int len = skb->len - offset; in udp_send_skb()
1127 int datalen = len - sizeof(*uh); in udp_send_skb()
1134 uh->source = inet->inet_sport; in udp_send_skb()
1135 uh->dest = fl4->fl4_dport; in udp_send_skb()
1136 uh->len = htons(len); in udp_send_skb()
1137 uh->check = 0; in udp_send_skb()
1139 if (cork->gso_size) { in udp_send_skb()
1143 if (hlen + min(datalen, cork->gso_size) > cork->fragsize) { in udp_send_skb()
1145 return -EMSGSIZE; in udp_send_skb()
1147 if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { in udp_send_skb()
1149 return -EINVAL; in udp_send_skb()
1151 if (sk->sk_no_check_tx) { in udp_send_skb()
1153 return -EINVAL; in udp_send_skb()
1157 return -EIO; in udp_send_skb()
1160 if (datalen > cork->gso_size) { in udp_send_skb()
1161 skb_shinfo(skb)->gso_size = cork->gso_size; in udp_send_skb()
1162 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; in udp_send_skb()
1163 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen, in udp_send_skb()
1164 cork->gso_size); in udp_send_skb()
1171 if (is_udplite) /* UDP-Lite */ in udp_send_skb()
1174 else if (sk->sk_no_check_tx) { /* UDP csum off */ in udp_send_skb()
1176 skb->ip_summed = CHECKSUM_NONE; in udp_send_skb()
1179 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ in udp_send_skb()
1182 udp4_hwcsum(skb, fl4->saddr, fl4->daddr); in udp_send_skb()
1188 /* add protocol-dependent pseudo-header */ in udp_send_skb()
1189 uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len, in udp_send_skb()
1190 sk->sk_protocol, csum); in udp_send_skb()
1191 if (uh->check == 0) in udp_send_skb()
1192 uh->check = CSUM_MANGLED_0; in udp_send_skb()
1197 if (err == -ENOBUFS && in udp_send_skb()
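The gso_size checks above back the UDP_SEGMENT socket option: a segment plus headers must fit the cork fragsize, the payload may span at most UDP_MAX_SEGMENTS segments, and checksum offload must be usable. A userspace sketch sending four 1400-byte segments in one call (sizes illustrative):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103            /* from linux/udp.h */
#endif

int main(void)
{
    struct sockaddr_in dst;
    char payload[1400 * 4];        /* 4 segments of 1400 bytes */
    int gso_size = 1400;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0 || setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                             &gso_size, sizeof(gso_size)))
        return 1;

    memset(&dst, 0, sizeof(dst));
    dst.sin_family = AF_INET;
    dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    dst.sin_port = htons(9000);
    memset(payload, 0, sizeof(payload));

    /* One syscall; the stack splits this into gso_size-sized datagrams. */
    return sendto(fd, payload, sizeof(payload), 0,
                  (struct sockaddr *)&dst, sizeof(dst)) < 0;
}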
1216 struct flowi4 *fl4 = &inet->cork.fl.u.ip4; in udp_push_pending_frames()
1224 err = udp_send_skb(skb, fl4, &inet->cork.base); in udp_push_pending_frames()
1227 up->len = 0; in udp_push_pending_frames()
1228 WRITE_ONCE(up->pending, 0); in udp_push_pending_frames()
1235 switch (cmsg->cmsg_type) { in __udp_cmsg_send()
1237 if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16))) in __udp_cmsg_send()
1238 return -EINVAL; in __udp_cmsg_send()
1242 return -EINVAL; in __udp_cmsg_send()
1254 return -EINVAL; in udp_cmsg_send()
1256 if (cmsg->cmsg_level != SOL_UDP) { in udp_cmsg_send()
1274 DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); in udp_sendmsg()
1286 int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE; in udp_sendmsg()
1293 return -EMSGSIZE; in udp_sendmsg()
1299 if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ in udp_sendmsg()
1300 return -EOPNOTSUPP; in udp_sendmsg()
1304 fl4 = &inet->cork.fl.u.ip4; in udp_sendmsg()
1305 if (READ_ONCE(up->pending)) { in udp_sendmsg()
1311 if (likely(up->pending)) { in udp_sendmsg()
1312 if (unlikely(up->pending != AF_INET)) { in udp_sendmsg()
1314 return -EINVAL; in udp_sendmsg()
1326 if (msg->msg_namelen < sizeof(*usin)) in udp_sendmsg()
1327 return -EINVAL; in udp_sendmsg()
1328 if (usin->sin_family != AF_INET) { in udp_sendmsg()
1329 if (usin->sin_family != AF_UNSPEC) in udp_sendmsg()
1330 return -EAFNOSUPPORT; in udp_sendmsg()
1333 daddr = usin->sin_addr.s_addr; in udp_sendmsg()
1334 dport = usin->sin_port; in udp_sendmsg()
1336 return -EINVAL; in udp_sendmsg()
1338 if (sk->sk_state != TCP_ESTABLISHED) in udp_sendmsg()
1339 return -EDESTADDRREQ; in udp_sendmsg()
1340 daddr = inet->inet_daddr; in udp_sendmsg()
1341 dport = inet->inet_dport; in udp_sendmsg()
1349 ipc.gso_size = READ_ONCE(up->gso_size); in udp_sendmsg()
1351 if (msg->msg_controllen) { in udp_sendmsg()
1355 sk->sk_family == AF_INET6); in udp_sendmsg()
1369 inet_opt = rcu_dereference(inet->inet_opt); in udp_sendmsg()
1372 sizeof(*inet_opt) + inet_opt->opt.optlen); in udp_sendmsg()
1381 &msg->msg_namelen, in udp_sendmsg()
1386 if (usin->sin_port == 0) { in udp_sendmsg()
1388 err = -EINVAL; in udp_sendmsg()
1391 daddr = usin->sin_addr.s_addr; in udp_sendmsg()
1392 dport = usin->sin_port; in udp_sendmsg()
1399 if (ipc.opt && ipc.opt->opt.srr) { in udp_sendmsg()
1401 err = -EINVAL; in udp_sendmsg()
1404 faddr = ipc.opt->opt.faddr; in udp_sendmsg()
1411 uc_index = READ_ONCE(inet->uc_index); in udp_sendmsg()
1414 ipc.oif = READ_ONCE(inet->mc_index); in udp_sendmsg()
1416 saddr = READ_ONCE(inet->mc_addr); in udp_sendmsg()
1445 sk->sk_protocol, flow_flags, faddr, saddr, in udp_sendmsg()
1446 dport, inet->inet_sport, in udp_sendmsg()
1454 if (err == -ENETUNREACH) in udp_sendmsg()
1459 err = -EACCES; in udp_sendmsg()
1460 if ((rt->rt_flags & RTCF_BROADCAST) && in udp_sendmsg()
1464 sk_dst_set(sk, dst_clone(&rt->dst)); in udp_sendmsg()
1467 if (msg->msg_flags&MSG_CONFIRM) in udp_sendmsg()
1471 saddr = fl4->saddr; in udp_sendmsg()
1473 daddr = ipc.addr = fl4->daddr; in udp_sendmsg()
1475 /* Lockless fast path for the non-corking case. */ in udp_sendmsg()
1481 &cork, msg->msg_flags); in udp_sendmsg()
1489 if (unlikely(up->pending)) { in udp_sendmsg()
1491 /* ... which is an evident application bug. --ANK */ in udp_sendmsg()
1495 err = -EINVAL; in udp_sendmsg()
1501 fl4 = &inet->cork.fl.u.ip4; in udp_sendmsg()
1502 fl4->daddr = daddr; in udp_sendmsg()
1503 fl4->saddr = saddr; in udp_sendmsg()
1504 fl4->fl4_dport = dport; in udp_sendmsg()
1505 fl4->fl4_sport = inet->inet_sport; in udp_sendmsg()
1506 WRITE_ONCE(up->pending, AF_INET); in udp_sendmsg()
1509 up->len += ulen; in udp_sendmsg()
1512 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); in udp_sendmsg()
1517 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) in udp_sendmsg()
1518 WRITE_ONCE(up->pending, 0); in udp_sendmsg()
1535 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in udp_sendmsg()
1542 if (msg->msg_flags & MSG_PROBE) in udp_sendmsg()
1543 dst_confirm_neigh(&rt->dst, &fl4->daddr); in udp_sendmsg()
1544 if (!(msg->msg_flags&MSG_PROBE) || len) in udp_sendmsg()
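The pending/corking machinery above accumulates frames until the cork is released or the send completes. Userspace drives the same path with UDP_CORK (or per-call MSG_MORE); a sketch coalescing two send() calls into one datagram:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_CORK
#define UDP_CORK 1                 /* from linux/udp.h */
#endif

int main(void)
{
    struct sockaddr_in dst;
    int on = 1, off = 0;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&dst, 0, sizeof(dst));
    dst.sin_family = AF_INET;
    dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    dst.sin_port = htons(9000);
    if (fd < 0 || connect(fd, (struct sockaddr *)&dst, sizeof(dst)))
        return 1;

    setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
    send(fd, "hello ", 6, 0);      /* queued, up->pending stays set */
    send(fd, "world", 5, 0);       /* appended to the same datagram */
    setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
    /* uncorking pushes one 11-byte datagram */
    return 0;
}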
1553 struct sock *sk = sock->sk; in udp_splice_eof()
1556 if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk)) in udp_splice_eof()
1560 if (up->pending && !udp_test_bit(CORK, sk)) in udp_splice_eof()
1594 scratch->_tsize_state = skb->truesize; in udp_set_dev_scratch()
1596 scratch->len = skb->len; in udp_set_dev_scratch()
1597 scratch->csum_unnecessary = !!skb_csum_unnecessary(skb); in udp_set_dev_scratch()
1598 scratch->is_linear = !skb_is_nonlinear(skb); in udp_set_dev_scratch()
1601 scratch->_tsize_state |= UDP_SKB_IS_STATELESS; in udp_set_dev_scratch()
1608 * set skb->csum_valid to 1. in udp_skb_csum_unnecessary_set()
1614 udp_skb_scratch(skb)->csum_unnecessary = true; in udp_skb_csum_unnecessary_set()
1620 return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS; in udp_skb_truesize()
1625 return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS); in udp_skb_has_head_state()
1637 up->forward_deficit += size; in udp_rmem_release()
1638 size = up->forward_deficit; in udp_rmem_release()
1639 if (size < READ_ONCE(up->forward_threshold) && in udp_rmem_release()
1640 !skb_queue_empty(&up->reader_queue)) in udp_rmem_release()
1643 size += up->forward_deficit; in udp_rmem_release()
1645 up->forward_deficit = 0; in udp_rmem_release()
1650 sk_queue = &sk->sk_receive_queue; in udp_rmem_release()
1652 spin_lock(&sk_queue->lock); in udp_rmem_release()
1654 amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); in udp_rmem_release()
1655 sk_forward_alloc_add(sk, size - amt); in udp_rmem_release()
1660 atomic_sub(size, &sk->sk_rmem_alloc); in udp_rmem_release()
1663 skb_queue_splice_tail_init(sk_queue, &up->reader_queue); in udp_rmem_release()
1666 spin_unlock(&sk_queue->lock); in udp_rmem_release()
1670 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
1676 prefetch(&skb->data); in udp_skb_destructor()
1684 prefetch(&skb->data); in udp_skb_dtor_locked()
1717 delta = size - sk->sk_forward_alloc; in udp_rmem_schedule()
1719 return -ENOBUFS; in udp_rmem_schedule()
1726 struct sk_buff_head *list = &sk->sk_receive_queue; in __udp_enqueue_schedule_skb()
1729 int size, err = -ENOMEM; in __udp_enqueue_schedule_skb()
1731 rmem = atomic_read(&sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
1732 rcvbuf = READ_ONCE(sk->sk_rcvbuf); in __udp_enqueue_schedule_skb()
1733 size = skb->truesize; in __udp_enqueue_schedule_skb()
1749 * - Reduce memory overhead and thus increase receive queue capacity in __udp_enqueue_schedule_skb()
1750 * - Less cache line misses at copyout() time in __udp_enqueue_schedule_skb()
1751 * - Less work at consume_skb() (less alien page frag freeing) in __udp_enqueue_schedule_skb()
1755 size = skb->truesize; in __udp_enqueue_schedule_skb()
1761 atomic_add(size, &sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
1763 spin_lock(&list->lock); in __udp_enqueue_schedule_skb()
1766 spin_unlock(&list->lock); in __udp_enqueue_schedule_skb()
1770 sk_forward_alloc_add(sk, -size); in __udp_enqueue_schedule_skb()
1778 spin_unlock(&list->lock); in __udp_enqueue_schedule_skb()
1781 INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk); in __udp_enqueue_schedule_skb()
1787 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
1790 atomic_inc(&sk->sk_drops); in __udp_enqueue_schedule_skb()
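__udp_enqueue_schedule_skb() drops packets (bumping sk_drops) once sk_rmem_alloc would exceed sk_rcvbuf. Applications raise that bound with SO_RCVBUF; the kernel doubles the requested value to account for bookkeeping overhead:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
    int want = 1 << 20, got = 0;
    socklen_t len = sizeof(got);
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0 ||
        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &want, sizeof(want)) ||
        getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len))
        return 1;
    printf("rcvbuf: asked %d, kernel set %d\n", want, got);
    return 0;
}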
1803 skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue); in udp_destruct_common()
1804 while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) { in udp_destruct_common()
1805 total += skb->truesize; in udp_destruct_common()
1821 sk->sk_destruct = udp_destruct_sock; in udp_init_sock()
1822 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); in udp_init_sock()
1828 if (unlikely(READ_ONCE(udp_sk(sk)->peeking_with_offset))) in skb_consume_udp()
1855 atomic_inc(&sk->sk_drops); in __first_packet_length()
1857 *total += skb->truesize; in __first_packet_length()
1868 * first_packet_length - return length of first packet in receive queue
1872 * Returns the length of the found skb, or -1 if none is found.
1876 struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue; in first_packet_length()
1877 struct sk_buff_head *sk_queue = &sk->sk_receive_queue; in first_packet_length()
1882 spin_lock_bh(&rcvq->lock); in first_packet_length()
1885 spin_lock(&sk_queue->lock); in first_packet_length()
1887 spin_unlock(&sk_queue->lock); in first_packet_length()
1891 res = skb ? skb->len : -1; in first_packet_length()
1894 spin_unlock_bh(&rcvq->lock); in first_packet_length()
1918 return -ENOIOCTLCMD; in udp_ioctl()
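first_packet_length() is what services the SIOCINQ ioctl, so for UDP it reports the size of the next datagram rather than the total bytes queued. Sketch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sockios.h>         /* SIOCINQ */

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int next_len = 0;

    if (fd < 0 || ioctl(fd, SIOCINQ, &next_len))
        return 1;
    printf("next datagram: %d bytes\n", next_len); /* 0 if queue empty */
    return 0;
}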
1928 struct sk_buff_head *sk_queue = &sk->sk_receive_queue; in __skb_recv_udp()
1934 queue = &udp_sk(sk)->reader_queue; in __skb_recv_udp()
1943 error = -EAGAIN; in __skb_recv_udp()
1945 spin_lock_bh(&queue->lock); in __skb_recv_udp()
1951 spin_unlock_bh(&queue->lock); in __skb_recv_udp()
1956 spin_unlock_bh(&queue->lock); in __skb_recv_udp()
1961 * keep both queues locked to avoid re-acquiring in __skb_recv_udp()
1965 spin_lock(&sk_queue->lock); in __skb_recv_udp()
1972 spin_unlock(&sk_queue->lock); in __skb_recv_udp()
1973 spin_unlock_bh(&queue->lock); in __skb_recv_udp()
1986 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue, in __skb_recv_udp()
2011 atomic_inc(&sk->sk_drops); in udp_read_skb()
2030 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); in udp_recvmsg()
2048 if (copied > ulen - off) in udp_recvmsg()
2049 copied = ulen - off; in udp_recvmsg()
2051 msg->msg_flags |= MSG_TRUNC; in udp_recvmsg()
2056 * coverage checksum (UDP-Lite), do it before the copy. in udp_recvmsg()
2060 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { in udp_recvmsg()
2069 err = copy_linear_skb(skb, copied, off, &msg->msg_iter); in udp_recvmsg()
2075 if (err == -EINVAL) in udp_recvmsg()
2081 atomic_inc(&sk->sk_drops); in udp_recvmsg()
2097 sin->sin_family = AF_INET; in udp_recvmsg()
2098 sin->sin_port = udp_hdr(skb)->source; in udp_recvmsg()
2099 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; in udp_recvmsg()
2100 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); in udp_recvmsg()
2118 skb_consume_udp(sk, skb, peeking ? -err : err); in udp_recvmsg()
2122 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, in udp_recvmsg()
2131 msg->msg_flags &= ~MSG_TRUNC; in udp_recvmsg()
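Since copied is clipped to the user buffer and MSG_TRUNC flags the shortfall, a receiver can size its buffer exactly by first peeking with MSG_PEEK|MSG_TRUNC, which returns the full datagram length without consuming it. Sketch (assumes the socket is bound and traffic arrives):

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    char probe;
    ssize_t full;

    if (fd < 0)
        return 1;
    /* ... bind() and wait for traffic ... */
    full = recv(fd, &probe, 1, MSG_PEEK | MSG_TRUNC);
    if (full < 0)
        return 1;

    char *buf = malloc(full);
    ssize_t got = recv(fd, buf, full, 0);  /* now consume it whole */
    printf("datagram was %zd bytes, read %zd\n", full, got);
    free(buf);
    return 0;
}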
2142 return -EINVAL; in udp_pre_connect()
2164 * 1003.1g - break association. in __udp_disconnect()
2167 sk->sk_state = TCP_CLOSE; in __udp_disconnect()
2168 inet->inet_daddr = 0; in __udp_disconnect()
2169 inet->inet_dport = 0; in __udp_disconnect()
2171 sk->sk_bound_dev_if = 0; in __udp_disconnect()
2172 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) { in __udp_disconnect()
2174 if (sk->sk_prot->rehash && in __udp_disconnect()
2175 (sk->sk_userlocks & SOCK_BINDPORT_LOCK)) in __udp_disconnect()
2176 sk->sk_prot->rehash(sk); in __udp_disconnect()
2179 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { in __udp_disconnect()
2180 sk->sk_prot->unhash(sk); in __udp_disconnect()
2181 inet->inet_sport = 0; in __udp_disconnect()
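__udp_disconnect() implements the 1003.1g "break association" noted above; userspace triggers it by connect()ing with family AF_UNSPEC, after which the socket is unconnected again (and may be rehashed or unhashed as shown). Sketch:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
    struct sockaddr_in dst;
    struct sockaddr unspec;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&dst, 0, sizeof(dst));
    dst.sin_family = AF_INET;
    dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    dst.sin_port = htons(9000);
    if (fd < 0 || connect(fd, (struct sockaddr *)&dst, sizeof(dst)))
        return 1;

    memset(&unspec, 0, sizeof(unspec));
    unspec.sa_family = AF_UNSPEC;  /* break the association */
    return connect(fd, &unspec, sizeof(unspec)) != 0;
}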
2205 udp_sk(sk)->udp_port_hash); in udp_lib_unhash()
2206 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_unhash()
2208 spin_lock_bh(&hslot->lock); in udp_lib_unhash()
2209 if (rcu_access_pointer(sk->sk_reuseport_cb)) in udp_lib_unhash()
2212 hslot->count--; in udp_lib_unhash()
2213 inet_sk(sk)->inet_num = 0; in udp_lib_unhash()
2214 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); in udp_lib_unhash()
2216 spin_lock(&hslot2->lock); in udp_lib_unhash()
2217 hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); in udp_lib_unhash()
2218 hslot2->count--; in udp_lib_unhash()
2219 spin_unlock(&hslot2->lock); in udp_lib_unhash()
2223 spin_unlock_bh(&hslot->lock); in udp_lib_unhash()
2238 udp_sk(sk)->udp_port_hash); in udp_lib_rehash()
2239 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_rehash()
2241 udp_sk(sk)->udp_portaddr_hash = newhash; in udp_lib_rehash()
2244 rcu_access_pointer(sk->sk_reuseport_cb)) { in udp_lib_rehash()
2246 spin_lock_bh(&hslot->lock); in udp_lib_rehash()
2247 if (rcu_access_pointer(sk->sk_reuseport_cb)) in udp_lib_rehash()
2251 spin_lock(&hslot2->lock); in udp_lib_rehash()
2252 hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); in udp_lib_rehash()
2253 hslot2->count--; in udp_lib_rehash()
2254 spin_unlock(&hslot2->lock); in udp_lib_rehash()
2256 spin_lock(&nhslot2->lock); in udp_lib_rehash()
2257 hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_rehash()
2258 &nhslot2->head); in udp_lib_rehash()
2259 nhslot2->count++; in udp_lib_rehash()
2260 spin_unlock(&nhslot2->lock); in udp_lib_rehash()
2263 spin_unlock_bh(&hslot->lock); in udp_lib_rehash()
2268 * (2) update hslot2->hash4_cnt. in udp_lib_rehash()
2273 spin_lock_bh(&hslot->lock); in udp_lib_rehash()
2277 spin_lock(&hslot2->lock); in udp_lib_rehash()
2279 spin_unlock(&hslot2->lock); in udp_lib_rehash()
2281 spin_lock(&nhslot2->lock); in udp_lib_rehash()
2283 spin_unlock(&nhslot2->lock); in udp_lib_rehash()
2286 spin_unlock_bh(&hslot->lock); in udp_lib_rehash()
2295 inet_sk(sk)->inet_rcv_saddr, in udp_v4_rehash()
2296 inet_sk(sk)->inet_num); in udp_v4_rehash()
2298 sk->sk_rcv_saddr, sk->sk_num, in udp_v4_rehash()
2299 sk->sk_daddr, sk->sk_dport); in udp_v4_rehash()
2308 if (inet_sk(sk)->inet_daddr) { in __udp_queue_rcv_skb()
2322 if (rc == -ENOMEM) { in __udp_queue_rcv_skb()
2334 return -1; in __udp_queue_rcv_skb()
2341 * -1: error
2364 READ_ONCE(up->encap_type)) { in udp_queue_rcv_one_skb()
2371 * up->encap_rcv() returns the following value: in udp_queue_rcv_one_skb()
2375 * <0 if skb should be resubmitted as proto -N in udp_queue_rcv_one_skb()
2379 encap_rcv = READ_ONCE(up->encap_rcv); in udp_queue_rcv_one_skb()
2392 return -ret; in udp_queue_rcv_one_skb()
2396 /* FALLTHROUGH -- it's a UDP Packet */ in udp_queue_rcv_one_skb()
2400 * UDP-Lite specific tests, ignored on UDP sockets in udp_queue_rcv_one_skb()
2402 if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) { in udp_queue_rcv_one_skb()
2403 u16 pcrlen = READ_ONCE(up->pcrlen); in udp_queue_rcv_one_skb()
2407 * disabled for the following two types of errors: these depend in udp_queue_rcv_one_skb()
2418 UDP_SKB_CB(skb)->cscov, skb->len); in udp_queue_rcv_one_skb()
2425 * Therefore the above ...()->partial_cov statement is essential. in udp_queue_rcv_one_skb()
2427 if (UDP_SKB_CB(skb)->cscov < pcrlen) { in udp_queue_rcv_one_skb()
2429 UDP_SKB_CB(skb)->cscov, pcrlen); in udp_queue_rcv_one_skb()
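This pcrlen test enforces the receiver's minimum UDP-Lite coverage against the coverage declared in each packet. The values come from the UDPLITE_SEND_CSCOV/UDPLITE_RECV_CSCOV options handled later in udp_lib_setsockopt(); a userspace sketch (coverage of 20 bytes = 8-byte header plus 12 payload bytes):

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE 136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV 10      /* from linux/udp.h */
#define UDPLITE_RECV_CSCOV 11
#endif

int main(void)
{
    int cov = 20;
    int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);

    if (fd < 0)
        return 1;
    /* sender: only the first 20 bytes are checksummed */
    setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
    /* receiver: drop packets covering less than 20 bytes */
    setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
    return 0;
}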
2434 prefetch(&sk->sk_rmem_alloc); in udp_queue_rcv_one_skb()
2435 if (rcu_access_pointer(sk->sk_filter) && in udp_queue_rcv_one_skb()
2452 atomic_inc(&sk->sk_drops); in udp_queue_rcv_one_skb()
2454 return -1; in udp_queue_rcv_one_skb()
2466 __skb_push(skb, -skb_mac_offset(skb)); in udp_queue_rcv_skb()
2474 ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret); in udp_queue_rcv_skb()
2487 old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst))); in udp_sk_rx_dst_set()
2507 unsigned short hnum = ntohs(uh->dest); in __udp4_lib_mcast_deliver()
2509 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); in __udp4_lib_mcast_deliver()
2511 int dif = skb->dev->ifindex; in __udp4_lib_mcast_deliver()
2518 udptable->mask; in __udp4_lib_mcast_deliver()
2519 hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask; in __udp4_lib_mcast_deliver()
2521 hslot = &udptable->hash2[hash2].hslot; in __udp4_lib_mcast_deliver()
2525 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { in __udp4_lib_mcast_deliver()
2526 if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr, in __udp4_lib_mcast_deliver()
2527 uh->source, saddr, dif, sdif, hnum)) in __udp4_lib_mcast_deliver()
2537 atomic_inc(&sk->sk_drops); in __udp4_lib_mcast_deliver()
2568 * including udp header and folding it to skb->csum.
2575 UDP_SKB_CB(skb)->partial_cov = 0; in udp4_csum_init()
2576 UDP_SKB_CB(skb)->cscov = skb->len; in udp4_csum_init()
2583 if (UDP_SKB_CB(skb)->partial_cov) { in udp4_csum_init()
2584 skb->csum = inet_compute_pseudo(skb, proto); in udp4_csum_init()
2592 err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check, in udp4_csum_init()
2597 if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) { in udp4_csum_init()
2599 if (skb->csum_complete_sw) in udp4_csum_init()
2603 * skb->csum is no longer the full packet checksum, in udp4_csum_init()
2620 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) in udp_unicast_rcv_skb()
2626 * it wants the return to be -protocol, or 0 in udp_unicast_rcv_skb()
2629 return -ret; in udp_unicast_rcv_skb()
2645 struct net *net = dev_net(skb->dev); in __udp4_lib_rcv()
2658 ulen = ntohs(uh->len); in __udp4_lib_rcv()
2659 saddr = ip_hdr(skb)->saddr; in __udp4_lib_rcv()
2660 daddr = ip_hdr(skb)->daddr; in __udp4_lib_rcv()
2662 if (ulen > skb->len) in __udp4_lib_rcv()
2675 sk = inet_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest, in __udp4_lib_rcv()
2684 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) in __udp4_lib_rcv()
2693 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) in __udp4_lib_rcv()
2697 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); in __udp4_lib_rcv()
2724 &saddr, ntohs(uh->source), in __udp4_lib_rcv()
2725 ulen, skb->len, in __udp4_lib_rcv()
2726 &daddr, ntohs(uh->dest)); in __udp4_lib_rcv()
2737 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), in __udp4_lib_rcv()
2754 struct udp_table *udptable = net->ipv4.udp_table; in __udp4_lib_mcast_demux_lookup()
2760 slot = udp_hashfn(net, hnum, udptable->mask); in __udp4_lib_mcast_demux_lookup()
2761 hslot = &udptable->hash[slot]; in __udp4_lib_mcast_demux_lookup()
2764 if (hslot->count > 10) in __udp4_lib_mcast_demux_lookup()
2768 sk_for_each_rcu(sk, &hslot->head) { in __udp4_lib_mcast_demux_lookup()
2789 struct udp_table *udptable = net->ipv4.udp_table; in __udp4_lib_demux_lookup()
2794 __portpair ports; in __udp4_lib_demux_lookup() local
2799 ports = INET_COMBINED_PORTS(rmt_port, hnum); in __udp4_lib_demux_lookup()
2801 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { in __udp4_lib_demux_lookup()
2802 if (inet_match(net, sk, acookie, ports, dif, sdif)) in __udp4_lib_demux_lookup()
2812 struct net *net = dev_net(skb->dev); in udp_v4_early_demux()
2818 int dif = skb->dev->ifindex; in udp_v4_early_demux()
2829 if (skb->pkt_type == PACKET_MULTICAST) { in udp_v4_early_demux()
2830 in_dev = __in_dev_get_rcu(skb->dev); in udp_v4_early_demux()
2835 ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, in udp_v4_early_demux()
2836 iph->protocol); in udp_v4_early_demux()
2840 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, in udp_v4_early_demux()
2841 uh->source, iph->saddr, in udp_v4_early_demux()
2843 } else if (skb->pkt_type == PACKET_HOST) { in udp_v4_early_demux()
2844 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, in udp_v4_early_demux()
2845 uh->source, iph->saddr, dif, sdif); in udp_v4_early_demux()
2851 skb->sk = sk; in udp_v4_early_demux()
2853 skb->destructor = sock_pfree; in udp_v4_early_demux()
2854 dst = rcu_dereference(sk->sk_rx_dst); in udp_v4_early_demux()
2870 if (!inet_sk(sk)->inet_daddr && in_dev) in udp_v4_early_demux()
2871 return ip_mc_validate_source(skb, iph->daddr, in udp_v4_early_demux()
2872 iph->saddr, in udp_v4_early_demux()
2874 skb->dev, in_dev, &itag); in udp_v4_early_demux()
2881 return __udp4_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP); in udp_rcv()
2894 if (up->encap_type) { in udp_destroy_sock()
2896 encap_destroy = READ_ONCE(up->encap_destroy); in udp_destroy_sock()
2919 new_gro_receive = ipv6_stub->xfrm6_gro_udp_encap_rcv; in set_xfrm_gro_udp_encap_rcv()
2923 if (udp_sk(sk)->gro_receive != new_gro_receive) { in set_xfrm_gro_udp_encap_rcv()
2928 if (udp_sk(sk)->gro_receive) in set_xfrm_gro_udp_encap_rcv()
2931 WRITE_ONCE(udp_sk(sk)->gro_receive, new_gro_receive); in set_xfrm_gro_udp_encap_rcv()
2956 WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2); in udp_lib_setsockopt()
2963 return -EINVAL; in udp_lib_setsockopt()
2966 return -EFAULT; in udp_lib_setsockopt()
2988 set_xfrm_gro_udp_encap_rcv(val, sk->sk_family, sk); in udp_lib_setsockopt()
2990 if (sk->sk_family == AF_INET6) in udp_lib_setsockopt()
2991 WRITE_ONCE(up->encap_rcv, in udp_lib_setsockopt()
2992 ipv6_stub->xfrm6_udp_encap_rcv); in udp_lib_setsockopt()
2995 WRITE_ONCE(up->encap_rcv, in udp_lib_setsockopt()
3000 WRITE_ONCE(up->encap_type, val); in udp_lib_setsockopt()
3004 err = -ENOPROTOOPT; in udp_lib_setsockopt()
3020 return -EINVAL; in udp_lib_setsockopt()
3021 WRITE_ONCE(up->gso_size, val); in udp_lib_setsockopt()
3031 set_xfrm_gro_udp_encap_rcv(up->encap_type, sk->sk_family, sk); in udp_lib_setsockopt()
3036 * UDP-Lite's partial checksum coverage (RFC 3828). in udp_lib_setsockopt()
3042 return -ENOPROTOOPT; in udp_lib_setsockopt()
3047 WRITE_ONCE(up->pcslen, val); in udp_lib_setsockopt()
3056 return -ENOPROTOOPT; in udp_lib_setsockopt()
3061 WRITE_ONCE(up->pcrlen, val); in udp_lib_setsockopt()
3066 err = -ENOPROTOOPT; in udp_lib_setsockopt()
3091 return -EFAULT; in udp_lib_getsockopt()
3094 return -EINVAL; in udp_lib_getsockopt()
3104 val = READ_ONCE(up->encap_type); in udp_lib_getsockopt()
3116 val = READ_ONCE(up->gso_size); in udp_lib_getsockopt()
3126 val = READ_ONCE(up->pcslen); in udp_lib_getsockopt()
3130 val = READ_ONCE(up->pcrlen); in udp_lib_getsockopt()
3134 return -ENOPROTOOPT; in udp_lib_getsockopt()
3138 return -EFAULT; in udp_lib_getsockopt()
3140 return -EFAULT; in udp_lib_getsockopt()
3154 * udp_poll - wait for a UDP event.
3155 * @file: file struct
3156 * @sock: socket
3157 * @wait: poll table
3169 struct sock *sk = sock->sk; in udp_poll()
3171 if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) in udp_poll()
3175 if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) && in udp_poll()
3176 !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) in udp_poll()
3198 sk->sk_err = err; in udp_abort()
3246 /* ------------------------------------------------------------------------ */
3255 return ((family == AF_UNSPEC || family == sk->sk_family) && in seq_sk_match()
3268 if (seq->op == &bpf_iter_udp_seq_ops) in udp_get_table_seq()
3269 return net->ipv4.udp_table; in udp_get_table_seq()
3272 afinfo = pde_data(file_inode(seq->file)); in udp_get_table_seq()
3273 return afinfo->udp_table ? : net->ipv4.udp_table; in udp_get_table_seq()
3278 struct udp_iter_state *state = seq->private; in udp_get_first()
3285 for (state->bucket = start; state->bucket <= udptable->mask; in udp_get_first()
3286 ++state->bucket) { in udp_get_first()
3287 struct udp_hslot *hslot = &udptable->hash[state->bucket]; in udp_get_first()
3289 if (hlist_empty(&hslot->head)) in udp_get_first()
3292 spin_lock_bh(&hslot->lock); in udp_get_first()
3293 sk_for_each(sk, &hslot->head) { in udp_get_first()
3297 spin_unlock_bh(&hslot->lock); in udp_get_first()
3306 struct udp_iter_state *state = seq->private; in udp_get_next()
3317 if (state->bucket <= udptable->mask) in udp_get_next()
3318 spin_unlock_bh(&udptable->hash[state->bucket].lock); in udp_get_next()
3320 return udp_get_first(seq, state->bucket + 1); in udp_get_next()
3331 --pos; in udp_get_idx()
3337 struct udp_iter_state *state = seq->private; in udp_seq_start()
3338 state->bucket = MAX_UDP_PORTS; in udp_seq_start()
3340 return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; in udp_seq_start()
3360 struct udp_iter_state *state = seq->private; in udp_seq_stop()
3365 if (state->bucket <= udptable->mask) in udp_seq_stop()
3366 spin_unlock_bh(&udptable->hash[state->bucket].lock); in udp_seq_stop()
3370 /* ------------------------------------------------------------------------ */
3371 static void udp4_format_sock(struct sock *sp, struct seq_file *f, in udp4_format_sock() argument
3374 struct inet_sock *inet = inet_sk(sp); in udp4_format_sock()
3375 __be32 dest = inet->inet_daddr; in udp4_format_sock()
3376 __be32 src = inet->inet_rcv_saddr; in udp4_format_sock()
3377 __u16 destp = ntohs(inet->inet_dport); in udp4_format_sock()
3378 __u16 srcp = ntohs(inet->inet_sport); in udp4_format_sock()
3382 bucket, src, srcp, dest, destp, sp->sk_state, in udp4_format_sock()
3383 sk_wmem_alloc_get(sp), in udp4_format_sock()
3384 udp_rqueue_get(sp), in udp4_format_sock()
3386 from_kuid_munged(seq_user_ns(f), sk_uid(sp)), in udp4_format_sock()
3387 0, sock_i_ino(sp), in udp4_format_sock()
3388 refcount_read(&sp->sk_refcnt), sp, in udp4_format_sock()
3389 atomic_read(&sp->sk_drops)); in udp4_format_sock()
3397 "rx_queue tr tm->when retrnsmt uid timeout " in udp4_seq_show()
3400 struct udp_iter_state *state = seq->private; in udp4_seq_show()
3402 udp4_format_sock(v, seq, state->bucket); in udp4_seq_show()
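udp4_format_sock() produces the per-socket lines of /proc/net/udp under the header shown above, with addresses and ports in hex (the __be32 address prints byte-reversed on a little-endian host). A trivial reader, assuming little-endian byte order:

#include <stdio.h>

int main(void)
{
    unsigned int slot, ip, port;
    char line[512];
    FILE *f = fopen("/proc/net/udp", "r");

    if (!f)
        return 1;
    fgets(line, sizeof(line), f);  /* skip the header row */
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, " %u: %8X:%4X", &slot, &ip, &port) == 3)
            printf("bucket %u: %u.%u.%u.%u:%u\n", slot,
                   ip & 0xff, (ip >> 8) & 0xff,
                   (ip >> 16) & 0xff, ip >> 24, port);
    }
    fclose(f);
    return 0;
}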
3441 if (cookies[i].cookie == atomic64_read(&sk->sk_cookie)) in bpf_iter_udp_resume()
3450 struct bpf_udp_iter_state *iter = seq->private; in bpf_iter_udp_batch()
3451 struct udp_iter_state *state = &iter->state; in bpf_iter_udp_batch()
3461 resume_bucket = state->bucket; in bpf_iter_udp_batch()
3464 if (iter->cur_sk == iter->end_sk) in bpf_iter_udp_batch()
3465 state->bucket++; in bpf_iter_udp_batch()
3477 find_cookie = iter->cur_sk; in bpf_iter_udp_batch()
3478 end_cookie = iter->end_sk; in bpf_iter_udp_batch()
3479 iter->cur_sk = 0; in bpf_iter_udp_batch()
3480 iter->end_sk = 0; in bpf_iter_udp_batch()
3483 for (; state->bucket <= udptable->mask; state->bucket++) { in bpf_iter_udp_batch()
3484 struct udp_hslot *hslot2 = &udptable->hash2[state->bucket].hslot; in bpf_iter_udp_batch()
3486 if (hlist_empty(&hslot2->head)) in bpf_iter_udp_batch()
3489 spin_lock_bh(&hslot2->lock); in bpf_iter_udp_batch()
3490 sk = hlist_entry_safe(hslot2->head.first, struct sock, in bpf_iter_udp_batch()
3498 if (state->bucket == resume_bucket) in bpf_iter_udp_batch()
3499 sk = bpf_iter_udp_resume(sk, &iter->batch[find_cookie], in bpf_iter_udp_batch()
3500 end_cookie - find_cookie); in bpf_iter_udp_batch()
3504 if (iter->end_sk < iter->max_sk) { in bpf_iter_udp_batch()
3506 iter->batch[iter->end_sk++].sk = sk; in bpf_iter_udp_batch()
3513 if (unlikely(resizes <= 1 && iter->end_sk && in bpf_iter_udp_batch()
3514 iter->end_sk != batch_sks)) { in bpf_iter_udp_batch()
3521 spin_unlock_bh(&hslot2->lock); in bpf_iter_udp_batch()
3537 spin_unlock_bh(&hslot2->lock); in bpf_iter_udp_batch()
3542 sk = iter->batch[iter->end_sk - 1].sk; in bpf_iter_udp_batch()
3543 sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next, in bpf_iter_udp_batch()
3546 batch_sks = iter->end_sk; in bpf_iter_udp_batch()
3550 spin_unlock_bh(&hslot2->lock); in bpf_iter_udp_batch()
3552 if (iter->end_sk) in bpf_iter_udp_batch()
3558 WARN_ON_ONCE(iter->end_sk != batch_sks); in bpf_iter_udp_batch()
3559 return iter->end_sk ? iter->batch[0].sk : NULL; in bpf_iter_udp_batch()
3564 struct bpf_udp_iter_state *iter = seq->private; in bpf_iter_udp_seq_next()
3567 /* Whenever seq_next() is called, the iter->cur_sk is in bpf_iter_udp_seq_next()
3568 * done with seq_show(), so unref the iter->cur_sk. in bpf_iter_udp_seq_next()
3570 if (iter->cur_sk < iter->end_sk) in bpf_iter_udp_seq_next()
3571 sock_put(iter->batch[iter->cur_sk++].sk); in bpf_iter_udp_seq_next()
3573 /* After updating iter->cur_sk, check if there are more sockets in bpf_iter_udp_seq_next()
3576 if (iter->cur_sk < iter->end_sk) in bpf_iter_udp_seq_next()
3577 sk = iter->batch[iter->cur_sk].sk; in bpf_iter_udp_seq_next()
3589 * continue from where it was stop()-ped. in bpf_iter_udp_seq_start()
3602 meta->seq_num--; /* skip SEQ_START_TOKEN */ in udp_prog_seq_show()
3612 struct udp_iter_state *state = seq->private; in bpf_iter_udp_seq_show()
3632 ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket); in bpf_iter_udp_seq_show()
3642 unsigned int cur_sk = iter->cur_sk; in bpf_iter_udp_put_batch()
3648 while (cur_sk < iter->end_sk) { in bpf_iter_udp_put_batch()
3649 item = &iter->batch[cur_sk++]; in bpf_iter_udp_put_batch()
3650 cookie = sock_gen_cookie(item->sk); in bpf_iter_udp_put_batch()
3651 sock_put(item->sk); in bpf_iter_udp_put_batch()
3652 item->cookie = cookie; in bpf_iter_udp_put_batch()
3658 struct bpf_udp_iter_state *iter = seq->private; in bpf_iter_udp_seq_stop()
3669 if (iter->cur_sk < iter->end_sk) in bpf_iter_udp_seq_stop()
3687 if (seq->op == &bpf_iter_udp_seq_ops) in seq_file_family()
3692 afinfo = pde_data(file_inode(seq->file)); in seq_file_family()
3693 return afinfo->family; in seq_file_family()
3711 if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops, in udp4_proc_init_net()
3713 return -ENOMEM; in udp4_proc_init_net()
3719 remove_proc_entry("udp", net->proc_net); in udp4_proc_exit_net()
3762 table->hash = alloc_large_system_hash(name, in udp_table_init()
3767 &table->log, in udp_table_init()
3768 &table->mask, in udp_table_init()
3772 table->hash2 = (void *)(table->hash + (table->mask + 1)); in udp_table_init()
3773 for (i = 0; i <= table->mask; i++) { in udp_table_init()
3774 INIT_HLIST_HEAD(&table->hash[i].head); in udp_table_init()
3775 table->hash[i].count = 0; in udp_table_init()
3776 spin_lock_init(&table->hash[i].lock); in udp_table_init()
3778 for (i = 0; i <= table->mask; i++) { in udp_table_init()
3779 INIT_HLIST_HEAD(&table->hash2[i].hslot.head); in udp_table_init()
3780 table->hash2[i].hslot.count = 0; in udp_table_init()
3781 spin_lock_init(&table->hash2[i].hslot.lock); in udp_table_init()
3798 net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE; in udp_sysctl_init()
3799 net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE; in udp_sysctl_init()
3802 net->ipv4.sysctl_udp_l3mdev_accept = 0; in udp_sysctl_init()
3818 udptable->hash = vmalloc_huge(hash_entries * slot_size, in udp_pernet_table_alloc()
3820 if (!udptable->hash) in udp_pernet_table_alloc()
3823 udptable->hash2 = (void *)(udptable->hash + hash_entries); in udp_pernet_table_alloc()
3824 udptable->mask = hash_entries - 1; in udp_pernet_table_alloc()
3825 udptable->log = ilog2(hash_entries); in udp_pernet_table_alloc()
3828 INIT_HLIST_HEAD(&udptable->hash[i].head); in udp_pernet_table_alloc()
3829 udptable->hash[i].count = 0; in udp_pernet_table_alloc()
3830 spin_lock_init(&udptable->hash[i].lock); in udp_pernet_table_alloc()
3832 INIT_HLIST_HEAD(&udptable->hash2[i].hslot.head); in udp_pernet_table_alloc()
3833 udptable->hash2[i].hslot.count = 0; in udp_pernet_table_alloc()
3834 spin_lock_init(&udptable->hash2[i].hslot.lock); in udp_pernet_table_alloc()
3848 struct udp_table *udptable = net->ipv4.udp_table; in udp_pernet_table_free()
3853 kvfree(udptable->hash); in udp_pernet_table_free()
3866 old_net = current->nsproxy->net_ns; in udp_set_table()
3867 hash_entries = READ_ONCE(old_net->ipv4.sysctl_udp_child_hash_entries); in udp_set_table()
3879 net->ipv4.udp_table = udptable; in udp_set_table()
3885 net->ipv4.udp_table = &udp_table; in udp_set_table()
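udp_set_table() sizes a per-namespace hash table from the parent namespace's udp_child_hash_entries sysctl at netns creation, falling back to the shared global udp_table when the knob is 0. A sketch reading the knob (path assumed: /proc/sys/net/ipv4/udp_child_hash_entries):

#include <stdio.h>

int main(void)
{
    char buf[32];
    FILE *f = fopen("/proc/sys/net/ipv4/udp_child_hash_entries", "r");

    if (!f)
        return 1;
    if (fgets(buf, sizeof(buf), f))
        printf("child hash entries: %s", buf); /* 0 = share the global table */
    fclose(f);
    return 0;
}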
3895 for (i = 0; i < ARRAY_SIZE(net->ipv4.udp_tunnel_gro); ++i) { in udp_pernet_init()
3896 INIT_HLIST_HEAD(&net->ipv4.udp_tunnel_gro[i].list); in udp_pernet_init()
3897 RCU_INIT_POINTER(net->ipv4.udp_tunnel_gro[i].sk, NULL); in udp_pernet_init()
3928 return -ENOMEM; in DEFINE_BPF_ITER_FUNC()
3933 memcpy(new_batch, iter->batch, sizeof(*iter->batch) * iter->end_sk); in DEFINE_BPF_ITER_FUNC()
3934 kvfree(iter->batch); in DEFINE_BPF_ITER_FUNC()
3935 iter->batch = new_batch; in DEFINE_BPF_ITER_FUNC()
3936 iter->max_sk = new_batch_sz; in DEFINE_BPF_ITER_FUNC()
3956 iter->state.bucket = -1; in bpf_iter_init_udp()
3966 kvfree(iter->batch); in bpf_iter_fini_udp()