// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *	James Chapman		:	Add L2TP encapsulation type.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <linux/btf_ids.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);
DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN_PERNET)

static struct udp_table *udp_get_table_prot(struct sock *sk)
{
	return sk->sk_prot->h.udp_table ? : sock_net(sk)->ipv4.udp_table;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_table *udptable = udp_get_table_prot(sk);
	struct udp_hslot *hslot, *hslot2;
	struct net *net = sock_net(sk);
	int error = -EADDRINUSE;

	if (!snum) {
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
		unsigned short first, last;
		int low, high, remaining;
		unsigned int rand;

		inet_sk_get_local_port_range(sk, &low, &high);
		remaining = (high - low) + 1;

		rand = get_random_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score += 4;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;
	return score;
}

static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

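/* Pick the receiving socket from sk's SO_REUSEPORT group, keyed by the flow
 * hash of the incoming 4-tuple. Only applies while sk is unconnected; returns
 * NULL when no group socket was selected.
 */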
static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned short hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}

static struct sock *udp4_lookup_run_bpf(struct net *net,
					struct udp_table *udptable,
					struct sk_buff *skb,
					__be32 saddr, __be16 sport,
					__be32 daddr, u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != net->ipv4.udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard socket */
	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp4_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum, dif);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp4_lib_lookup2(net, saddr, sport,
				  htonl(INADDR_ANY), hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

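/* Convenience wrapper around __udp4_lib_lookup(): take the addresses from the
 * skb's IP header and the incoming interface/L3 domain from the skb itself.
 */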
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct net *net = dev_net(skb->dev);

	return __udp4_lib_lookup(net, iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}

DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

void udp_encap_disable(void)
{
	static_branch_dec(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_disable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb, u32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv4 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, iph->ihl << 2);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk)
		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);

	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
						  info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, info,
						  (u8 *)(uh+1));
		goto out;
	}
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, dev_net(skb->dev)->ipv4.udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb:	sk_buff containing the filled-in UDP header
 * 		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)  				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	u8 tos, scope;
	__be16 dport;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = READ_ONCE(up->gso_size);

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	scope = ip_sendmsg_scope(inet, &ipc, msg);
	if (scope == RT_SCOPE_LINK)
		connected = 0;

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = inet->uc_index;
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast and
		 * uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
12039515a2e0SDavid Ahern * If so, we want to allow the send using the uc_index. 12049515a2e0SDavid Ahern */ 12059515a2e0SDavid Ahern if (ipc.oif != inet->uc_index && 12069515a2e0SDavid Ahern ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk), 12079515a2e0SDavid Ahern inet->uc_index)) { 12089515a2e0SDavid Ahern ipc.oif = inet->uc_index; 12099515a2e0SDavid Ahern } 12109515a2e0SDavid Ahern } 1211db8dac20SDavid S. Miller 1212db8dac20SDavid S. Miller if (connected) 1213db8dac20SDavid S. Miller rt = (struct rtable *)sk_dst_check(sk, 0); 1214db8dac20SDavid S. Miller 121551456b29SIan Morris if (!rt) { 121684a3aa00SPavel Emelyanov struct net *net = sock_net(sk); 12179a24abfaSDavid Ahern __u8 flow_flags = inet_sk_flowi_flags(sk); 121884a3aa00SPavel Emelyanov 1219e474995fSDavid S. Miller fl4 = &fl4_stack; 12209a24abfaSDavid Ahern 12210e26371dSGuillaume Nault flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, scope, 12220e26371dSGuillaume Nault sk->sk_protocol, flow_flags, faddr, saddr, 12230e26371dSGuillaume Nault dport, inet->inet_sport, sk->sk_uid); 1224c0951cbcSDavid S. Miller 12253df98d79SPaul Moore security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 1226e474995fSDavid S. Miller rt = ip_route_output_flow(net, fl4, sk); 1227b23dd4feSDavid S. Miller if (IS_ERR(rt)) { 1228b23dd4feSDavid S. Miller err = PTR_ERR(rt); 122906dc94b1SDavid S. Miller rt = NULL; 1230db8dac20SDavid S. Miller if (err == -ENETUNREACH) 1231f1d8cba6SEric Dumazet IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 1232db8dac20SDavid S. Miller goto out; 1233db8dac20SDavid S. Miller } 1234db8dac20SDavid S. Miller 1235db8dac20SDavid S. Miller err = -EACCES; 1236db8dac20SDavid S. Miller if ((rt->rt_flags & RTCF_BROADCAST) && 1237db8dac20SDavid S. Miller !sock_flag(sk, SOCK_BROADCAST)) 1238db8dac20SDavid S. Miller goto out; 1239db8dac20SDavid S. Miller if (connected) 1240d8d1f30bSChangli Gao sk_dst_set(sk, dst_clone(&rt->dst)); 1241db8dac20SDavid S. Miller } 1242db8dac20SDavid S. Miller 1243db8dac20SDavid S. Miller if (msg->msg_flags&MSG_CONFIRM) 1244db8dac20SDavid S. Miller goto do_confirm; 1245db8dac20SDavid S. Miller back_from_confirm: 1246db8dac20SDavid S. Miller 1247e474995fSDavid S. Miller saddr = fl4->saddr; 1248db8dac20SDavid S. Miller if (!ipc.addr) 1249e474995fSDavid S. Miller daddr = ipc.addr = fl4->daddr; 1250db8dac20SDavid S. Miller 1251903ab86dSHerbert Xu /* Lockless fast path for the non-corking case. */ 1252903ab86dSHerbert Xu if (!corkreq) { 12531cd7884dSWillem de Bruijn struct inet_cork cork; 12541cd7884dSWillem de Bruijn 1255f69e6d13SAl Viro skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, 1256903ab86dSHerbert Xu sizeof(struct udphdr), &ipc, &rt, 12571cd7884dSWillem de Bruijn &cork, msg->msg_flags); 1258903ab86dSHerbert Xu err = PTR_ERR(skb); 125950c3a487SYOSHIFUJI Hideaki / 吉藤英明 if (!IS_ERR_OR_NULL(skb)) 1260bec1f6f6SWillem de Bruijn err = udp_send_skb(skb, fl4, &cork); 1261903ab86dSHerbert Xu goto out; 1262903ab86dSHerbert Xu } 1263903ab86dSHerbert Xu 1264db8dac20SDavid S. Miller lock_sock(sk); 1265db8dac20SDavid S. Miller if (unlikely(up->pending)) { 1266db8dac20SDavid S. Miller /* The socket is already corked while preparing it. */ 1267db8dac20SDavid S. Miller /* ... which is an evident application bug. --ANK */ 1268db8dac20SDavid S. Miller release_sock(sk); 1269db8dac20SDavid S. Miller 1270197df02cSMatteo Croce net_dbg_ratelimited("socket already corked\n"); 1271db8dac20SDavid S. Miller err = -EINVAL; 1272db8dac20SDavid S. Miller goto out; 1273db8dac20SDavid S. Miller } 1274db8dac20SDavid S. 
Miller /* 1275db8dac20SDavid S. Miller * Now cork the socket to pend data. 1276db8dac20SDavid S. Miller */ 1277b6f21b26SDavid S. Miller fl4 = &inet->cork.fl.u.ip4; 1278b6f21b26SDavid S. Miller fl4->daddr = daddr; 1279b6f21b26SDavid S. Miller fl4->saddr = saddr; 12809cce96dfSDavid S. Miller fl4->fl4_dport = dport; 12819cce96dfSDavid S. Miller fl4->fl4_sport = inet->inet_sport; 1282db8dac20SDavid S. Miller up->pending = AF_INET; 1283db8dac20SDavid S. Miller 1284db8dac20SDavid S. Miller do_append_data: 1285db8dac20SDavid S. Miller up->len += ulen; 1286f69e6d13SAl Viro err = ip_append_data(sk, fl4, getfrag, msg, ulen, 12872e77d89bSEric Dumazet sizeof(struct udphdr), &ipc, &rt, 1288db8dac20SDavid S. Miller corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 1289db8dac20SDavid S. Miller if (err) 1290db8dac20SDavid S. Miller udp_flush_pending_frames(sk); 1291db8dac20SDavid S. Miller else if (!corkreq) 1292db8dac20SDavid S. Miller err = udp_push_pending_frames(sk); 1293db8dac20SDavid S. Miller else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) 1294db8dac20SDavid S. Miller up->pending = 0; 1295db8dac20SDavid S. Miller release_sock(sk); 1296db8dac20SDavid S. Miller 1297db8dac20SDavid S. Miller out: 1298db8dac20SDavid S. Miller ip_rt_put(rt); 12991b97013bSAndrey Ignatov out_free: 1300db8dac20SDavid S. Miller if (free) 1301db8dac20SDavid S. Miller kfree(ipc.opt); 1302db8dac20SDavid S. Miller if (!err) 1303db8dac20SDavid S. Miller return len; 1304db8dac20SDavid S. Miller /* 1305db8dac20SDavid S. Miller * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting 1306db8dac20SDavid S. Miller * ENOBUFS might not be good (it's not tunable per se), but otherwise 1307db8dac20SDavid S. Miller * we don't have a good statistic (IpOutDiscards but it can be too many 1308db8dac20SDavid S. Miller * things). We could add another new stat but at least for now that 1309db8dac20SDavid S. Miller * seems like overkill. 1310db8dac20SDavid S. Miller */ 1311db8dac20SDavid S. Miller if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 13126aef70a8SEric Dumazet UDP_INC_STATS(sock_net(sk), 1313629ca23cSPavel Emelyanov UDP_MIB_SNDBUFERRORS, is_udplite); 1314db8dac20SDavid S. Miller } 1315db8dac20SDavid S. Miller return err; 1316db8dac20SDavid S. Miller 1317db8dac20SDavid S. Miller do_confirm: 13180dec879fSJulian Anastasov if (msg->msg_flags & MSG_PROBE) 13190dec879fSJulian Anastasov dst_confirm_neigh(&rt->dst, &fl4->daddr); 1320db8dac20SDavid S. Miller if (!(msg->msg_flags&MSG_PROBE) || len) 1321db8dac20SDavid S. Miller goto back_from_confirm; 1322db8dac20SDavid S. Miller err = 0; 1323db8dac20SDavid S. Miller goto out; 1324db8dac20SDavid S. Miller } 1325c482c568SEric Dumazet EXPORT_SYMBOL(udp_sendmsg); 1326db8dac20SDavid S. Miller 1327*1d7e4538SDavid Howells void udp_splice_eof(struct socket *sock) 1328*1d7e4538SDavid Howells { 1329*1d7e4538SDavid Howells struct sock *sk = sock->sk; 1330*1d7e4538SDavid Howells struct udp_sock *up = udp_sk(sk); 1331*1d7e4538SDavid Howells 1332*1d7e4538SDavid Howells if (!up->pending || READ_ONCE(up->corkflag)) 1333*1d7e4538SDavid Howells return; 1334*1d7e4538SDavid Howells 1335*1d7e4538SDavid Howells lock_sock(sk); 1336*1d7e4538SDavid Howells if (up->pending && !READ_ONCE(up->corkflag)) 1337*1d7e4538SDavid Howells udp_push_pending_frames(sk); 1338*1d7e4538SDavid Howells release_sock(sk); 1339*1d7e4538SDavid Howells } 1340*1d7e4538SDavid Howells EXPORT_SYMBOL_GPL(udp_splice_eof); 1341*1d7e4538SDavid Howells 1342db8dac20SDavid S. 
Miller int udp_sendpage(struct sock *sk, struct page *page, int offset, 1343db8dac20SDavid S. Miller size_t size, int flags) 1344db8dac20SDavid S. Miller { 13457ac7c987SDavid Howells struct bio_vec bvec; 13467ac7c987SDavid Howells struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES }; 1347db8dac20SDavid S. Miller 1348d3f7d56aSShawn Landden if (flags & MSG_SENDPAGE_NOTLAST) 13497ac7c987SDavid Howells msg.msg_flags |= MSG_MORE; 1350d3f7d56aSShawn Landden 13517ac7c987SDavid Howells bvec_set_page(&bvec, page, size, offset); 13527ac7c987SDavid Howells iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); 13537ac7c987SDavid Howells return udp_sendmsg(sk, &msg, size); 1354db8dac20SDavid S. Miller } 1355db8dac20SDavid S. Miller 1356dce4551cSPaolo Abeni #define UDP_SKB_IS_STATELESS 0x80000000 1357dce4551cSPaolo Abeni 1358677bf08cSFlorian Westphal /* all head states (dst, sk, nf conntrack) except skb extensions are 1359677bf08cSFlorian Westphal * cleared by udp_rcv(). 1360677bf08cSFlorian Westphal * 1361677bf08cSFlorian Westphal * We need to preserve secpath, if present, to eventually process 1362677bf08cSFlorian Westphal * IP_CMSG_PASSSEC at recvmsg() time. 1363677bf08cSFlorian Westphal * 1364677bf08cSFlorian Westphal * Other extensions can be cleared. 1365677bf08cSFlorian Westphal */ 1366677bf08cSFlorian Westphal static bool udp_try_make_stateless(struct sk_buff *skb) 1367677bf08cSFlorian Westphal { 1368677bf08cSFlorian Westphal if (!skb_has_extensions(skb)) 1369677bf08cSFlorian Westphal return true; 1370677bf08cSFlorian Westphal 1371677bf08cSFlorian Westphal if (!secpath_exists(skb)) { 1372677bf08cSFlorian Westphal skb_ext_reset(skb); 1373677bf08cSFlorian Westphal return true; 1374677bf08cSFlorian Westphal } 1375677bf08cSFlorian Westphal 1376677bf08cSFlorian Westphal return false; 1377677bf08cSFlorian Westphal } 1378677bf08cSFlorian Westphal 1379b65ac446SPaolo Abeni static void udp_set_dev_scratch(struct sk_buff *skb) 1380b65ac446SPaolo Abeni { 1381dce4551cSPaolo Abeni struct udp_dev_scratch *scratch = udp_skb_scratch(skb); 1382b65ac446SPaolo Abeni 1383b65ac446SPaolo Abeni BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long)); 1384dce4551cSPaolo Abeni scratch->_tsize_state = skb->truesize; 1385dce4551cSPaolo Abeni #if BITS_PER_LONG == 64 1386b65ac446SPaolo Abeni scratch->len = skb->len; 1387b65ac446SPaolo Abeni scratch->csum_unnecessary = !!skb_csum_unnecessary(skb); 1388b65ac446SPaolo Abeni scratch->is_linear = !skb_is_nonlinear(skb); 1389b65ac446SPaolo Abeni #endif 1390677bf08cSFlorian Westphal if (udp_try_make_stateless(skb)) 1391dce4551cSPaolo Abeni scratch->_tsize_state |= UDP_SKB_IS_STATELESS; 1392dce4551cSPaolo Abeni } 1393dce4551cSPaolo Abeni 1394a793183cSEric Dumazet static void udp_skb_csum_unnecessary_set(struct sk_buff *skb) 1395a793183cSEric Dumazet { 1396a793183cSEric Dumazet /* We come here after udp_lib_checksum_complete() returned 0. 1397a793183cSEric Dumazet * This means that __skb_checksum_complete() might have 1398a793183cSEric Dumazet * set skb->csum_valid to 1. 1399a793183cSEric Dumazet * On 64bit platforms, we can set csum_unnecessary 1400a793183cSEric Dumazet * to true, but only if the skb is not shared. 
1401a793183cSEric Dumazet */ 1402a793183cSEric Dumazet #if BITS_PER_LONG == 64 1403a793183cSEric Dumazet if (!skb_shared(skb)) 1404a793183cSEric Dumazet udp_skb_scratch(skb)->csum_unnecessary = true; 1405a793183cSEric Dumazet #endif 1406a793183cSEric Dumazet } 1407a793183cSEric Dumazet 1408dce4551cSPaolo Abeni static int udp_skb_truesize(struct sk_buff *skb) 1409dce4551cSPaolo Abeni { 1410dce4551cSPaolo Abeni return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS; 1411dce4551cSPaolo Abeni } 1412dce4551cSPaolo Abeni 1413dce4551cSPaolo Abeni static bool udp_skb_has_head_state(struct sk_buff *skb) 1414dce4551cSPaolo Abeni { 1415dce4551cSPaolo Abeni return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS); 1416dce4551cSPaolo Abeni } 1417b65ac446SPaolo Abeni 14187c13f97fSPaolo Abeni /* fully reclaim rmem/fwd memory allocated for skb */ 14196dfb4367SPaolo Abeni static void udp_rmem_release(struct sock *sk, int size, int partial, 14206dfb4367SPaolo Abeni bool rx_queue_lock_held) 1421f970bd9eSPaolo Abeni { 14226b229cf7SEric Dumazet struct udp_sock *up = udp_sk(sk); 14232276f58aSPaolo Abeni struct sk_buff_head *sk_queue; 1424f970bd9eSPaolo Abeni int amt; 1425f970bd9eSPaolo Abeni 14266b229cf7SEric Dumazet if (likely(partial)) { 14276b229cf7SEric Dumazet up->forward_deficit += size; 14286b229cf7SEric Dumazet size = up->forward_deficit; 14298a3854c7SPaolo Abeni if (size < READ_ONCE(up->forward_threshold) && 1430d39ca259SPaolo Abeni !skb_queue_empty(&up->reader_queue)) 14316b229cf7SEric Dumazet return; 14326b229cf7SEric Dumazet } else { 14336b229cf7SEric Dumazet size += up->forward_deficit; 14346b229cf7SEric Dumazet } 14356b229cf7SEric Dumazet up->forward_deficit = 0; 14366b229cf7SEric Dumazet 14376dfb4367SPaolo Abeni /* acquire the sk_receive_queue for fwd allocated memory scheduling, 14386dfb4367SPaolo Abeni * if the caller doesn't hold it already 14396dfb4367SPaolo Abeni */ 14402276f58aSPaolo Abeni sk_queue = &sk->sk_receive_queue; 14416dfb4367SPaolo Abeni if (!rx_queue_lock_held) 14422276f58aSPaolo Abeni spin_lock(&sk_queue->lock); 14432276f58aSPaolo Abeni 14446dfb4367SPaolo Abeni 1445f970bd9eSPaolo Abeni sk->sk_forward_alloc += size; 1446100fdd1fSEric Dumazet amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); 1447f970bd9eSPaolo Abeni sk->sk_forward_alloc -= amt; 1448f970bd9eSPaolo Abeni 1449f970bd9eSPaolo Abeni if (amt) 1450100fdd1fSEric Dumazet __sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT); 145102ab0d13SEric Dumazet 145202ab0d13SEric Dumazet atomic_sub(size, &sk->sk_rmem_alloc); 14532276f58aSPaolo Abeni 14542276f58aSPaolo Abeni /* this can save us from acquiring the rx queue lock on next receive */ 14552276f58aSPaolo Abeni skb_queue_splice_tail_init(sk_queue, &up->reader_queue); 14562276f58aSPaolo Abeni 14576dfb4367SPaolo Abeni if (!rx_queue_lock_held) 14582276f58aSPaolo Abeni spin_unlock(&sk_queue->lock); 1459f970bd9eSPaolo Abeni } 1460f970bd9eSPaolo Abeni 14612276f58aSPaolo Abeni /* Note: called with reader_queue.lock held. 1462c84d9490SEric Dumazet * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch. 1463c84d9490SEric Dumazet * This avoids a cache line miss while receive_queue lock is held. 1464c84d9490SEric Dumazet * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
1465c84d9490SEric Dumazet */ 14667c13f97fSPaolo Abeni void udp_skb_destructor(struct sock *sk, struct sk_buff *skb) 1467f970bd9eSPaolo Abeni { 1468b65ac446SPaolo Abeni prefetch(&skb->data); 1469b65ac446SPaolo Abeni udp_rmem_release(sk, udp_skb_truesize(skb), 1, false); 1470f970bd9eSPaolo Abeni } 14717c13f97fSPaolo Abeni EXPORT_SYMBOL(udp_skb_destructor); 1472f970bd9eSPaolo Abeni 14736dfb4367SPaolo Abeni /* as above, but the caller also holds the rx queue lock */ 147464f5102dSColin Ian King static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb) 14756dfb4367SPaolo Abeni { 1476b65ac446SPaolo Abeni prefetch(&skb->data); 1477b65ac446SPaolo Abeni udp_rmem_release(sk, udp_skb_truesize(skb), 1, true); 14786dfb4367SPaolo Abeni } 14796dfb4367SPaolo Abeni 14804b272750SEric Dumazet /* Idea of busylocks is to let producers grab an extra spinlock 14814b272750SEric Dumazet * to relieve pressure on the receive_queue spinlock shared by the consumer. 14824b272750SEric Dumazet * Under flood, this means that only one producer can be in line 14834b272750SEric Dumazet * trying to acquire the receive_queue spinlock. 14844b272750SEric Dumazet * These busylocks can be allocated in a per-cpu manner, instead of a 14854b272750SEric Dumazet * per-socket one (that would consume a cache line per socket). 14864b272750SEric Dumazet */ 14874b272750SEric Dumazet static int udp_busylocks_log __read_mostly; 14884b272750SEric Dumazet static spinlock_t *udp_busylocks __read_mostly; 14894b272750SEric Dumazet 14904b272750SEric Dumazet static spinlock_t *busylock_acquire(void *ptr) 14914b272750SEric Dumazet { 14924b272750SEric Dumazet spinlock_t *busy; 14934b272750SEric Dumazet 14944b272750SEric Dumazet busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log); 14954b272750SEric Dumazet spin_lock(busy); 14964b272750SEric Dumazet return busy; 14974b272750SEric Dumazet } 14984b272750SEric Dumazet 14994b272750SEric Dumazet static void busylock_release(spinlock_t *busy) 15004b272750SEric Dumazet { 15014b272750SEric Dumazet if (busy) 15024b272750SEric Dumazet spin_unlock(busy); 15034b272750SEric Dumazet } 15044b272750SEric Dumazet 1505fd9c31f8SJason Xing static int udp_rmem_schedule(struct sock *sk, int size) 1506fd9c31f8SJason Xing { 1507fd9c31f8SJason Xing int delta; 1508fd9c31f8SJason Xing 1509fd9c31f8SJason Xing delta = size - sk->sk_forward_alloc; 1510fd9c31f8SJason Xing if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV)) 1511fd9c31f8SJason Xing return -ENOBUFS; 1512fd9c31f8SJason Xing 1513fd9c31f8SJason Xing return 0; 1514fd9c31f8SJason Xing } 1515fd9c31f8SJason Xing 1516f970bd9eSPaolo Abeni int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) 1517f970bd9eSPaolo Abeni { 1518f970bd9eSPaolo Abeni struct sk_buff_head *list = &sk->sk_receive_queue; 1519fd9c31f8SJason Xing int rmem, err = -ENOMEM; 15204b272750SEric Dumazet spinlock_t *busy = NULL; 1521c8c8b127SEric Dumazet int size; 1522f970bd9eSPaolo Abeni 1523f970bd9eSPaolo Abeni /* try to avoid the costly atomic add/sub pair when the receive 1524f970bd9eSPaolo Abeni * queue is full; always allow at least a packet 1525f970bd9eSPaolo Abeni */ 1526f970bd9eSPaolo Abeni rmem = atomic_read(&sk->sk_rmem_alloc); 1527363dc73aSPaolo Abeni if (rmem > sk->sk_rcvbuf) 1528f970bd9eSPaolo Abeni goto drop; 1529f970bd9eSPaolo Abeni 1530c8c8b127SEric Dumazet /* Under mem pressure, it might be helpful to give udp_recvmsg() 1531c8c8b127SEric Dumazet * linear skbs : 1532c8c8b127SEric Dumazet * - Reduce memory overhead and thus increase receive queue capacity
1533c8c8b127SEric Dumazet * - Less cache line misses at copyout() time 1534c8c8b127SEric Dumazet * - Less work at consume_skb() (less alien page frag freeing) 1535c8c8b127SEric Dumazet */ 15364b272750SEric Dumazet if (rmem > (sk->sk_rcvbuf >> 1)) { 1537c8c8b127SEric Dumazet skb_condense(skb); 15384b272750SEric Dumazet 15394b272750SEric Dumazet busy = busylock_acquire(sk); 15404b272750SEric Dumazet } 1541c8c8b127SEric Dumazet size = skb->truesize; 1542b65ac446SPaolo Abeni udp_set_dev_scratch(skb); 1543c8c8b127SEric Dumazet 1544f970bd9eSPaolo Abeni /* we drop only if the receive buf is full and the receive 1545f970bd9eSPaolo Abeni * queue contains some other skb 1546f970bd9eSPaolo Abeni */ 1547f970bd9eSPaolo Abeni rmem = atomic_add_return(size, &sk->sk_rmem_alloc); 1548feed8a4fSAntonio Messina if (rmem > (size + (unsigned int)sk->sk_rcvbuf)) 1549f970bd9eSPaolo Abeni goto uncharge_drop; 1550f970bd9eSPaolo Abeni 1551f970bd9eSPaolo Abeni spin_lock(&list->lock); 1552fd9c31f8SJason Xing err = udp_rmem_schedule(sk, size); 1553fd9c31f8SJason Xing if (err) { 1554f970bd9eSPaolo Abeni spin_unlock(&list->lock); 1555f970bd9eSPaolo Abeni goto uncharge_drop; 1556f970bd9eSPaolo Abeni } 1557f970bd9eSPaolo Abeni 1558f970bd9eSPaolo Abeni sk->sk_forward_alloc -= size; 1559f970bd9eSPaolo Abeni 15607c13f97fSPaolo Abeni /* no need to setup a destructor, we will explicitly release the 15617c13f97fSPaolo Abeni * forward allocated memory on dequeue 15627c13f97fSPaolo Abeni */ 1563f970bd9eSPaolo Abeni sock_skb_set_dropcount(sk, skb); 1564f970bd9eSPaolo Abeni 1565f970bd9eSPaolo Abeni __skb_queue_tail(list, skb); 1566f970bd9eSPaolo Abeni spin_unlock(&list->lock); 1567f970bd9eSPaolo Abeni 1568f970bd9eSPaolo Abeni if (!sock_flag(sk, SOCK_DEAD)) 1569f970bd9eSPaolo Abeni sk->sk_data_ready(sk); 1570f970bd9eSPaolo Abeni 15714b272750SEric Dumazet busylock_release(busy); 1572f970bd9eSPaolo Abeni return 0; 1573f970bd9eSPaolo Abeni 1574f970bd9eSPaolo Abeni uncharge_drop: 1575f970bd9eSPaolo Abeni atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 1576f970bd9eSPaolo Abeni 1577f970bd9eSPaolo Abeni drop: 1578f970bd9eSPaolo Abeni atomic_inc(&sk->sk_drops); 15794b272750SEric Dumazet busylock_release(busy); 1580f970bd9eSPaolo Abeni return err; 1581f970bd9eSPaolo Abeni } 1582f970bd9eSPaolo Abeni EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb); 1583f970bd9eSPaolo Abeni 1584d38afeecSKuniyuki Iwashima void udp_destruct_common(struct sock *sk) 1585f970bd9eSPaolo Abeni { 1586f970bd9eSPaolo Abeni /* reclaim completely the forward allocated memory */ 15872276f58aSPaolo Abeni struct udp_sock *up = udp_sk(sk); 15887c13f97fSPaolo Abeni unsigned int total = 0; 15897c13f97fSPaolo Abeni struct sk_buff *skb; 15907c13f97fSPaolo Abeni 15912276f58aSPaolo Abeni skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue); 15922276f58aSPaolo Abeni while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) { 15937c13f97fSPaolo Abeni total += skb->truesize; 15947c13f97fSPaolo Abeni kfree_skb(skb); 15957c13f97fSPaolo Abeni } 15966dfb4367SPaolo Abeni udp_rmem_release(sk, total, 0, true); 1597d38afeecSKuniyuki Iwashima } 1598d38afeecSKuniyuki Iwashima EXPORT_SYMBOL_GPL(udp_destruct_common); 15997c13f97fSPaolo Abeni 1600d38afeecSKuniyuki Iwashima static void udp_destruct_sock(struct sock *sk) 1601d38afeecSKuniyuki Iwashima { 1602d38afeecSKuniyuki Iwashima udp_destruct_common(sk); 1603f970bd9eSPaolo Abeni inet_sock_destruct(sk); 1604f970bd9eSPaolo Abeni } 1605f970bd9eSPaolo Abeni 1606f970bd9eSPaolo Abeni int udp_init_sock(struct sock *sk) 1607f970bd9eSPaolo 
Abeni { 16088a3854c7SPaolo Abeni udp_lib_init_sock(sk); 1609f970bd9eSPaolo Abeni sk->sk_destruct = udp_destruct_sock; 1610e993ffe3SPavel Begunkov set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 1611f970bd9eSPaolo Abeni return 0; 1612f970bd9eSPaolo Abeni } 1613f970bd9eSPaolo Abeni 1614f970bd9eSPaolo Abeni void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len) 1615f970bd9eSPaolo Abeni { 1616f970bd9eSPaolo Abeni if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) { 1617f970bd9eSPaolo Abeni bool slow = lock_sock_fast(sk); 1618f970bd9eSPaolo Abeni 1619f970bd9eSPaolo Abeni sk_peek_offset_bwd(sk, len); 1620f970bd9eSPaolo Abeni unlock_sock_fast(sk, slow); 1621f970bd9eSPaolo Abeni } 16220a463c78SPaolo Abeni 1623ca2c1418SPaolo Abeni if (!skb_unref(skb)) 1624ca2c1418SPaolo Abeni return; 1625ca2c1418SPaolo Abeni 1626dce4551cSPaolo Abeni /* In the more common cases we cleared the head states previously, 1627dce4551cSPaolo Abeni * see __udp_queue_rcv_skb(). 16280ddf3fb2SPaolo Abeni */ 1629dce4551cSPaolo Abeni if (unlikely(udp_skb_has_head_state(skb))) 16300ddf3fb2SPaolo Abeni skb_release_head_state(skb); 1631ca2c1418SPaolo Abeni __consume_stateless_skb(skb); 1632f970bd9eSPaolo Abeni } 1633f970bd9eSPaolo Abeni EXPORT_SYMBOL_GPL(skb_consume_udp); 1634f970bd9eSPaolo Abeni 16352276f58aSPaolo Abeni static struct sk_buff *__first_packet_length(struct sock *sk, 16362276f58aSPaolo Abeni struct sk_buff_head *rcvq, 16372276f58aSPaolo Abeni int *total) 16382276f58aSPaolo Abeni { 16392276f58aSPaolo Abeni struct sk_buff *skb; 16402276f58aSPaolo Abeni 16419bd780f5SPaolo Abeni while ((skb = skb_peek(rcvq)) != NULL) { 16429bd780f5SPaolo Abeni if (udp_lib_checksum_complete(skb)) { 16432276f58aSPaolo Abeni __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, 16442276f58aSPaolo Abeni IS_UDPLITE(sk)); 16452276f58aSPaolo Abeni __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, 16462276f58aSPaolo Abeni IS_UDPLITE(sk)); 16472276f58aSPaolo Abeni atomic_inc(&sk->sk_drops); 16482276f58aSPaolo Abeni __skb_unlink(skb, rcvq); 16492276f58aSPaolo Abeni *total += skb->truesize; 16502276f58aSPaolo Abeni kfree_skb(skb); 16519bd780f5SPaolo Abeni } else { 1652a793183cSEric Dumazet udp_skb_csum_unnecessary_set(skb); 16539bd780f5SPaolo Abeni break; 16549bd780f5SPaolo Abeni } 16552276f58aSPaolo Abeni } 16562276f58aSPaolo Abeni return skb; 16572276f58aSPaolo Abeni } 16582276f58aSPaolo Abeni 165985584672SEric Dumazet /** 166085584672SEric Dumazet * first_packet_length - return length of first packet in receive queue 166185584672SEric Dumazet * @sk: socket 166285584672SEric Dumazet * 166385584672SEric Dumazet * Drops all bad checksum frames, until a valid one is found. 1664e83c6744SEric Dumazet * Returns the length of found skb, or -1 if none is found. 
166585584672SEric Dumazet */ 1666e83c6744SEric Dumazet static int first_packet_length(struct sock *sk) 166785584672SEric Dumazet { 16682276f58aSPaolo Abeni struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue; 16692276f58aSPaolo Abeni struct sk_buff_head *sk_queue = &sk->sk_receive_queue; 167085584672SEric Dumazet struct sk_buff *skb; 16717c13f97fSPaolo Abeni int total = 0; 1672e83c6744SEric Dumazet int res; 167385584672SEric Dumazet 167485584672SEric Dumazet spin_lock_bh(&rcvq->lock); 16752276f58aSPaolo Abeni skb = __first_packet_length(sk, rcvq, &total); 1676137a0dbeSEric Dumazet if (!skb && !skb_queue_empty_lockless(sk_queue)) { 16772276f58aSPaolo Abeni spin_lock(&sk_queue->lock); 16782276f58aSPaolo Abeni skb_queue_splice_tail_init(sk_queue, rcvq); 16792276f58aSPaolo Abeni spin_unlock(&sk_queue->lock); 16802276f58aSPaolo Abeni 16812276f58aSPaolo Abeni skb = __first_packet_length(sk, rcvq, &total); 168285584672SEric Dumazet } 1683e83c6744SEric Dumazet res = skb ? skb->len : -1; 16847c13f97fSPaolo Abeni if (total) 16856dfb4367SPaolo Abeni udp_rmem_release(sk, total, 1, false); 168685584672SEric Dumazet spin_unlock_bh(&rcvq->lock); 168785584672SEric Dumazet return res; 168885584672SEric Dumazet } 168985584672SEric Dumazet 16901da177e4SLinus Torvalds /* 16911da177e4SLinus Torvalds * IOCTL requests applicable to the UDP protocol 16921da177e4SLinus Torvalds */ 16931da177e4SLinus Torvalds 16941da177e4SLinus Torvalds int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) 16951da177e4SLinus Torvalds { 16966516c655SStephen Hemminger switch (cmd) { 16971da177e4SLinus Torvalds case SIOCOUTQ: 16981da177e4SLinus Torvalds { 169931e6d363SEric Dumazet int amount = sk_wmem_alloc_get(sk); 170031e6d363SEric Dumazet 17011da177e4SLinus Torvalds return put_user(amount, (int __user *)arg); 17021da177e4SLinus Torvalds } 17031da177e4SLinus Torvalds 17041da177e4SLinus Torvalds case SIOCINQ: 17051da177e4SLinus Torvalds { 1706e83c6744SEric Dumazet int amount = max_t(int, 0, first_packet_length(sk)); 17071da177e4SLinus Torvalds 17081da177e4SLinus Torvalds return put_user(amount, (int __user *)arg); 17091da177e4SLinus Torvalds } 17101da177e4SLinus Torvalds 17111da177e4SLinus Torvalds default: 17121da177e4SLinus Torvalds return -ENOIOCTLCMD; 17131da177e4SLinus Torvalds } 17146516c655SStephen Hemminger 17156516c655SStephen Hemminger return 0; 17161da177e4SLinus Torvalds } 1717c482c568SEric Dumazet EXPORT_SYMBOL(udp_ioctl); 17181da177e4SLinus Torvalds 17192276f58aSPaolo Abeni struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, 1720ec095263SOliver Hartkopp int *off, int *err) 17212276f58aSPaolo Abeni { 17222276f58aSPaolo Abeni struct sk_buff_head *sk_queue = &sk->sk_receive_queue; 17232276f58aSPaolo Abeni struct sk_buff_head *queue; 17242276f58aSPaolo Abeni struct sk_buff *last; 17252276f58aSPaolo Abeni long timeo; 17262276f58aSPaolo Abeni int error; 17272276f58aSPaolo Abeni 17282276f58aSPaolo Abeni queue = &udp_sk(sk)->reader_queue; 17292276f58aSPaolo Abeni timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 17302276f58aSPaolo Abeni do { 17312276f58aSPaolo Abeni struct sk_buff *skb; 17322276f58aSPaolo Abeni 17332276f58aSPaolo Abeni error = sock_error(sk); 17342276f58aSPaolo Abeni if (error) 17352276f58aSPaolo Abeni break; 17362276f58aSPaolo Abeni 17372276f58aSPaolo Abeni error = -EAGAIN; 17382276f58aSPaolo Abeni do { 17392276f58aSPaolo Abeni spin_lock_bh(&queue->lock); 1740e427cad6SPaolo Abeni skb = __skb_try_recv_from_queue(sk, queue, flags, off, 1741e427cad6SPaolo Abeni err, &last); 
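/* Editorial note, not part of the original source: a short summary of the
 * two-queue scheme assumed from the surrounding code. Producers only append
 * to sk_receive_queue (see __udp_enqueue_schedule_skb()); the lookup above
 * first walks the private reader_queue under its own lock, and only when it
 * is empty is sk_receive_queue spliced into reader_queue below, so the
 * consumer rarely contends with softirq producers for the receive queue lock.
 */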
17422276f58aSPaolo Abeni if (skb) { 1743e427cad6SPaolo Abeni if (!(flags & MSG_PEEK)) 1744e427cad6SPaolo Abeni udp_skb_destructor(sk, skb); 17452276f58aSPaolo Abeni spin_unlock_bh(&queue->lock); 17462276f58aSPaolo Abeni return skb; 17472276f58aSPaolo Abeni } 17482276f58aSPaolo Abeni 1749137a0dbeSEric Dumazet if (skb_queue_empty_lockless(sk_queue)) { 17502276f58aSPaolo Abeni spin_unlock_bh(&queue->lock); 17512276f58aSPaolo Abeni goto busy_check; 17522276f58aSPaolo Abeni } 17532276f58aSPaolo Abeni 17546dfb4367SPaolo Abeni /* refill the reader queue and walk it again 17556dfb4367SPaolo Abeni * keep both queues locked to avoid re-acquiring 17566dfb4367SPaolo Abeni * the sk_receive_queue lock if fwd memory scheduling 17576dfb4367SPaolo Abeni * is needed. 17586dfb4367SPaolo Abeni */ 17592276f58aSPaolo Abeni spin_lock(&sk_queue->lock); 17602276f58aSPaolo Abeni skb_queue_splice_tail_init(sk_queue, queue); 17612276f58aSPaolo Abeni 1762e427cad6SPaolo Abeni skb = __skb_try_recv_from_queue(sk, queue, flags, off, 1763e427cad6SPaolo Abeni err, &last); 1764e427cad6SPaolo Abeni if (skb && !(flags & MSG_PEEK)) 1765e427cad6SPaolo Abeni udp_skb_dtor_locked(sk, skb); 17666dfb4367SPaolo Abeni spin_unlock(&sk_queue->lock); 17672276f58aSPaolo Abeni spin_unlock_bh(&queue->lock); 1768de321ed3SAndrey Vagin if (skb) 17692276f58aSPaolo Abeni return skb; 17702276f58aSPaolo Abeni 17712276f58aSPaolo Abeni busy_check: 17722276f58aSPaolo Abeni if (!sk_can_busy_loop(sk)) 17732276f58aSPaolo Abeni break; 17742276f58aSPaolo Abeni 17752276f58aSPaolo Abeni sk_busy_loop(sk, flags & MSG_DONTWAIT); 1776137a0dbeSEric Dumazet } while (!skb_queue_empty_lockless(sk_queue)); 17772276f58aSPaolo Abeni 17782276f58aSPaolo Abeni /* sk_queue is empty, reader_queue may contain peeked packets */ 17792276f58aSPaolo Abeni } while (timeo && 1780b50b0580SSabrina Dubroca !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue, 1781b50b0580SSabrina Dubroca &error, &timeo, 17822276f58aSPaolo Abeni (struct sk_buff *)sk_queue)); 17832276f58aSPaolo Abeni 17842276f58aSPaolo Abeni *err = error; 17852276f58aSPaolo Abeni return NULL; 17862276f58aSPaolo Abeni } 17877e823644SJiri Kosina EXPORT_SYMBOL(__skb_recv_udp); 17882276f58aSPaolo Abeni 1789965b57b4SCong Wang int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 1790d7f57118SCong Wang { 1791d7f57118SCong Wang struct sk_buff *skb; 179278fa0d61SJohn Fastabend int err; 1793d7f57118SCong Wang 179431f1fbcbSPeilin Ye try_again: 1795ec095263SOliver Hartkopp skb = skb_recv_udp(sk, MSG_DONTWAIT, &err); 1796d7f57118SCong Wang if (!skb) 1797d7f57118SCong Wang return err; 1798099f896fSCong Wang 1799099f896fSCong Wang if (udp_lib_checksum_complete(skb)) { 180031f1fbcbSPeilin Ye int is_udplite = IS_UDPLITE(sk); 180131f1fbcbSPeilin Ye struct net *net = sock_net(sk); 180231f1fbcbSPeilin Ye 180331f1fbcbSPeilin Ye __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite); 180431f1fbcbSPeilin Ye __UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite); 1805099f896fSCong Wang atomic_inc(&sk->sk_drops); 1806099f896fSCong Wang kfree_skb(skb); 180731f1fbcbSPeilin Ye goto try_again; 1808099f896fSCong Wang } 1809099f896fSCong Wang 1810db39dfdcSPeilin Ye WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 181178fa0d61SJohn Fastabend return recv_actor(sk, skb); 1812d7f57118SCong Wang } 1813965b57b4SCong Wang EXPORT_SYMBOL(udp_read_skb); 1814d7f57118SCong Wang 1815db8dac20SDavid S. Miller /* 1816db8dac20SDavid S. Miller * This should be easy, if there is something there we 1817db8dac20SDavid S. Miller * return it, otherwise we block. 
1818db8dac20SDavid S. Miller */ 1819db8dac20SDavid S. Miller 1820ec095263SOliver Hartkopp int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 1821ec095263SOliver Hartkopp int *addr_len) 1822db8dac20SDavid S. Miller { 1823db8dac20SDavid S. Miller struct inet_sock *inet = inet_sk(sk); 1824342dfc30SSteffen Hurrle DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); 1825db8dac20SDavid S. Miller struct sk_buff *skb; 182659c2cdaeSDavid S. Miller unsigned int ulen, copied; 1827fd69c399SPaolo Abeni int off, err, peeking = flags & MSG_PEEK; 1828db8dac20SDavid S. Miller int is_udplite = IS_UDPLITE(sk); 1829197c949eSEric Dumazet bool checksum_valid = false; 1830db8dac20SDavid S. Miller 1831db8dac20SDavid S. Miller if (flags & MSG_ERRQUEUE) 183285fbaa75SHannes Frederic Sowa return ip_recv_error(sk, msg, len, addr_len); 1833db8dac20SDavid S. Miller 1834db8dac20SDavid S. Miller try_again: 1835a0917e0bSMatthew Dawson off = sk_peek_offset(sk, flags); 1836ec095263SOliver Hartkopp skb = __skb_recv_udp(sk, flags, &off, &err); 1837db8dac20SDavid S. Miller if (!skb) 1838627d2d6bSsamanthakumar return err; 1839db8dac20SDavid S. Miller 1840b65ac446SPaolo Abeni ulen = udp_skb_len(skb); 184159c2cdaeSDavid S. Miller copied = len; 1842627d2d6bSsamanthakumar if (copied > ulen - off) 1843627d2d6bSsamanthakumar copied = ulen - off; 184459c2cdaeSDavid S. Miller else if (copied < ulen) 1845db8dac20SDavid S. Miller msg->msg_flags |= MSG_TRUNC; 1846db8dac20SDavid S. Miller 1847db8dac20SDavid S. Miller /* 1848db8dac20SDavid S. Miller * If checksum is needed at all, try to do it while copying the 1849db8dac20SDavid S. Miller * data. If the data is truncated, or if we only want a partial 1850db8dac20SDavid S. Miller * coverage checksum (UDP-Lite), do it before the copy. 1851db8dac20SDavid S. Miller */ 1852db8dac20SDavid S. Miller 1853d21dbdfeSEric Dumazet if (copied < ulen || peeking || 1854d21dbdfeSEric Dumazet (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { 1855b65ac446SPaolo Abeni checksum_valid = udp_skb_csum_unnecessary(skb) || 1856b65ac446SPaolo Abeni !__udp_lib_checksum_complete(skb); 1857197c949eSEric Dumazet if (!checksum_valid) 1858db8dac20SDavid S. Miller goto csum_copy_err; 1859db8dac20SDavid S. Miller } 1860db8dac20SDavid S. Miller 1861b65ac446SPaolo Abeni if (checksum_valid || udp_skb_csum_unnecessary(skb)) { 1862b65ac446SPaolo Abeni if (udp_skb_is_linear(skb)) 1863b65ac446SPaolo Abeni err = copy_linear_skb(skb, copied, off, &msg->msg_iter); 1864b65ac446SPaolo Abeni else 1865627d2d6bSsamanthakumar err = skb_copy_datagram_msg(skb, off, msg, copied); 1866b65ac446SPaolo Abeni } else { 1867627d2d6bSsamanthakumar err = skb_copy_and_csum_datagram_msg(skb, off, msg); 1868db8dac20SDavid S. Miller 1869db8dac20SDavid S. Miller if (err == -EINVAL) 1870db8dac20SDavid S. Miller goto csum_copy_err; 1871db8dac20SDavid S. Miller } 1872db8dac20SDavid S. Miller 187322911fc5SEric Dumazet if (unlikely(err)) { 1874fd69c399SPaolo Abeni if (!peeking) { 1875979402b1SEric Dumazet atomic_inc(&sk->sk_drops); 18766aef70a8SEric Dumazet UDP_INC_STATS(sock_net(sk), 1877979402b1SEric Dumazet UDP_MIB_INERRORS, is_udplite); 1878979402b1SEric Dumazet } 1879850cbaddSPaolo Abeni kfree_skb(skb); 1880627d2d6bSsamanthakumar return err; 188122911fc5SEric Dumazet } 1882db8dac20SDavid S. Miller 1883fd69c399SPaolo Abeni if (!peeking) 18846aef70a8SEric Dumazet UDP_INC_STATS(sock_net(sk), 1885629ca23cSPavel Emelyanov UDP_MIB_INDATAGRAMS, is_udplite); 1886db8dac20SDavid S. 
Miller 18876fd1d51cSErin MacNeil sock_recv_cmsgs(msg, sk, skb); 1888db8dac20SDavid S. Miller 1889db8dac20SDavid S. Miller /* Copy the address. */ 1890c482c568SEric Dumazet if (sin) { 1891db8dac20SDavid S. Miller sin->sin_family = AF_INET; 1892db8dac20SDavid S. Miller sin->sin_port = udp_hdr(skb)->source; 1893db8dac20SDavid S. Miller sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 1894db8dac20SDavid S. Miller memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 1895bceaa902SHannes Frederic Sowa *addr_len = sizeof(*sin); 1896983695faSDaniel Borkmann 1897983695faSDaniel Borkmann BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, 1898983695faSDaniel Borkmann (struct sockaddr *)sin); 1899db8dac20SDavid S. Miller } 1900bcd1665eSPaolo Abeni 1901bcd1665eSPaolo Abeni if (udp_sk(sk)->gro_enabled) 1902bcd1665eSPaolo Abeni udp_cmsg_recv(msg, sk, skb); 1903bcd1665eSPaolo Abeni 1904db8dac20SDavid S. Miller if (inet->cmsg_flags) 1905ad959036SPaolo Abeni ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off); 1906db8dac20SDavid S. Miller 190759c2cdaeSDavid S. Miller err = copied; 1908db8dac20SDavid S. Miller if (flags & MSG_TRUNC) 1909db8dac20SDavid S. Miller err = ulen; 1910db8dac20SDavid S. Miller 1911850cbaddSPaolo Abeni skb_consume_udp(sk, skb, peeking ? -err : err); 1912db8dac20SDavid S. Miller return err; 1913db8dac20SDavid S. Miller 1914db8dac20SDavid S. Miller csum_copy_err: 19152276f58aSPaolo Abeni if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, 19162276f58aSPaolo Abeni udp_skb_destructor)) { 19176aef70a8SEric Dumazet UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 19186aef70a8SEric Dumazet UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 19196a5dc9e5SEric Dumazet } 1920850cbaddSPaolo Abeni kfree_skb(skb); 1921db8dac20SDavid S. Miller 1922beb39db5SEric Dumazet /* starting over for a new packet, but check if we need to yield */ 1923beb39db5SEric Dumazet cond_resched(); 19249cfaa8deSXufeng Zhang msg->msg_flags &= ~MSG_TRUNC; 1925db8dac20SDavid S. Miller goto try_again; 1926db8dac20SDavid S. Miller } 1927db8dac20SDavid S. Miller 1928d74bad4eSAndrey Ignatov int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 1929d74bad4eSAndrey Ignatov { 1930d74bad4eSAndrey Ignatov /* This check is replicated from __ip4_datagram_connect() and 1931d74bad4eSAndrey Ignatov * intended to prevent BPF program called below from accessing bytes 1932d74bad4eSAndrey Ignatov * that are out of the bound specified by user in addr_len. 1933d74bad4eSAndrey Ignatov */ 1934d74bad4eSAndrey Ignatov if (addr_len < sizeof(struct sockaddr_in)) 1935d74bad4eSAndrey Ignatov return -EINVAL; 1936d74bad4eSAndrey Ignatov 1937d74bad4eSAndrey Ignatov return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr); 1938d74bad4eSAndrey Ignatov } 1939d74bad4eSAndrey Ignatov EXPORT_SYMBOL(udp_pre_connect); 1940d74bad4eSAndrey Ignatov 1941286c72deSEric Dumazet int __udp_disconnect(struct sock *sk, int flags) 19421da177e4SLinus Torvalds { 19431da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 19441da177e4SLinus Torvalds /* 19451da177e4SLinus Torvalds * 1003.1g - break association. 
19461da177e4SLinus Torvalds */ 19471da177e4SLinus Torvalds 19481da177e4SLinus Torvalds sk->sk_state = TCP_CLOSE; 1949c720c7e8SEric Dumazet inet->inet_daddr = 0; 1950c720c7e8SEric Dumazet inet->inet_dport = 0; 1951bdeab991STom Herbert sock_rps_reset_rxhash(sk); 19521da177e4SLinus Torvalds sk->sk_bound_dev_if = 0; 1953303d0403SWillem de Bruijn if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) { 19541da177e4SLinus Torvalds inet_reset_saddr(sk); 1955303d0403SWillem de Bruijn if (sk->sk_prot->rehash && 1956303d0403SWillem de Bruijn (sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 1957303d0403SWillem de Bruijn sk->sk_prot->rehash(sk); 1958303d0403SWillem de Bruijn } 19591da177e4SLinus Torvalds 19601da177e4SLinus Torvalds if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { 19611da177e4SLinus Torvalds sk->sk_prot->unhash(sk); 1962c720c7e8SEric Dumazet inet->inet_sport = 0; 19631da177e4SLinus Torvalds } 19641da177e4SLinus Torvalds sk_dst_reset(sk); 19651da177e4SLinus Torvalds return 0; 19661da177e4SLinus Torvalds } 1967286c72deSEric Dumazet EXPORT_SYMBOL(__udp_disconnect); 1968286c72deSEric Dumazet 1969286c72deSEric Dumazet int udp_disconnect(struct sock *sk, int flags) 1970286c72deSEric Dumazet { 1971286c72deSEric Dumazet lock_sock(sk); 1972286c72deSEric Dumazet __udp_disconnect(sk, flags); 1973286c72deSEric Dumazet release_sock(sk); 1974286c72deSEric Dumazet return 0; 1975286c72deSEric Dumazet } 1976c482c568SEric Dumazet EXPORT_SYMBOL(udp_disconnect); 19771da177e4SLinus Torvalds 1978645ca708SEric Dumazet void udp_lib_unhash(struct sock *sk) 1979645ca708SEric Dumazet { 1980723b4610SEric Dumazet if (sk_hashed(sk)) { 198167fb4330SKuniyuki Iwashima struct udp_table *udptable = udp_get_table_prot(sk); 1982512615b6SEric Dumazet struct udp_hslot *hslot, *hslot2; 1983512615b6SEric Dumazet 1984512615b6SEric Dumazet hslot = udp_hashslot(udptable, sock_net(sk), 1985d4cada4aSEric Dumazet udp_sk(sk)->udp_port_hash); 1986512615b6SEric Dumazet hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); 1987645ca708SEric Dumazet 1988c8db3fecSEric Dumazet spin_lock_bh(&hslot->lock); 1989e32ea7e7SCraig Gallek if (rcu_access_pointer(sk->sk_reuseport_cb)) 1990e32ea7e7SCraig Gallek reuseport_detach_sock(sk); 1991ca065d0cSEric Dumazet if (sk_del_node_init_rcu(sk)) { 1992fdcc8aa9SEric Dumazet hslot->count--; 1993c720c7e8SEric Dumazet inet_sk(sk)->inet_num = 0; 1994645ca708SEric Dumazet sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 1995512615b6SEric Dumazet 1996512615b6SEric Dumazet spin_lock(&hslot2->lock); 1997ca065d0cSEric Dumazet hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); 1998512615b6SEric Dumazet hslot2->count--; 1999512615b6SEric Dumazet spin_unlock(&hslot2->lock); 2000645ca708SEric Dumazet } 2001c8db3fecSEric Dumazet spin_unlock_bh(&hslot->lock); 2002645ca708SEric Dumazet } 2003723b4610SEric Dumazet } 2004645ca708SEric Dumazet EXPORT_SYMBOL(udp_lib_unhash); 2005645ca708SEric Dumazet 2006719f8358SEric Dumazet /* 2007719f8358SEric Dumazet * inet_rcv_saddr was changed, we must rehash secondary hash 2008719f8358SEric Dumazet */ 2009719f8358SEric Dumazet void udp_lib_rehash(struct sock *sk, u16 newhash) 2010719f8358SEric Dumazet { 2011719f8358SEric Dumazet if (sk_hashed(sk)) { 201267fb4330SKuniyuki Iwashima struct udp_table *udptable = udp_get_table_prot(sk); 2013719f8358SEric Dumazet struct udp_hslot *hslot, *hslot2, *nhslot2; 2014719f8358SEric Dumazet 2015719f8358SEric Dumazet hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); 2016719f8358SEric Dumazet nhslot2 = udp_hashslot2(udptable, newhash); 
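/* Editorial note, not part of the original source: a sketch of the rehash
 * step that follows, inferred from this function. The old (hslot2) and new
 * (nhslot2) secondary slots are looked up first; the portaddr hash is then
 * updated, and the socket is unlinked and relinked only when the secondary
 * slot actually changes or a reuseport group has to be detached, always
 * under the primary slot lock.
 */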
2017719f8358SEric Dumazet udp_sk(sk)->udp_portaddr_hash = newhash; 2018e32ea7e7SCraig Gallek 2019e32ea7e7SCraig Gallek if (hslot2 != nhslot2 || 2020e32ea7e7SCraig Gallek rcu_access_pointer(sk->sk_reuseport_cb)) { 2021719f8358SEric Dumazet hslot = udp_hashslot(udptable, sock_net(sk), 2022719f8358SEric Dumazet udp_sk(sk)->udp_port_hash); 2023719f8358SEric Dumazet /* we must lock primary chain too */ 2024719f8358SEric Dumazet spin_lock_bh(&hslot->lock); 2025e32ea7e7SCraig Gallek if (rcu_access_pointer(sk->sk_reuseport_cb)) 2026e32ea7e7SCraig Gallek reuseport_detach_sock(sk); 2027719f8358SEric Dumazet 2028e32ea7e7SCraig Gallek if (hslot2 != nhslot2) { 2029719f8358SEric Dumazet spin_lock(&hslot2->lock); 2030ca065d0cSEric Dumazet hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); 2031719f8358SEric Dumazet hslot2->count--; 2032719f8358SEric Dumazet spin_unlock(&hslot2->lock); 2033719f8358SEric Dumazet 2034719f8358SEric Dumazet spin_lock(&nhslot2->lock); 2035ca065d0cSEric Dumazet hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, 2036719f8358SEric Dumazet &nhslot2->head); 2037719f8358SEric Dumazet nhslot2->count++; 2038719f8358SEric Dumazet spin_unlock(&nhslot2->lock); 2039e32ea7e7SCraig Gallek } 2040719f8358SEric Dumazet 2041719f8358SEric Dumazet spin_unlock_bh(&hslot->lock); 2042719f8358SEric Dumazet } 2043719f8358SEric Dumazet } 2044719f8358SEric Dumazet } 2045719f8358SEric Dumazet EXPORT_SYMBOL(udp_lib_rehash); 2046719f8358SEric Dumazet 20478f6b5392SAlexey Kodanev void udp_v4_rehash(struct sock *sk) 2048719f8358SEric Dumazet { 2049f0b1e64cSMartin KaFai Lau u16 new_hash = ipv4_portaddr_hash(sock_net(sk), 2050719f8358SEric Dumazet inet_sk(sk)->inet_rcv_saddr, 2051719f8358SEric Dumazet inet_sk(sk)->inet_num); 2052719f8358SEric Dumazet udp_lib_rehash(sk, new_hash); 2053719f8358SEric Dumazet } 2054719f8358SEric Dumazet 2055a3f96c47SPaolo Abeni static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 205693821778SHerbert Xu { 2057fec5e652STom Herbert int rc; 205893821778SHerbert Xu 2059005ec974SShawn Bohrer if (inet_sk(sk)->inet_daddr) { 2060bdeab991STom Herbert sock_rps_save_rxhash(sk, skb); 2061005ec974SShawn Bohrer sk_mark_napi_id(sk, skb); 20622c8c56e1SEric Dumazet sk_incoming_cpu_update(sk); 2063e68b6e50SEric Dumazet } else { 2064e68b6e50SEric Dumazet sk_mark_napi_id_once(sk, skb); 2065005ec974SShawn Bohrer } 2066fec5e652STom Herbert 2067850cbaddSPaolo Abeni rc = __udp_enqueue_schedule_skb(sk, skb); 2068766e9037SEric Dumazet if (rc < 0) { 2069766e9037SEric Dumazet int is_udplite = IS_UDPLITE(sk); 207008d4c037SMenglong Dong int drop_reason; 2071766e9037SEric Dumazet 207293821778SHerbert Xu /* Note that an ENOMEM error is charged twice */ 207308d4c037SMenglong Dong if (rc == -ENOMEM) { 2074e61da9e2SEric Dumazet UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, 207593821778SHerbert Xu is_udplite); 207608d4c037SMenglong Dong drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 207708d4c037SMenglong Dong } else { 2078a3ce2b10SMenglong Dong UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS, 2079a3ce2b10SMenglong Dong is_udplite); 208008d4c037SMenglong Dong drop_reason = SKB_DROP_REASON_PROTO_MEM; 208108d4c037SMenglong Dong } 2082e61da9e2SEric Dumazet UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 208308d4c037SMenglong Dong kfree_skb_reason(skb, drop_reason); 2084296f7ea7SSatoru Moriya trace_udp_fail_queue_rcv_skb(rc, sk); 2085766e9037SEric Dumazet return -1; 208693821778SHerbert Xu } 208793821778SHerbert Xu 208893821778SHerbert Xu return 0; 208993821778SHerbert Xu } 209093821778SHerbert 
Xu 2091db8dac20SDavid S. Miller /* returns: 2092db8dac20SDavid S. Miller * -1: error 2093db8dac20SDavid S. Miller * 0: success 2094db8dac20SDavid S. Miller * >0: "udp encap" protocol resubmission 2095db8dac20SDavid S. Miller * 2096db8dac20SDavid S. Miller * Note that in the success and error cases, the skb is assumed to 2097db8dac20SDavid S. Miller * have either been requeued or freed. 2098db8dac20SDavid S. Miller */ 2099cf329aa4SPaolo Abeni static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) 2100db8dac20SDavid S. Miller { 21011379a92dSMenglong Dong int drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 2102db8dac20SDavid S. Miller struct udp_sock *up = udp_sk(sk); 2103db8dac20SDavid S. Miller int is_udplite = IS_UDPLITE(sk); 2104db8dac20SDavid S. Miller 2105db8dac20SDavid S. Miller /* 2106db8dac20SDavid S. Miller * Charge it to the socket, dropping if the queue is full. 2107db8dac20SDavid S. Miller */ 21081379a92dSMenglong Dong if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { 21091379a92dSMenglong Dong drop_reason = SKB_DROP_REASON_XFRM_POLICY; 2110db8dac20SDavid S. Miller goto drop; 21111379a92dSMenglong Dong } 2112895b5c9fSFlorian Westphal nf_reset_ct(skb); 2113db8dac20SDavid S. Miller 211488ab3108SDavidlohr Bueso if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) { 21150ad92ad0SEric Dumazet int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 21160ad92ad0SEric Dumazet 2117db8dac20SDavid S. Miller /* 2118db8dac20SDavid S. Miller * This is an encapsulation socket so pass the skb to 2119db8dac20SDavid S. Miller * the socket's udp_encap_rcv() hook. Otherwise, just 2120db8dac20SDavid S. Miller * fall through and pass this up the UDP socket. 2121db8dac20SDavid S. Miller * up->encap_rcv() returns the following value: 2122db8dac20SDavid S. Miller * =0 if skb was successfully passed to the encap 2123db8dac20SDavid S. Miller * handler or was discarded by it. 2124db8dac20SDavid S. Miller * >0 if skb should be passed on to UDP. 2125db8dac20SDavid S. Miller * <0 if skb should be resubmitted as proto -N 2126db8dac20SDavid S. Miller */ 2127db8dac20SDavid S. Miller 2128db8dac20SDavid S. Miller /* if we're overly short, let UDP handle it */ 21296aa7de05SMark Rutland encap_rcv = READ_ONCE(up->encap_rcv); 2130e5aed006SHannes Frederic Sowa if (encap_rcv) { 2131db8dac20SDavid S. Miller int ret; 2132db8dac20SDavid S. Miller 21330a80966bSTom Herbert /* Verify checksum before giving to encap */ 21340a80966bSTom Herbert if (udp_lib_checksum_complete(skb)) 21350a80966bSTom Herbert goto csum_error; 21360a80966bSTom Herbert 21370ad92ad0SEric Dumazet ret = encap_rcv(sk, skb); 2138db8dac20SDavid S. Miller if (ret <= 0) { 213902c22347SEric Dumazet __UDP_INC_STATS(sock_net(sk), 21400283328eSPavel Emelyanov UDP_MIB_INDATAGRAMS, 2141db8dac20SDavid S. Miller is_udplite); 2142db8dac20SDavid S. Miller return -ret; 2143db8dac20SDavid S. Miller } 2144db8dac20SDavid S. Miller } 2145db8dac20SDavid S. Miller 2146db8dac20SDavid S. Miller /* FALLTHROUGH -- it's a UDP Packet */ 2147db8dac20SDavid S. Miller } 2148db8dac20SDavid S. Miller 2149db8dac20SDavid S. Miller /* 2150db8dac20SDavid S. Miller * UDP-Lite specific tests, ignored on UDP sockets 2151db8dac20SDavid S. Miller */ 2152b0a42277SMiaohe Lin if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 2153db8dac20SDavid S. Miller 2154db8dac20SDavid S. Miller /* 2155db8dac20SDavid S. Miller * MIB statistics other than incrementing the error count are 2156db8dac20SDavid S. 
Miller * disabled for the following two types of errors: these depend 2157db8dac20SDavid S. Miller * on the application settings, not on the functioning of the 2158db8dac20SDavid S. Miller * protocol stack as such. 2159db8dac20SDavid S. Miller * 2160db8dac20SDavid S. Miller * RFC 3828 here recommends (sec 3.3): "There should also be a 2161db8dac20SDavid S. Miller * way ... to ... at least let the receiving application block 2162db8dac20SDavid S. Miller * delivery of packets with coverage values less than a value 2163db8dac20SDavid S. Miller * provided by the application." 2164db8dac20SDavid S. Miller */ 2165db8dac20SDavid S. Miller if (up->pcrlen == 0) { /* full coverage was set */ 2166ba7a46f1SJoe Perches net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n", 2167db8dac20SDavid S. Miller UDP_SKB_CB(skb)->cscov, skb->len); 2168db8dac20SDavid S. Miller goto drop; 2169db8dac20SDavid S. Miller } 2170db8dac20SDavid S. Miller /* The next case involves violating the min. coverage requested 2171db8dac20SDavid S. Miller * by the receiver. This is subtle: if receiver wants x and x is 2172db8dac20SDavid S. Miller * greater than the buffersize/MTU then receiver will complain 2173db8dac20SDavid S. Miller * that it wants x while sender emits packets of smaller size y. 2174db8dac20SDavid S. Miller * Therefore the above ...()->partial_cov statement is essential. 2175db8dac20SDavid S. Miller */ 2176db8dac20SDavid S. Miller if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { 2177ba7a46f1SJoe Perches net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n", 2178db8dac20SDavid S. Miller UDP_SKB_CB(skb)->cscov, up->pcrlen); 2179db8dac20SDavid S. Miller goto drop; 2180db8dac20SDavid S. Miller } 2181db8dac20SDavid S. Miller } 2182db8dac20SDavid S. Miller 2183dd99e425SPaolo Abeni prefetch(&sk->sk_rmem_alloc); 2184ce25d66aSEric Dumazet if (rcu_access_pointer(sk->sk_filter) && 2185ce25d66aSEric Dumazet udp_lib_checksum_complete(skb)) 21866a5dc9e5SEric Dumazet goto csum_error; 2187ce25d66aSEric Dumazet 21881379a92dSMenglong Dong if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) { 21891379a92dSMenglong Dong drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 2190a6127697SMichal Kubeček goto drop; 21911379a92dSMenglong Dong } 2192db8dac20SDavid S. Miller 2193e6afc8acSsamanthakumar udp_csum_pull_header(skb); 2194db8dac20SDavid S. Miller 2195fbf8866dSShawn Bohrer ipv4_pktinfo_prepare(sk, skb); 2196850cbaddSPaolo Abeni return __udp_queue_rcv_skb(sk, skb); 2197db8dac20SDavid S. Miller 21986a5dc9e5SEric Dumazet csum_error: 21991379a92dSMenglong Dong drop_reason = SKB_DROP_REASON_UDP_CSUM; 220002c22347SEric Dumazet __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 2201db8dac20SDavid S. Miller drop: 220202c22347SEric Dumazet __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 22038edf19c2SEric Dumazet atomic_inc(&sk->sk_drops); 22041379a92dSMenglong Dong kfree_skb_reason(skb, drop_reason); 2205db8dac20SDavid S. Miller return -1; 2206db8dac20SDavid S. Miller } 2207db8dac20SDavid S. 
Miller 2208cf329aa4SPaolo Abeni static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 2209cf329aa4SPaolo Abeni { 2210cf329aa4SPaolo Abeni struct sk_buff *next, *segs; 2211cf329aa4SPaolo Abeni int ret; 2212cf329aa4SPaolo Abeni 2213cf329aa4SPaolo Abeni if (likely(!udp_unexpected_gso(sk, skb))) 2214cf329aa4SPaolo Abeni return udp_queue_rcv_one_skb(sk, skb); 2215cf329aa4SPaolo Abeni 2216a08e7fd9SCambda Zhu BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET); 2217cf329aa4SPaolo Abeni __skb_push(skb, -skb_mac_offset(skb)); 2218cf329aa4SPaolo Abeni segs = udp_rcv_segment(sk, skb, true); 22191a186c14SJason A. Donenfeld skb_list_walk_safe(segs, skb, next) { 2220cf329aa4SPaolo Abeni __skb_pull(skb, skb_transport_offset(skb)); 2221000ac44dSPaolo Abeni 2222000ac44dSPaolo Abeni udp_post_segment_fix_csum(skb); 2223cf329aa4SPaolo Abeni ret = udp_queue_rcv_one_skb(sk, skb); 2224cf329aa4SPaolo Abeni if (ret > 0) 222510c678bdSXin Long ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret); 2226cf329aa4SPaolo Abeni } 2227cf329aa4SPaolo Abeni return 0; 2228cf329aa4SPaolo Abeni } 2229cf329aa4SPaolo Abeni 223097502231SEric Dumazet /* For TCP sockets, sk_rx_dst is protected by socket lock 2231e47eb5dfSEric Dumazet * For UDP, we use xchg() to guard against concurrent changes. 223297502231SEric Dumazet */ 223364f0f5d1SPaolo Abeni bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) 2234421b3885SShawn Bohrer { 223597502231SEric Dumazet struct dst_entry *old; 2236421b3885SShawn Bohrer 2237d24406c8SWei Wang if (dst_hold_safe(dst)) { 22388f905c0eSEric Dumazet old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); 223997502231SEric Dumazet dst_release(old); 224064f0f5d1SPaolo Abeni return old != dst; 224197502231SEric Dumazet } 224264f0f5d1SPaolo Abeni return false; 2243d24406c8SWei Wang } 2244c9f2c1aeSPaolo Abeni EXPORT_SYMBOL(udp_sk_rx_dst_set); 2245421b3885SShawn Bohrer 2246db8dac20SDavid S. Miller /* 2247db8dac20SDavid S. Miller * Multicasts and broadcasts go to each listener. 2248db8dac20SDavid S. Miller * 22491240d137SEric Dumazet * Note: called only from the BH handler context. 2250db8dac20SDavid S. Miller */ 2251e3163493SPavel Emelyanov static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 2252db8dac20SDavid S. Miller struct udphdr *uh, 2253db8dac20SDavid S. Miller __be32 saddr, __be32 daddr, 225436cbb245SRick Jones struct udp_table *udptable, 225536cbb245SRick Jones int proto) 2256db8dac20SDavid S. 
Miller { 2257ca065d0cSEric Dumazet struct sock *sk, *first = NULL; 22585cf3d461SDavid Held unsigned short hnum = ntohs(uh->dest); 22595cf3d461SDavid Held struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); 22602dc41cffSDavid Held unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); 2261ca065d0cSEric Dumazet unsigned int offset = offsetof(typeof(*sk), sk_node); 2262ca065d0cSEric Dumazet int dif = skb->dev->ifindex; 2263fb74c277SDavid Ahern int sdif = inet_sdif(skb); 2264ca065d0cSEric Dumazet struct hlist_node *node; 2265ca065d0cSEric Dumazet struct sk_buff *nskb; 22662dc41cffSDavid Held 22672dc41cffSDavid Held if (use_hash2) { 2268f0b1e64cSMartin KaFai Lau hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & 226973e2d5e3SPablo Neira udptable->mask; 2270f0b1e64cSMartin KaFai Lau hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask; 22712dc41cffSDavid Held start_lookup: 227273e2d5e3SPablo Neira hslot = &udptable->hash2[hash2]; 22732dc41cffSDavid Held offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 22742dc41cffSDavid Held } 2275db8dac20SDavid S. Miller 2276ca065d0cSEric Dumazet sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { 2277ca065d0cSEric Dumazet if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr, 2278fb74c277SDavid Ahern uh->source, saddr, dif, sdif, hnum)) 2279ca065d0cSEric Dumazet continue; 22801240d137SEric Dumazet 2281ca065d0cSEric Dumazet if (!first) { 2282ca065d0cSEric Dumazet first = sk; 2283ca065d0cSEric Dumazet continue; 2284ca065d0cSEric Dumazet } 2285ca065d0cSEric Dumazet nskb = skb_clone(skb, GFP_ATOMIC); 2286ca065d0cSEric Dumazet 2287ca065d0cSEric Dumazet if (unlikely(!nskb)) { 2288ca065d0cSEric Dumazet atomic_inc(&sk->sk_drops); 228902c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 2290ca065d0cSEric Dumazet IS_UDPLITE(sk)); 229102c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_INERRORS, 2292ca065d0cSEric Dumazet IS_UDPLITE(sk)); 2293ca065d0cSEric Dumazet continue; 2294ca065d0cSEric Dumazet } 2295ca065d0cSEric Dumazet if (udp_queue_rcv_skb(sk, nskb) > 0) 2296ca065d0cSEric Dumazet consume_skb(nskb); 2297ca065d0cSEric Dumazet } 22981240d137SEric Dumazet 22992dc41cffSDavid Held /* Also lookup *:port if we are using hash2 and haven't done so yet. */ 23002dc41cffSDavid Held if (use_hash2 && hash2 != hash2_any) { 23012dc41cffSDavid Held hash2 = hash2_any; 23022dc41cffSDavid Held goto start_lookup; 23032dc41cffSDavid Held } 23042dc41cffSDavid Held 2305ca065d0cSEric Dumazet if (first) { 2306ca065d0cSEric Dumazet if (udp_queue_rcv_skb(first, skb) > 0) 2307ca065d0cSEric Dumazet consume_skb(skb); 23081240d137SEric Dumazet } else { 2309ca065d0cSEric Dumazet kfree_skb(skb); 231002c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI, 231136cbb245SRick Jones proto == IPPROTO_UDPLITE); 23121240d137SEric Dumazet } 2313db8dac20SDavid S. Miller return 0; 2314db8dac20SDavid S. Miller } 2315db8dac20SDavid S. Miller 2316db8dac20SDavid S. Miller /* Initialize UDP checksum. If exited with zero value (success), 2317db8dac20SDavid S. Miller * CHECKSUM_UNNECESSARY means, that no more checks are required. 2318666a3d6eSSu Yanjun * Otherwise, csum completion requires checksumming packet body, 2319db8dac20SDavid S. Miller * including udp header and folding it to skb->csum. 2320db8dac20SDavid S. Miller */ 2321db8dac20SDavid S. Miller static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, 2322db8dac20SDavid S. Miller int proto) 2323db8dac20SDavid S. Miller { 2324db8dac20SDavid S. 
Miller int err; 2325db8dac20SDavid S. Miller 2326db8dac20SDavid S. Miller UDP_SKB_CB(skb)->partial_cov = 0; 2327db8dac20SDavid S. Miller UDP_SKB_CB(skb)->cscov = skb->len; 2328db8dac20SDavid S. Miller 2329db8dac20SDavid S. Miller if (proto == IPPROTO_UDPLITE) { 2330db8dac20SDavid S. Miller err = udplite_checksum_init(skb, uh); 2331db8dac20SDavid S. Miller if (err) 2332db8dac20SDavid S. Miller return err; 233315f35d49SAlexey Kodanev 233415f35d49SAlexey Kodanev if (UDP_SKB_CB(skb)->partial_cov) { 233515f35d49SAlexey Kodanev skb->csum = inet_compute_pseudo(skb, proto); 233615f35d49SAlexey Kodanev return 0; 233715f35d49SAlexey Kodanev } 2338db8dac20SDavid S. Miller } 2339db8dac20SDavid S. Miller 2340b46d9f62SHannes Frederic Sowa /* Note, we are only interested in != 0 or == 0, thus the 2341b46d9f62SHannes Frederic Sowa * force to int. 2342b46d9f62SHannes Frederic Sowa */ 2343db4f1be3SSean Tranchetti err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check, 2344ed70fcfcSTom Herbert inet_compute_pseudo); 2345db4f1be3SSean Tranchetti if (err) 2346db4f1be3SSean Tranchetti return err; 2347db4f1be3SSean Tranchetti 2348db4f1be3SSean Tranchetti if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) { 2349db4f1be3SSean Tranchetti /* If SW calculated the value, we know it's bad */ 2350db4f1be3SSean Tranchetti if (skb->csum_complete_sw) 2351db4f1be3SSean Tranchetti return 1; 2352db4f1be3SSean Tranchetti 2353db4f1be3SSean Tranchetti /* HW says the value is bad. Let's validate that. 2354db4f1be3SSean Tranchetti * skb->csum is no longer the full packet checksum, 2355db4f1be3SSean Tranchetti * so don't treat it as such. 2356db4f1be3SSean Tranchetti */ 2357db4f1be3SSean Tranchetti skb_checksum_complete_unset(skb); 2358db4f1be3SSean Tranchetti } 2359db4f1be3SSean Tranchetti 2360db4f1be3SSean Tranchetti return 0; 2361db8dac20SDavid S. Miller } 2362db8dac20SDavid S. Miller 23632b5a9217SPaolo Abeni /* wrapper for udp_queue_rcv_skb tacking care of csum conversion and 23642b5a9217SPaolo Abeni * return code conversion for ip layer consumption 23652b5a9217SPaolo Abeni */ 23662b5a9217SPaolo Abeni static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, 23672b5a9217SPaolo Abeni struct udphdr *uh) 23682b5a9217SPaolo Abeni { 23692b5a9217SPaolo Abeni int ret; 23702b5a9217SPaolo Abeni 23712b5a9217SPaolo Abeni if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) 2372e4aa33adSLi RongQing skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo); 23732b5a9217SPaolo Abeni 23742b5a9217SPaolo Abeni ret = udp_queue_rcv_skb(sk, skb); 23752b5a9217SPaolo Abeni 23762b5a9217SPaolo Abeni /* a return value > 0 means to resubmit the input, but 23772b5a9217SPaolo Abeni * it wants the return to be -protocol, or 0 23782b5a9217SPaolo Abeni */ 23792b5a9217SPaolo Abeni if (ret > 0) 23802b5a9217SPaolo Abeni return -ret; 23812b5a9217SPaolo Abeni return 0; 23822b5a9217SPaolo Abeni } 23832b5a9217SPaolo Abeni 2384db8dac20SDavid S. Miller /* 2385db8dac20SDavid S. Miller * All we need to do is get the socket, and then do a checksum. 2386db8dac20SDavid S. Miller */ 2387db8dac20SDavid S. Miller 2388645ca708SEric Dumazet int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 2389db8dac20SDavid S. Miller int proto) 2390db8dac20SDavid S. Miller { 2391db8dac20SDavid S. Miller struct sock *sk; 23927b5e56f9SJesper Dangaard Brouer struct udphdr *uh; 2393db8dac20SDavid S. 
Miller unsigned short ulen; 2394adf30907SEric Dumazet struct rtable *rt = skb_rtable(skb); 23952783ef23SJesper Dangaard Brouer __be32 saddr, daddr; 23960283328eSPavel Emelyanov struct net *net = dev_net(skb->dev); 239771489e21SJoe Stringer bool refcounted; 23981c7fab70SMenglong Dong int drop_reason; 23991c7fab70SMenglong Dong 24001c7fab70SMenglong Dong drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 2401db8dac20SDavid S. Miller 2402db8dac20SDavid S. Miller /* 2403db8dac20SDavid S. Miller * Validate the packet. 2404db8dac20SDavid S. Miller */ 2405db8dac20SDavid S. Miller if (!pskb_may_pull(skb, sizeof(struct udphdr))) 2406db8dac20SDavid S. Miller goto drop; /* No space for header. */ 2407db8dac20SDavid S. Miller 24087b5e56f9SJesper Dangaard Brouer uh = udp_hdr(skb); 2409db8dac20SDavid S. Miller ulen = ntohs(uh->len); 2410ccc2d97cSBjørn Mork saddr = ip_hdr(skb)->saddr; 2411ccc2d97cSBjørn Mork daddr = ip_hdr(skb)->daddr; 2412ccc2d97cSBjørn Mork 2413db8dac20SDavid S. Miller if (ulen > skb->len) 2414db8dac20SDavid S. Miller goto short_packet; 2415db8dac20SDavid S. Miller 2416db8dac20SDavid S. Miller if (proto == IPPROTO_UDP) { 2417db8dac20SDavid S. Miller /* UDP validates ulen. */ 2418db8dac20SDavid S. Miller if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) 2419db8dac20SDavid S. Miller goto short_packet; 2420db8dac20SDavid S. Miller uh = udp_hdr(skb); 2421db8dac20SDavid S. Miller } 2422db8dac20SDavid S. Miller 2423db8dac20SDavid S. Miller if (udp4_csum_init(skb, uh, proto)) 2424db8dac20SDavid S. Miller goto csum_error; 2425db8dac20SDavid S. Miller 242671489e21SJoe Stringer sk = skb_steal_sock(skb, &refcounted); 24278afdd99aSEric Dumazet if (sk) { 242897502231SEric Dumazet struct dst_entry *dst = skb_dst(skb); 2429421b3885SShawn Bohrer int ret; 2430421b3885SShawn Bohrer 24318f905c0eSEric Dumazet if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) 243297502231SEric Dumazet udp_sk_rx_dst_set(sk, dst); 2433421b3885SShawn Bohrer 24342b5a9217SPaolo Abeni ret = udp_unicast_rcv_skb(sk, skb, uh); 243571489e21SJoe Stringer if (refcounted) 24368afdd99aSEric Dumazet sock_put(sk); 24372b5a9217SPaolo Abeni return ret; 2438c18450a5SFabian Frederick } 2439c18450a5SFabian Frederick 2440db8dac20SDavid S. Miller if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 2441e3163493SPavel Emelyanov return __udp4_lib_mcast_deliver(net, skb, uh, 244236cbb245SRick Jones saddr, daddr, udptable, proto); 2443db8dac20SDavid S. Miller 2444607c4aafSKOVACS Krisztian sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 24452b5a9217SPaolo Abeni if (sk) 24462b5a9217SPaolo Abeni return udp_unicast_rcv_skb(sk, skb, uh); 2447db8dac20SDavid S. Miller 2448db8dac20SDavid S. Miller if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 2449db8dac20SDavid S. Miller goto drop; 2450895b5c9fSFlorian Westphal nf_reset_ct(skb); 2451db8dac20SDavid S. Miller 2452db8dac20SDavid S. Miller /* No socket. Drop packet silently, if checksum is wrong */ 2453db8dac20SDavid S. Miller if (udp_lib_checksum_complete(skb)) 2454db8dac20SDavid S. Miller goto csum_error; 2455db8dac20SDavid S. Miller 24561c7fab70SMenglong Dong drop_reason = SKB_DROP_REASON_NO_SOCKET; 245702c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 2458db8dac20SDavid S. Miller icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 2459db8dac20SDavid S. Miller 2460db8dac20SDavid S. Miller /* 2461db8dac20SDavid S. Miller * Hmm. We got an UDP packet to a port to which we 2462db8dac20SDavid S. Miller * don't wanna listen. Ignore it. 
2463db8dac20SDavid S. Miller */ 24641c7fab70SMenglong Dong kfree_skb_reason(skb, drop_reason); 2465db8dac20SDavid S. Miller return 0; 2466db8dac20SDavid S. Miller 2467db8dac20SDavid S. Miller short_packet: 24681c7fab70SMenglong Dong drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL; 2469ba7a46f1SJoe Perches net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", 2470afd46503SJoe Perches proto == IPPROTO_UDPLITE ? "Lite" : "", 2471afd46503SJoe Perches &saddr, ntohs(uh->source), 2472afd46503SJoe Perches ulen, skb->len, 2473afd46503SJoe Perches &daddr, ntohs(uh->dest)); 2474db8dac20SDavid S. Miller goto drop; 2475db8dac20SDavid S. Miller 2476db8dac20SDavid S. Miller csum_error: 2477db8dac20SDavid S. Miller /* 2478db8dac20SDavid S. Miller * RFC1122: OK. Discards the bad packet silently (as far as 2479db8dac20SDavid S. Miller * the network is concerned, anyway) as per 4.1.3.4 (MUST). 2480db8dac20SDavid S. Miller */ 24811c7fab70SMenglong Dong drop_reason = SKB_DROP_REASON_UDP_CSUM; 2482ba7a46f1SJoe Perches net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", 2483afd46503SJoe Perches proto == IPPROTO_UDPLITE ? "Lite" : "", 2484afd46503SJoe Perches &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), 2485db8dac20SDavid S. Miller ulen); 248602c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 2487db8dac20SDavid S. Miller drop: 248802c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 24891c7fab70SMenglong Dong kfree_skb_reason(skb, drop_reason); 2490db8dac20SDavid S. Miller return 0; 2491db8dac20SDavid S. Miller } 2492db8dac20SDavid S. Miller 2493421b3885SShawn Bohrer /* We can only early demux multicast if there is a single matching socket. 2494421b3885SShawn Bohrer * If more than one socket found returns NULL 2495421b3885SShawn Bohrer */ 2496421b3885SShawn Bohrer static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, 2497421b3885SShawn Bohrer __be16 loc_port, __be32 loc_addr, 2498421b3885SShawn Bohrer __be16 rmt_port, __be32 rmt_addr, 2499fb74c277SDavid Ahern int dif, int sdif) 2500421b3885SShawn Bohrer { 2501ba6aac15SKuniyuki Iwashima struct udp_table *udptable = net->ipv4.udp_table; 2502421b3885SShawn Bohrer unsigned short hnum = ntohs(loc_port); 2503919dfa0bSKuniyuki Iwashima struct sock *sk, *result; 2504919dfa0bSKuniyuki Iwashima struct udp_hslot *hslot; 2505919dfa0bSKuniyuki Iwashima unsigned int slot; 2506919dfa0bSKuniyuki Iwashima 2507ba6aac15SKuniyuki Iwashima slot = udp_hashfn(net, hnum, udptable->mask); 2508ba6aac15SKuniyuki Iwashima hslot = &udptable->hash[slot]; 2509421b3885SShawn Bohrer 251063c6f81cSEric Dumazet /* Do not bother scanning a too big list */ 251163c6f81cSEric Dumazet if (hslot->count > 10) 251263c6f81cSEric Dumazet return NULL; 251363c6f81cSEric Dumazet 2514421b3885SShawn Bohrer result = NULL; 2515ca065d0cSEric Dumazet sk_for_each_rcu(sk, &hslot->head) { 2516ca065d0cSEric Dumazet if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr, 2517fb74c277SDavid Ahern rmt_port, rmt_addr, dif, sdif, hnum)) { 2518ca065d0cSEric Dumazet if (result) 2519ca065d0cSEric Dumazet return NULL; 2520421b3885SShawn Bohrer result = sk; 2521421b3885SShawn Bohrer } 2522421b3885SShawn Bohrer } 2523421b3885SShawn Bohrer 2524421b3885SShawn Bohrer return result; 2525421b3885SShawn Bohrer } 2526421b3885SShawn Bohrer 2527421b3885SShawn Bohrer /* For unicast we should only early demux connected sockets or we can 2528421b3885SShawn Bohrer * break forwarding setups. 
The chains here can be long so only check 2529421b3885SShawn Bohrer * if the first socket is an exact match and if not move on. 2530421b3885SShawn Bohrer */ 2531421b3885SShawn Bohrer static struct sock *__udp4_lib_demux_lookup(struct net *net, 2532421b3885SShawn Bohrer __be16 loc_port, __be32 loc_addr, 2533421b3885SShawn Bohrer __be16 rmt_port, __be32 rmt_addr, 25343fa6f616SDavid Ahern int dif, int sdif) 2535421b3885SShawn Bohrer { 2536ba6aac15SKuniyuki Iwashima struct udp_table *udptable = net->ipv4.udp_table; 2537c7228317SJoe Perches INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr); 2538919dfa0bSKuniyuki Iwashima unsigned short hnum = ntohs(loc_port); 2539919dfa0bSKuniyuki Iwashima unsigned int hash2, slot2; 2540919dfa0bSKuniyuki Iwashima struct udp_hslot *hslot2; 2541919dfa0bSKuniyuki Iwashima __portpair ports; 2542ca065d0cSEric Dumazet struct sock *sk; 2543421b3885SShawn Bohrer 2544919dfa0bSKuniyuki Iwashima hash2 = ipv4_portaddr_hash(net, loc_addr, hnum); 2545ba6aac15SKuniyuki Iwashima slot2 = hash2 & udptable->mask; 2546ba6aac15SKuniyuki Iwashima hslot2 = &udptable->hash2[slot2]; 2547919dfa0bSKuniyuki Iwashima ports = INET_COMBINED_PORTS(rmt_port, hnum); 2548919dfa0bSKuniyuki Iwashima 2549ca065d0cSEric Dumazet udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 2550eda090c3SEric Dumazet if (inet_match(net, sk, acookie, ports, dif, sdif)) 2551ca065d0cSEric Dumazet return sk; 2552421b3885SShawn Bohrer /* Only check first socket in chain */ 2553421b3885SShawn Bohrer break; 2554421b3885SShawn Bohrer } 2555ca065d0cSEric Dumazet return NULL; 2556421b3885SShawn Bohrer } 2557421b3885SShawn Bohrer 25587487449cSPaolo Abeni int udp_v4_early_demux(struct sk_buff *skb) 2559421b3885SShawn Bohrer { 2560610438b7SEric Dumazet struct net *net = dev_net(skb->dev); 2561bc044e8dSPaolo Abeni struct in_device *in_dev = NULL; 2562610438b7SEric Dumazet const struct iphdr *iph; 2563610438b7SEric Dumazet const struct udphdr *uh; 2564ca065d0cSEric Dumazet struct sock *sk = NULL; 2565421b3885SShawn Bohrer struct dst_entry *dst; 2566421b3885SShawn Bohrer int dif = skb->dev->ifindex; 2567fb74c277SDavid Ahern int sdif = inet_sdif(skb); 25686e540309SShawn Bohrer int ours; 2569421b3885SShawn Bohrer 2570421b3885SShawn Bohrer /* validate the packet */ 2571421b3885SShawn Bohrer if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) 25727487449cSPaolo Abeni return 0; 2573421b3885SShawn Bohrer 2574610438b7SEric Dumazet iph = ip_hdr(skb); 2575610438b7SEric Dumazet uh = udp_hdr(skb); 2576610438b7SEric Dumazet 2577996b44fcSPaolo Abeni if (skb->pkt_type == PACKET_MULTICAST) { 2578bc044e8dSPaolo Abeni in_dev = __in_dev_get_rcu(skb->dev); 25796e540309SShawn Bohrer 25806e540309SShawn Bohrer if (!in_dev) 25817487449cSPaolo Abeni return 0; 25826e540309SShawn Bohrer 25836e540309SShawn Bohrer ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, 25846e540309SShawn Bohrer iph->protocol); 25856e540309SShawn Bohrer if (!ours) 25867487449cSPaolo Abeni return 0; 2587ad0ea198SPaolo Abeni 2588421b3885SShawn Bohrer sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, 2589fb74c277SDavid Ahern uh->source, iph->saddr, 2590fb74c277SDavid Ahern dif, sdif); 25916e540309SShawn Bohrer } else if (skb->pkt_type == PACKET_HOST) { 2592421b3885SShawn Bohrer sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, 25933fa6f616SDavid Ahern uh->source, iph->saddr, dif, sdif); 25946e540309SShawn Bohrer } 2595421b3885SShawn Bohrer 259641c6d650SReshetova, Elena if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 25977487449cSPaolo 
Abeni return 0; 2598421b3885SShawn Bohrer 2599421b3885SShawn Bohrer skb->sk = sk; 260082eabd9eSAlexander Duyck skb->destructor = sock_efree; 26018f905c0eSEric Dumazet dst = rcu_dereference(sk->sk_rx_dst); 2602421b3885SShawn Bohrer 2603421b3885SShawn Bohrer if (dst) 2604421b3885SShawn Bohrer dst = dst_check(dst, 0); 260510e2eb87SEric Dumazet if (dst) { 2606bc044e8dSPaolo Abeni u32 itag = 0; 2607bc044e8dSPaolo Abeni 2608d24406c8SWei Wang /* set noref for now. 2609d24406c8SWei Wang * any place which wants to hold dst has to call 2610d24406c8SWei Wang * dst_hold_safe() 2611d24406c8SWei Wang */ 2612421b3885SShawn Bohrer skb_dst_set_noref(skb, dst); 2613bc044e8dSPaolo Abeni 2614bc044e8dSPaolo Abeni /* for unconnected multicast sockets we need to validate 2615bc044e8dSPaolo Abeni * the source on each packet 2616bc044e8dSPaolo Abeni */ 2617bc044e8dSPaolo Abeni if (!inet_sk(sk)->inet_daddr && in_dev) 2618bc044e8dSPaolo Abeni return ip_mc_validate_source(skb, iph->daddr, 26198d2b51b0SGuillaume Nault iph->saddr, 26208d2b51b0SGuillaume Nault iph->tos & IPTOS_RT_MASK, 2621bc044e8dSPaolo Abeni skb->dev, in_dev, &itag); 2622421b3885SShawn Bohrer } 26237487449cSPaolo Abeni return 0; 262410e2eb87SEric Dumazet } 2625421b3885SShawn Bohrer 2626db8dac20SDavid S. Miller int udp_rcv(struct sk_buff *skb) 2627db8dac20SDavid S. Miller { 2628ba6aac15SKuniyuki Iwashima return __udp4_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP); 2629db8dac20SDavid S. Miller } 2630db8dac20SDavid S. Miller 26317d06b2e0SBrian Haley void udp_destroy_sock(struct sock *sk) 2632db8dac20SDavid S. Miller { 263344046a59STom Parkin struct udp_sock *up = udp_sk(sk); 26348a74ad60SEric Dumazet bool slow = lock_sock_fast(sk); 2635a8b897c7SPaolo Abeni 2636a8b897c7SPaolo Abeni /* protects from races with udp_abort() */ 2637a8b897c7SPaolo Abeni sock_set_flag(sk, SOCK_DEAD); 2638db8dac20SDavid S. Miller udp_flush_pending_frames(sk); 26398a74ad60SEric Dumazet unlock_sock_fast(sk, slow); 264060fb9567SPaolo Abeni if (static_branch_unlikely(&udp_encap_needed_key)) { 264160fb9567SPaolo Abeni if (up->encap_type) { 264244046a59STom Parkin void (*encap_destroy)(struct sock *sk); 26436aa7de05SMark Rutland encap_destroy = READ_ONCE(up->encap_destroy); 264444046a59STom Parkin if (encap_destroy) 264544046a59STom Parkin encap_destroy(sk); 264644046a59STom Parkin } 264760fb9567SPaolo Abeni if (up->encap_enabled) 26489c480601SPaolo Abeni static_branch_dec(&udp_encap_needed_key); 264960fb9567SPaolo Abeni } 2650db8dac20SDavid S. Miller } 2651db8dac20SDavid S. 
Miller 26521da177e4SLinus Torvalds /* 26531da177e4SLinus Torvalds * Socket option code for UDP 26541da177e4SLinus Torvalds */ 26554c0a6cb0SGerrit Renker int udp_lib_setsockopt(struct sock *sk, int level, int optname, 265691ac1ccaSChristoph Hellwig sockptr_t optval, unsigned int optlen, 26574c0a6cb0SGerrit Renker int (*push_pending_frames)(struct sock *)) 26581da177e4SLinus Torvalds { 26591da177e4SLinus Torvalds struct udp_sock *up = udp_sk(sk); 26601c19448cSTom Herbert int val, valbool; 26611da177e4SLinus Torvalds int err = 0; 2662b2bf1e26SWang Chen int is_udplite = IS_UDPLITE(sk); 26631da177e4SLinus Torvalds 26648a3854c7SPaolo Abeni if (level == SOL_SOCKET) { 26658a3854c7SPaolo Abeni err = sk_setsockopt(sk, level, optname, optval, optlen); 26668a3854c7SPaolo Abeni 26678a3854c7SPaolo Abeni if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) { 26688a3854c7SPaolo Abeni sockopt_lock_sock(sk); 26698a3854c7SPaolo Abeni /* paired with READ_ONCE in udp_rmem_release() */ 26708a3854c7SPaolo Abeni WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2); 26718a3854c7SPaolo Abeni sockopt_release_sock(sk); 26728a3854c7SPaolo Abeni } 26738a3854c7SPaolo Abeni return err; 26748a3854c7SPaolo Abeni } 26758a3854c7SPaolo Abeni 26761da177e4SLinus Torvalds if (optlen < sizeof(int)) 26771da177e4SLinus Torvalds return -EINVAL; 26781da177e4SLinus Torvalds 267991ac1ccaSChristoph Hellwig if (copy_from_sockptr(&val, optval, sizeof(val))) 26801da177e4SLinus Torvalds return -EFAULT; 26811da177e4SLinus Torvalds 26821c19448cSTom Herbert valbool = val ? 1 : 0; 26831c19448cSTom Herbert 26841da177e4SLinus Torvalds switch (optname) { 26851da177e4SLinus Torvalds case UDP_CORK: 26861da177e4SLinus Torvalds if (val != 0) { 2687a9f59707SEric Dumazet WRITE_ONCE(up->corkflag, 1); 26881da177e4SLinus Torvalds } else { 2689a9f59707SEric Dumazet WRITE_ONCE(up->corkflag, 0); 26901da177e4SLinus Torvalds lock_sock(sk); 26914243cdc2SJoe Perches push_pending_frames(sk); 26921da177e4SLinus Torvalds release_sock(sk); 26931da177e4SLinus Torvalds } 26941da177e4SLinus Torvalds break; 26951da177e4SLinus Torvalds 26961da177e4SLinus Torvalds case UDP_ENCAP: 26971da177e4SLinus Torvalds switch (val) { 26981da177e4SLinus Torvalds case 0: 2699fd1ac07fSAlexey Dobriyan #ifdef CONFIG_XFRM 27001da177e4SLinus Torvalds case UDP_ENCAP_ESPINUDP: 27011da177e4SLinus Torvalds case UDP_ENCAP_ESPINUDP_NON_IKE: 27020146dca7SSabrina Dubroca #if IS_ENABLED(CONFIG_IPV6) 27030146dca7SSabrina Dubroca if (sk->sk_family == AF_INET6) 27040146dca7SSabrina Dubroca up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv; 27050146dca7SSabrina Dubroca else 27060146dca7SSabrina Dubroca #endif 2707067b207bSJames Chapman up->encap_rcv = xfrm4_udp_encap_rcv; 2708fd1ac07fSAlexey Dobriyan #endif 2709a8eceea8SJoe Perches fallthrough; 2710342f0234SJames Chapman case UDP_ENCAP_L2TPINUDP: 27111da177e4SLinus Torvalds up->encap_type = val; 271260fb9567SPaolo Abeni lock_sock(sk); 271360fb9567SPaolo Abeni udp_tunnel_encap_enable(sk->sk_socket); 271460fb9567SPaolo Abeni release_sock(sk); 27151da177e4SLinus Torvalds break; 27161da177e4SLinus Torvalds default: 27171da177e4SLinus Torvalds err = -ENOPROTOOPT; 27181da177e4SLinus Torvalds break; 27191da177e4SLinus Torvalds } 27201da177e4SLinus Torvalds break; 27211da177e4SLinus Torvalds 27221c19448cSTom Herbert case UDP_NO_CHECK6_TX: 27231c19448cSTom Herbert up->no_check6_tx = valbool; 27241c19448cSTom Herbert break; 27251c19448cSTom Herbert 27261c19448cSTom Herbert case UDP_NO_CHECK6_RX: 27271c19448cSTom Herbert up->no_check6_rx = valbool; 27281c19448cSTom 
Herbert break; 27291c19448cSTom Herbert 2730bec1f6f6SWillem de Bruijn case UDP_SEGMENT: 2731bec1f6f6SWillem de Bruijn if (val < 0 || val > USHRT_MAX) 2732bec1f6f6SWillem de Bruijn return -EINVAL; 273318a419baSEric Dumazet WRITE_ONCE(up->gso_size, val); 2734bec1f6f6SWillem de Bruijn break; 2735bec1f6f6SWillem de Bruijn 2736e20cf8d3SPaolo Abeni case UDP_GRO: 2737e20cf8d3SPaolo Abeni lock_sock(sk); 273878352f73SPaolo Abeni 273978352f73SPaolo Abeni /* when enabling GRO, accept the related GSO packet type */ 2740e20cf8d3SPaolo Abeni if (valbool) 2741e20cf8d3SPaolo Abeni udp_tunnel_encap_enable(sk->sk_socket); 2742e20cf8d3SPaolo Abeni up->gro_enabled = valbool; 274378352f73SPaolo Abeni up->accept_udp_l4 = valbool; 2744e20cf8d3SPaolo Abeni release_sock(sk); 2745e20cf8d3SPaolo Abeni break; 2746e20cf8d3SPaolo Abeni 2747ba4e58ecSGerrit Renker /* 2748ba4e58ecSGerrit Renker * UDP-Lite's partial checksum coverage (RFC 3828). 2749ba4e58ecSGerrit Renker */ 2750ba4e58ecSGerrit Renker /* The sender sets actual checksum coverage length via this option. 2751ba4e58ecSGerrit Renker * The case coverage > packet length is handled by send module. */ 2752ba4e58ecSGerrit Renker case UDPLITE_SEND_CSCOV: 2753b2bf1e26SWang Chen if (!is_udplite) /* Disable the option on UDP sockets */ 2754ba4e58ecSGerrit Renker return -ENOPROTOOPT; 2755ba4e58ecSGerrit Renker if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ 2756ba4e58ecSGerrit Renker val = 8; 27574be929beSAlexey Dobriyan else if (val > USHRT_MAX) 27584be929beSAlexey Dobriyan val = USHRT_MAX; 2759ba4e58ecSGerrit Renker up->pcslen = val; 2760ba4e58ecSGerrit Renker up->pcflag |= UDPLITE_SEND_CC; 2761ba4e58ecSGerrit Renker break; 2762ba4e58ecSGerrit Renker 2763ba4e58ecSGerrit Renker /* The receiver specifies a minimum checksum coverage value. To make 2764ba4e58ecSGerrit Renker * sense, this should be set to at least 8 (as done below). If zero is 2765ba4e58ecSGerrit Renker * used, this again means full checksum coverage. */ 2766ba4e58ecSGerrit Renker case UDPLITE_RECV_CSCOV: 2767b2bf1e26SWang Chen if (!is_udplite) /* Disable the option on UDP sockets */ 2768ba4e58ecSGerrit Renker return -ENOPROTOOPT; 2769ba4e58ecSGerrit Renker if (val != 0 && val < 8) /* Avoid silly minimal values. */ 2770ba4e58ecSGerrit Renker val = 8; 27714be929beSAlexey Dobriyan else if (val > USHRT_MAX) 27724be929beSAlexey Dobriyan val = USHRT_MAX; 2773ba4e58ecSGerrit Renker up->pcrlen = val; 2774ba4e58ecSGerrit Renker up->pcflag |= UDPLITE_RECV_CC; 2775ba4e58ecSGerrit Renker break; 2776ba4e58ecSGerrit Renker 27771da177e4SLinus Torvalds default: 27781da177e4SLinus Torvalds err = -ENOPROTOOPT; 27791da177e4SLinus Torvalds break; 27806516c655SStephen Hemminger } 27811da177e4SLinus Torvalds 27821da177e4SLinus Torvalds return err; 27831da177e4SLinus Torvalds } 2784c482c568SEric Dumazet EXPORT_SYMBOL(udp_lib_setsockopt); 27851da177e4SLinus Torvalds 2786a7b75c5aSChristoph Hellwig int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 2787a7b75c5aSChristoph Hellwig unsigned int optlen) 2788db8dac20SDavid S. Miller { 27898a3854c7SPaolo Abeni if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET) 279091ac1ccaSChristoph Hellwig return udp_lib_setsockopt(sk, level, optname, 2791a7b75c5aSChristoph Hellwig optval, optlen, 2792db8dac20SDavid S. Miller udp_push_pending_frames); 2793db8dac20SDavid S. Miller return ip_setsockopt(sk, level, optname, optval, optlen); 2794db8dac20SDavid S. Miller } 2795db8dac20SDavid S. 
Miller 27964c0a6cb0SGerrit Renker int udp_lib_getsockopt(struct sock *sk, int level, int optname, 27971da177e4SLinus Torvalds char __user *optval, int __user *optlen) 27981da177e4SLinus Torvalds { 27991da177e4SLinus Torvalds struct udp_sock *up = udp_sk(sk); 28001da177e4SLinus Torvalds int val, len; 28011da177e4SLinus Torvalds 28021da177e4SLinus Torvalds if (get_user(len, optlen)) 28031da177e4SLinus Torvalds return -EFAULT; 28041da177e4SLinus Torvalds 28051da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(int)); 28061da177e4SLinus Torvalds 28071da177e4SLinus Torvalds if (len < 0) 28081da177e4SLinus Torvalds return -EINVAL; 28091da177e4SLinus Torvalds 28101da177e4SLinus Torvalds switch (optname) { 28111da177e4SLinus Torvalds case UDP_CORK: 2812a9f59707SEric Dumazet val = READ_ONCE(up->corkflag); 28131da177e4SLinus Torvalds break; 28141da177e4SLinus Torvalds 28151da177e4SLinus Torvalds case UDP_ENCAP: 28161da177e4SLinus Torvalds val = up->encap_type; 28171da177e4SLinus Torvalds break; 28181da177e4SLinus Torvalds 28191c19448cSTom Herbert case UDP_NO_CHECK6_TX: 28201c19448cSTom Herbert val = up->no_check6_tx; 28211c19448cSTom Herbert break; 28221c19448cSTom Herbert 28231c19448cSTom Herbert case UDP_NO_CHECK6_RX: 28241c19448cSTom Herbert val = up->no_check6_rx; 28251c19448cSTom Herbert break; 28261c19448cSTom Herbert 2827bec1f6f6SWillem de Bruijn case UDP_SEGMENT: 282818a419baSEric Dumazet val = READ_ONCE(up->gso_size); 2829bec1f6f6SWillem de Bruijn break; 2830bec1f6f6SWillem de Bruijn 283198184612SNorman Maurer case UDP_GRO: 283298184612SNorman Maurer val = up->gro_enabled; 283398184612SNorman Maurer break; 283498184612SNorman Maurer 2835ba4e58ecSGerrit Renker /* The following two cannot be changed on UDP sockets, the return is 2836ba4e58ecSGerrit Renker * always 0 (which corresponds to the full checksum coverage of UDP). */ 2837ba4e58ecSGerrit Renker case UDPLITE_SEND_CSCOV: 2838ba4e58ecSGerrit Renker val = up->pcslen; 2839ba4e58ecSGerrit Renker break; 2840ba4e58ecSGerrit Renker 2841ba4e58ecSGerrit Renker case UDPLITE_RECV_CSCOV: 2842ba4e58ecSGerrit Renker val = up->pcrlen; 2843ba4e58ecSGerrit Renker break; 2844ba4e58ecSGerrit Renker 28451da177e4SLinus Torvalds default: 28461da177e4SLinus Torvalds return -ENOPROTOOPT; 28476516c655SStephen Hemminger } 28481da177e4SLinus Torvalds 28491da177e4SLinus Torvalds if (put_user(len, optlen)) 28501da177e4SLinus Torvalds return -EFAULT; 28511da177e4SLinus Torvalds if (copy_to_user(optval, &val, len)) 28521da177e4SLinus Torvalds return -EFAULT; 28531da177e4SLinus Torvalds return 0; 28541da177e4SLinus Torvalds } 2855c482c568SEric Dumazet EXPORT_SYMBOL(udp_lib_getsockopt); 28561da177e4SLinus Torvalds 2857db8dac20SDavid S. Miller int udp_getsockopt(struct sock *sk, int level, int optname, 2858db8dac20SDavid S. Miller char __user *optval, int __user *optlen) 2859db8dac20SDavid S. Miller { 2860db8dac20SDavid S. Miller if (level == SOL_UDP || level == SOL_UDPLITE) 2861db8dac20SDavid S. Miller return udp_lib_getsockopt(sk, level, optname, optval, optlen); 2862db8dac20SDavid S. Miller return ip_getsockopt(sk, level, optname, optval, optlen); 2863db8dac20SDavid S. Miller } 2864db8dac20SDavid S. Miller 28651da177e4SLinus Torvalds /** 28661da177e4SLinus Torvalds * udp_poll - wait for a UDP event. 
28673628e3cbSAndrew Lunn * @file: - file struct 28683628e3cbSAndrew Lunn * @sock: - socket 28693628e3cbSAndrew Lunn * @wait: - poll table 28701da177e4SLinus Torvalds * 28711da177e4SLinus Torvalds * This is same as datagram poll, except for the special case of 28721da177e4SLinus Torvalds * blocking sockets. If application is using a blocking fd 28731da177e4SLinus Torvalds * and a packet with checksum error is in the queue; 28741da177e4SLinus Torvalds * then it could get return from select indicating data available 28751da177e4SLinus Torvalds * but then block when reading it. Add special case code 28761da177e4SLinus Torvalds * to work around these arguably broken applications. 28771da177e4SLinus Torvalds */ 2878a11e1d43SLinus Torvalds __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait) 28791da177e4SLinus Torvalds { 2880a11e1d43SLinus Torvalds __poll_t mask = datagram_poll(file, sock, wait); 28811da177e4SLinus Torvalds struct sock *sk = sock->sk; 28821da177e4SLinus Torvalds 28833ef7cf57SEric Dumazet if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) 2884a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 28852276f58aSPaolo Abeni 28861da177e4SLinus Torvalds /* Check for false positives due to checksum errors */ 2887a11e1d43SLinus Torvalds if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) && 2888e83c6744SEric Dumazet !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) 2889a9a08845SLinus Torvalds mask &= ~(EPOLLIN | EPOLLRDNORM); 28901da177e4SLinus Torvalds 2891af493388SCong Wang /* psock ingress_msg queue should not contain any bad checksum frames */ 2892af493388SCong Wang if (sk_is_readable(sk)) 2893af493388SCong Wang mask |= EPOLLIN | EPOLLRDNORM; 28941da177e4SLinus Torvalds return mask; 28951da177e4SLinus Torvalds 28961da177e4SLinus Torvalds } 2897a11e1d43SLinus Torvalds EXPORT_SYMBOL(udp_poll); 28981da177e4SLinus Torvalds 28995d77dca8SDavid Ahern int udp_abort(struct sock *sk, int err) 29005d77dca8SDavid Ahern { 29014ddbcb88SAditi Ghag if (!has_current_bpf_ctx()) 29025d77dca8SDavid Ahern lock_sock(sk); 29035d77dca8SDavid Ahern 2904a8b897c7SPaolo Abeni /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing 2905a8b897c7SPaolo Abeni * with close() 2906a8b897c7SPaolo Abeni */ 2907a8b897c7SPaolo Abeni if (sock_flag(sk, SOCK_DEAD)) 2908a8b897c7SPaolo Abeni goto out; 2909a8b897c7SPaolo Abeni 29105d77dca8SDavid Ahern sk->sk_err = err; 2911e3ae2365SAlexander Aring sk_error_report(sk); 2912286c72deSEric Dumazet __udp_disconnect(sk, 0); 29135d77dca8SDavid Ahern 2914a8b897c7SPaolo Abeni out: 29154ddbcb88SAditi Ghag if (!has_current_bpf_ctx()) 29165d77dca8SDavid Ahern release_sock(sk); 29175d77dca8SDavid Ahern 29185d77dca8SDavid Ahern return 0; 29195d77dca8SDavid Ahern } 29205d77dca8SDavid Ahern EXPORT_SYMBOL_GPL(udp_abort); 29215d77dca8SDavid Ahern 2922db8dac20SDavid S. Miller struct proto udp_prot = { 2923db8dac20SDavid S. Miller .name = "UDP", 2924db8dac20SDavid S. Miller .owner = THIS_MODULE, 2925db8dac20SDavid S. Miller .close = udp_lib_close, 2926d74bad4eSAndrey Ignatov .pre_connect = udp_pre_connect, 2927db8dac20SDavid S. Miller .connect = ip4_datagram_connect, 2928db8dac20SDavid S. Miller .disconnect = udp_disconnect, 2929db8dac20SDavid S. Miller .ioctl = udp_ioctl, 2930850cbaddSPaolo Abeni .init = udp_init_sock, 2931db8dac20SDavid S. Miller .destroy = udp_destroy_sock, 2932db8dac20SDavid S. Miller .setsockopt = udp_setsockopt, 2933db8dac20SDavid S. Miller .getsockopt = udp_getsockopt, 2934db8dac20SDavid S. 
Miller .sendmsg = udp_sendmsg, 2935db8dac20SDavid S. Miller .recvmsg = udp_recvmsg, 2936*1d7e4538SDavid Howells .splice_eof = udp_splice_eof, 2937db8dac20SDavid S. Miller .sendpage = udp_sendpage, 29388141ed9fSSteffen Klassert .release_cb = ip4_datagram_release_cb, 2939db8dac20SDavid S. Miller .hash = udp_lib_hash, 2940db8dac20SDavid S. Miller .unhash = udp_lib_unhash, 2941719f8358SEric Dumazet .rehash = udp_v4_rehash, 2942db8dac20SDavid S. Miller .get_port = udp_v4_get_port, 294391a760b2SMenglong Dong .put_port = udp_lib_unhash, 29448a59f9d1SCong Wang #ifdef CONFIG_BPF_SYSCALL 29458a59f9d1SCong Wang .psock_update_sk_prot = udp_bpf_update_proto, 29468a59f9d1SCong Wang #endif 2947db8dac20SDavid S. Miller .memory_allocated = &udp_memory_allocated, 29480defbb0aSEric Dumazet .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, 29490defbb0aSEric Dumazet 2950db8dac20SDavid S. Miller .sysctl_mem = sysctl_udp_mem, 29511e802951STonghao Zhang .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), 29521e802951STonghao Zhang .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), 2953db8dac20SDavid S. Miller .obj_size = sizeof(struct udp_sock), 295467fb4330SKuniyuki Iwashima .h.udp_table = NULL, 29555d77dca8SDavid Ahern .diag_destroy = udp_abort, 2956db8dac20SDavid S. Miller }; 2957c482c568SEric Dumazet EXPORT_SYMBOL(udp_prot); 29581da177e4SLinus Torvalds 29591da177e4SLinus Torvalds /* ------------------------------------------------------------------------ */ 29601da177e4SLinus Torvalds #ifdef CONFIG_PROC_FS 29611da177e4SLinus Torvalds 2962f44b1c51SAditi Ghag static unsigned short seq_file_family(const struct seq_file *seq); 2963f44b1c51SAditi Ghag static bool seq_sk_match(struct seq_file *seq, const struct sock *sk) 2964f44b1c51SAditi Ghag { 2965f44b1c51SAditi Ghag unsigned short family = seq_file_family(seq); 2966f44b1c51SAditi Ghag 2967f44b1c51SAditi Ghag /* AF_UNSPEC is used as a match all */ 2968f44b1c51SAditi Ghag return ((family == AF_UNSPEC || family == sk->sk_family) && 2969f44b1c51SAditi Ghag net_eq(sock_net(sk), seq_file_net(seq))); 2970f44b1c51SAditi Ghag } 2971f44b1c51SAditi Ghag 2972e4fe1bf1SAditi Ghag #ifdef CONFIG_BPF_SYSCALL 2973e4fe1bf1SAditi Ghag static const struct seq_operations bpf_iter_udp_seq_ops; 2974e4fe1bf1SAditi Ghag #endif 29757625d2e9SAditi Ghag static struct udp_table *udp_get_table_seq(struct seq_file *seq, 2976478aee5dSKuniyuki Iwashima struct net *net) 2977478aee5dSKuniyuki Iwashima { 29787625d2e9SAditi Ghag const struct udp_seq_afinfo *afinfo; 29797625d2e9SAditi Ghag 2980e4fe1bf1SAditi Ghag #ifdef CONFIG_BPF_SYSCALL 2981e4fe1bf1SAditi Ghag if (seq->op == &bpf_iter_udp_seq_ops) 29827625d2e9SAditi Ghag return net->ipv4.udp_table; 2983e4fe1bf1SAditi Ghag #endif 29847625d2e9SAditi Ghag 29857625d2e9SAditi Ghag afinfo = pde_data(file_inode(seq->file)); 2986478aee5dSKuniyuki Iwashima return afinfo->udp_table ? : net->ipv4.udp_table; 2987478aee5dSKuniyuki Iwashima } 2988478aee5dSKuniyuki Iwashima 2989645ca708SEric Dumazet static struct sock *udp_get_first(struct seq_file *seq, int start) 29901da177e4SLinus Torvalds { 29911da177e4SLinus Torvalds struct udp_iter_state *state = seq->private; 29926f191efeSDenis V. 
Lunev struct net *net = seq_file_net(seq); 2993478aee5dSKuniyuki Iwashima struct udp_table *udptable; 2994919dfa0bSKuniyuki Iwashima struct sock *sk; 29951da177e4SLinus Torvalds 29967625d2e9SAditi Ghag udptable = udp_get_table_seq(seq, net); 2997478aee5dSKuniyuki Iwashima 2998478aee5dSKuniyuki Iwashima for (state->bucket = start; state->bucket <= udptable->mask; 2999f86dcc5aSEric Dumazet ++state->bucket) { 3000478aee5dSKuniyuki Iwashima struct udp_hslot *hslot = &udptable->hash[state->bucket]; 3001f86dcc5aSEric Dumazet 3002ca065d0cSEric Dumazet if (hlist_empty(&hslot->head)) 3003f86dcc5aSEric Dumazet continue; 3004f86dcc5aSEric Dumazet 3005645ca708SEric Dumazet spin_lock_bh(&hslot->lock); 3006ca065d0cSEric Dumazet sk_for_each(sk, &hslot->head) { 3007f44b1c51SAditi Ghag if (seq_sk_match(seq, sk)) 30081da177e4SLinus Torvalds goto found; 30091da177e4SLinus Torvalds } 3010645ca708SEric Dumazet spin_unlock_bh(&hslot->lock); 30111da177e4SLinus Torvalds } 30121da177e4SLinus Torvalds sk = NULL; 30131da177e4SLinus Torvalds found: 30141da177e4SLinus Torvalds return sk; 30151da177e4SLinus Torvalds } 30161da177e4SLinus Torvalds 30171da177e4SLinus Torvalds static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) 30181da177e4SLinus Torvalds { 30191da177e4SLinus Torvalds struct udp_iter_state *state = seq->private; 30206f191efeSDenis V. Lunev struct net *net = seq_file_net(seq); 3021478aee5dSKuniyuki Iwashima struct udp_table *udptable; 30221da177e4SLinus Torvalds 30231da177e4SLinus Torvalds do { 3024ca065d0cSEric Dumazet sk = sk_next(sk); 3025f44b1c51SAditi Ghag } while (sk && !seq_sk_match(seq, sk)); 30261da177e4SLinus Torvalds 3027645ca708SEric Dumazet if (!sk) { 30287625d2e9SAditi Ghag udptable = udp_get_table_seq(seq, net); 3029478aee5dSKuniyuki Iwashima 3030478aee5dSKuniyuki Iwashima if (state->bucket <= udptable->mask) 3031478aee5dSKuniyuki Iwashima spin_unlock_bh(&udptable->hash[state->bucket].lock); 3032478aee5dSKuniyuki Iwashima 3033645ca708SEric Dumazet return udp_get_first(seq, state->bucket + 1); 30341da177e4SLinus Torvalds } 30351da177e4SLinus Torvalds return sk; 30361da177e4SLinus Torvalds } 30371da177e4SLinus Torvalds 30381da177e4SLinus Torvalds static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) 30391da177e4SLinus Torvalds { 3040645ca708SEric Dumazet struct sock *sk = udp_get_first(seq, 0); 30411da177e4SLinus Torvalds 30421da177e4SLinus Torvalds if (sk) 30431da177e4SLinus Torvalds while (pos && (sk = udp_get_next(seq, sk)) != NULL) 30441da177e4SLinus Torvalds --pos; 30451da177e4SLinus Torvalds return pos ? NULL : sk; 30461da177e4SLinus Torvalds } 30471da177e4SLinus Torvalds 3048a3d2599bSChristoph Hellwig void *udp_seq_start(struct seq_file *seq, loff_t *pos) 30491da177e4SLinus Torvalds { 305030842f29SVitaly Mayatskikh struct udp_iter_state *state = seq->private; 3051f86dcc5aSEric Dumazet state->bucket = MAX_UDP_PORTS; 305230842f29SVitaly Mayatskikh 3053b50660f1SYOSHIFUJI Hideaki return *pos ? 
udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; 30541da177e4SLinus Torvalds } 3055a3d2599bSChristoph Hellwig EXPORT_SYMBOL(udp_seq_start); 30561da177e4SLinus Torvalds 3057a3d2599bSChristoph Hellwig void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) 30581da177e4SLinus Torvalds { 30591da177e4SLinus Torvalds struct sock *sk; 30601da177e4SLinus Torvalds 3061b50660f1SYOSHIFUJI Hideaki if (v == SEQ_START_TOKEN) 30621da177e4SLinus Torvalds sk = udp_get_idx(seq, 0); 30631da177e4SLinus Torvalds else 30641da177e4SLinus Torvalds sk = udp_get_next(seq, v); 30651da177e4SLinus Torvalds 30661da177e4SLinus Torvalds ++*pos; 30671da177e4SLinus Torvalds return sk; 30681da177e4SLinus Torvalds } 3069a3d2599bSChristoph Hellwig EXPORT_SYMBOL(udp_seq_next); 30701da177e4SLinus Torvalds 3071a3d2599bSChristoph Hellwig void udp_seq_stop(struct seq_file *seq, void *v) 30721da177e4SLinus Torvalds { 3073645ca708SEric Dumazet struct udp_iter_state *state = seq->private; 3074478aee5dSKuniyuki Iwashima struct udp_table *udptable; 3075645ca708SEric Dumazet 30767625d2e9SAditi Ghag udptable = udp_get_table_seq(seq, seq_file_net(seq)); 3077478aee5dSKuniyuki Iwashima 3078478aee5dSKuniyuki Iwashima if (state->bucket <= udptable->mask) 3079478aee5dSKuniyuki Iwashima spin_unlock_bh(&udptable->hash[state->bucket].lock); 30801da177e4SLinus Torvalds } 3081a3d2599bSChristoph Hellwig EXPORT_SYMBOL(udp_seq_stop); 3082db8dac20SDavid S. Miller 3083db8dac20SDavid S. Miller /* ------------------------------------------------------------------------ */ 30845e659e4cSPavel Emelyanov static void udp4_format_sock(struct sock *sp, struct seq_file *f, 3085652586dfSTetsuo Handa int bucket) 3086db8dac20SDavid S. Miller { 3087db8dac20SDavid S. Miller struct inet_sock *inet = inet_sk(sp); 3088c720c7e8SEric Dumazet __be32 dest = inet->inet_daddr; 3089c720c7e8SEric Dumazet __be32 src = inet->inet_rcv_saddr; 3090c720c7e8SEric Dumazet __u16 destp = ntohs(inet->inet_dport); 3091c720c7e8SEric Dumazet __u16 srcp = ntohs(inet->inet_sport); 3092db8dac20SDavid S. Miller 3093f86dcc5aSEric Dumazet seq_printf(f, "%5d: %08X:%04X %08X:%04X" 3094ea9a0379SPatrick Talbert " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u", 3095db8dac20SDavid S. Miller bucket, src, srcp, dest, destp, sp->sk_state, 309631e6d363SEric Dumazet sk_wmem_alloc_get(sp), 30976c206b20SPaolo Abeni udp_rqueue_get(sp), 3098a7cb5a49SEric W. Biederman 0, 0L, 0, 3099a7cb5a49SEric W. Biederman from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 3100a7cb5a49SEric W. Biederman 0, sock_i_ino(sp), 310141c6d650SReshetova, Elena refcount_read(&sp->sk_refcnt), sp, 3102652586dfSTetsuo Handa atomic_read(&sp->sk_drops)); 3103db8dac20SDavid S. Miller } 3104db8dac20SDavid S. Miller 3105db8dac20SDavid S. Miller int udp4_seq_show(struct seq_file *seq, void *v) 3106db8dac20SDavid S. Miller { 3107652586dfSTetsuo Handa seq_setwidth(seq, 127); 3108db8dac20SDavid S. Miller if (v == SEQ_START_TOKEN) 3109652586dfSTetsuo Handa seq_puts(seq, " sl local_address rem_address st tx_queue " 3110db8dac20SDavid S. Miller "rx_queue tr tm->when retrnsmt uid timeout " 3111cb61cb9bSEric Dumazet "inode ref pointer drops"); 3112db8dac20SDavid S. Miller else { 3113db8dac20SDavid S. Miller struct udp_iter_state *state = seq->private; 3114db8dac20SDavid S. Miller 3115652586dfSTetsuo Handa udp4_format_sock(v, seq, state->bucket); 3116db8dac20SDavid S. Miller } 3117652586dfSTetsuo Handa seq_pad(seq, '\n'); 3118db8dac20SDavid S. Miller return 0; 3119db8dac20SDavid S. Miller } 3120db8dac20SDavid S. 
Miller 31215788b3a0SYonghong Song #ifdef CONFIG_BPF_SYSCALL 31225788b3a0SYonghong Song struct bpf_iter__udp { 31235788b3a0SYonghong Song __bpf_md_ptr(struct bpf_iter_meta *, meta); 31245788b3a0SYonghong Song __bpf_md_ptr(struct udp_sock *, udp_sk); 31255788b3a0SYonghong Song uid_t uid __aligned(8); 31265788b3a0SYonghong Song int bucket __aligned(8); 31275788b3a0SYonghong Song }; 31285788b3a0SYonghong Song 3129c96dac8dSAditi Ghag struct bpf_udp_iter_state { 3130c96dac8dSAditi Ghag struct udp_iter_state state; 3131c96dac8dSAditi Ghag unsigned int cur_sk; 3132c96dac8dSAditi Ghag unsigned int end_sk; 3133c96dac8dSAditi Ghag unsigned int max_sk; 3134c96dac8dSAditi Ghag int offset; 3135c96dac8dSAditi Ghag struct sock **batch; 3136c96dac8dSAditi Ghag bool st_bucket_done; 3137c96dac8dSAditi Ghag }; 3138c96dac8dSAditi Ghag 3139c96dac8dSAditi Ghag static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter, 3140c96dac8dSAditi Ghag unsigned int new_batch_sz); 3141c96dac8dSAditi Ghag static struct sock *bpf_iter_udp_batch(struct seq_file *seq) 3142c96dac8dSAditi Ghag { 3143c96dac8dSAditi Ghag struct bpf_udp_iter_state *iter = seq->private; 3144c96dac8dSAditi Ghag struct udp_iter_state *state = &iter->state; 3145c96dac8dSAditi Ghag struct net *net = seq_file_net(seq); 3146c96dac8dSAditi Ghag struct udp_table *udptable; 3147c96dac8dSAditi Ghag unsigned int batch_sks = 0; 3148c96dac8dSAditi Ghag bool resized = false; 3149c96dac8dSAditi Ghag struct sock *sk; 3150c96dac8dSAditi Ghag 3151c96dac8dSAditi Ghag /* The current batch is done, so advance the bucket. */ 3152c96dac8dSAditi Ghag if (iter->st_bucket_done) { 3153c96dac8dSAditi Ghag state->bucket++; 3154c96dac8dSAditi Ghag iter->offset = 0; 3155c96dac8dSAditi Ghag } 3156c96dac8dSAditi Ghag 3157c96dac8dSAditi Ghag udptable = udp_get_table_seq(seq, net); 3158c96dac8dSAditi Ghag 3159c96dac8dSAditi Ghag again: 3160c96dac8dSAditi Ghag /* New batch for the next bucket. 3161c96dac8dSAditi Ghag * Iterate over the hash table to find a bucket with sockets matching 3162c96dac8dSAditi Ghag * the iterator attributes, and return the first matching socket from 3163c96dac8dSAditi Ghag * the bucket. The remaining matched sockets from the bucket are batched 3164c96dac8dSAditi Ghag * before releasing the bucket lock. This allows BPF programs that are 3165c96dac8dSAditi Ghag * called in seq_show to acquire the bucket lock if needed. 3166c96dac8dSAditi Ghag */ 3167c96dac8dSAditi Ghag iter->cur_sk = 0; 3168c96dac8dSAditi Ghag iter->end_sk = 0; 3169c96dac8dSAditi Ghag iter->st_bucket_done = false; 3170c96dac8dSAditi Ghag batch_sks = 0; 3171c96dac8dSAditi Ghag 3172c96dac8dSAditi Ghag for (; state->bucket <= udptable->mask; state->bucket++) { 3173c96dac8dSAditi Ghag struct udp_hslot *hslot2 = &udptable->hash2[state->bucket]; 3174c96dac8dSAditi Ghag 3175c96dac8dSAditi Ghag if (hlist_empty(&hslot2->head)) { 3176c96dac8dSAditi Ghag iter->offset = 0; 3177c96dac8dSAditi Ghag continue; 3178c96dac8dSAditi Ghag } 3179c96dac8dSAditi Ghag 3180c96dac8dSAditi Ghag spin_lock_bh(&hslot2->lock); 3181c96dac8dSAditi Ghag udp_portaddr_for_each_entry(sk, &hslot2->head) { 3182c96dac8dSAditi Ghag if (seq_sk_match(seq, sk)) { 3183c96dac8dSAditi Ghag /* Resume from the last iterated socket at the 3184c96dac8dSAditi Ghag * offset in the bucket before iterator was stopped. 
3185c96dac8dSAditi Ghag */ 3186c96dac8dSAditi Ghag if (iter->offset) { 3187c96dac8dSAditi Ghag --iter->offset; 3188c96dac8dSAditi Ghag continue; 3189c96dac8dSAditi Ghag } 3190c96dac8dSAditi Ghag if (iter->end_sk < iter->max_sk) { 3191c96dac8dSAditi Ghag sock_hold(sk); 3192c96dac8dSAditi Ghag iter->batch[iter->end_sk++] = sk; 3193c96dac8dSAditi Ghag } 3194c96dac8dSAditi Ghag batch_sks++; 3195c96dac8dSAditi Ghag } 3196c96dac8dSAditi Ghag } 3197c96dac8dSAditi Ghag spin_unlock_bh(&hslot2->lock); 3198c96dac8dSAditi Ghag 3199c96dac8dSAditi Ghag if (iter->end_sk) 3200c96dac8dSAditi Ghag break; 3201c96dac8dSAditi Ghag 3202c96dac8dSAditi Ghag /* Reset the current bucket's offset before moving to the next bucket. */ 3203c96dac8dSAditi Ghag iter->offset = 0; 3204c96dac8dSAditi Ghag } 3205c96dac8dSAditi Ghag 3206c96dac8dSAditi Ghag /* All done: no batch made. */ 3207c96dac8dSAditi Ghag if (!iter->end_sk) 3208c96dac8dSAditi Ghag return NULL; 3209c96dac8dSAditi Ghag 3210c96dac8dSAditi Ghag if (iter->end_sk == batch_sks) { 3211c96dac8dSAditi Ghag /* Batching is done for the current bucket; return the first 3212c96dac8dSAditi Ghag * socket to be iterated from the batch. 3213c96dac8dSAditi Ghag */ 3214c96dac8dSAditi Ghag iter->st_bucket_done = true; 3215c96dac8dSAditi Ghag goto done; 3216c96dac8dSAditi Ghag } 3217c96dac8dSAditi Ghag if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2)) { 3218c96dac8dSAditi Ghag resized = true; 3219c96dac8dSAditi Ghag /* After allocating a larger batch, retry one more time to grab 3220c96dac8dSAditi Ghag * the whole bucket. 3221c96dac8dSAditi Ghag */ 3222c96dac8dSAditi Ghag state->bucket--; 3223c96dac8dSAditi Ghag goto again; 3224c96dac8dSAditi Ghag } 3225c96dac8dSAditi Ghag done: 3226c96dac8dSAditi Ghag return iter->batch[0]; 3227c96dac8dSAditi Ghag } 3228c96dac8dSAditi Ghag 3229c96dac8dSAditi Ghag static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3230c96dac8dSAditi Ghag { 3231c96dac8dSAditi Ghag struct bpf_udp_iter_state *iter = seq->private; 3232c96dac8dSAditi Ghag struct sock *sk; 3233c96dac8dSAditi Ghag 3234c96dac8dSAditi Ghag /* Whenever seq_next() is called, the iter->cur_sk is 3235c96dac8dSAditi Ghag * done with seq_show(), so unref the iter->cur_sk. 3236c96dac8dSAditi Ghag */ 3237c96dac8dSAditi Ghag if (iter->cur_sk < iter->end_sk) { 3238c96dac8dSAditi Ghag sock_put(iter->batch[iter->cur_sk++]); 3239c96dac8dSAditi Ghag ++iter->offset; 3240c96dac8dSAditi Ghag } 3241c96dac8dSAditi Ghag 3242c96dac8dSAditi Ghag /* After updating iter->cur_sk, check if there are more sockets 3243c96dac8dSAditi Ghag * available in the current bucket batch. 3244c96dac8dSAditi Ghag */ 3245c96dac8dSAditi Ghag if (iter->cur_sk < iter->end_sk) 3246c96dac8dSAditi Ghag sk = iter->batch[iter->cur_sk]; 3247c96dac8dSAditi Ghag else 3248c96dac8dSAditi Ghag /* Prepare a new batch. */ 3249c96dac8dSAditi Ghag sk = bpf_iter_udp_batch(seq); 3250c96dac8dSAditi Ghag 3251c96dac8dSAditi Ghag ++*pos; 3252c96dac8dSAditi Ghag return sk; 3253c96dac8dSAditi Ghag } 3254c96dac8dSAditi Ghag 3255c96dac8dSAditi Ghag static void *bpf_iter_udp_seq_start(struct seq_file *seq, loff_t *pos) 3256c96dac8dSAditi Ghag { 3257c96dac8dSAditi Ghag /* bpf iter does not support lseek, so it always 3258c96dac8dSAditi Ghag * continue from where it was stop()-ped. 
3259c96dac8dSAditi Ghag */ 3260c96dac8dSAditi Ghag if (*pos) 3261c96dac8dSAditi Ghag return bpf_iter_udp_batch(seq); 3262c96dac8dSAditi Ghag 3263c96dac8dSAditi Ghag return SEQ_START_TOKEN; 3264c96dac8dSAditi Ghag } 3265c96dac8dSAditi Ghag 32665788b3a0SYonghong Song static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, 32675788b3a0SYonghong Song struct udp_sock *udp_sk, uid_t uid, int bucket) 32685788b3a0SYonghong Song { 32695788b3a0SYonghong Song struct bpf_iter__udp ctx; 32705788b3a0SYonghong Song 32715788b3a0SYonghong Song meta->seq_num--; /* skip SEQ_START_TOKEN */ 32725788b3a0SYonghong Song ctx.meta = meta; 32735788b3a0SYonghong Song ctx.udp_sk = udp_sk; 32745788b3a0SYonghong Song ctx.uid = uid; 32755788b3a0SYonghong Song ctx.bucket = bucket; 32765788b3a0SYonghong Song return bpf_iter_run_prog(prog, &ctx); 32775788b3a0SYonghong Song } 32785788b3a0SYonghong Song 32795788b3a0SYonghong Song static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v) 32805788b3a0SYonghong Song { 32815788b3a0SYonghong Song struct udp_iter_state *state = seq->private; 32825788b3a0SYonghong Song struct bpf_iter_meta meta; 32835788b3a0SYonghong Song struct bpf_prog *prog; 32845788b3a0SYonghong Song struct sock *sk = v; 32855788b3a0SYonghong Song uid_t uid; 3286c96dac8dSAditi Ghag int ret; 32875788b3a0SYonghong Song 32885788b3a0SYonghong Song if (v == SEQ_START_TOKEN) 32895788b3a0SYonghong Song return 0; 32905788b3a0SYonghong Song 3291c96dac8dSAditi Ghag lock_sock(sk); 3292c96dac8dSAditi Ghag 3293c96dac8dSAditi Ghag if (unlikely(sk_unhashed(sk))) { 3294c96dac8dSAditi Ghag ret = SEQ_SKIP; 3295c96dac8dSAditi Ghag goto unlock; 3296c96dac8dSAditi Ghag } 3297c96dac8dSAditi Ghag 32985788b3a0SYonghong Song uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); 32995788b3a0SYonghong Song meta.seq = seq; 33005788b3a0SYonghong Song prog = bpf_iter_get_info(&meta, false); 3301c96dac8dSAditi Ghag ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket); 3302c96dac8dSAditi Ghag 3303c96dac8dSAditi Ghag unlock: 3304c96dac8dSAditi Ghag release_sock(sk); 3305c96dac8dSAditi Ghag return ret; 3306c96dac8dSAditi Ghag } 3307c96dac8dSAditi Ghag 3308c96dac8dSAditi Ghag static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter) 3309c96dac8dSAditi Ghag { 3310c96dac8dSAditi Ghag while (iter->cur_sk < iter->end_sk) 3311c96dac8dSAditi Ghag sock_put(iter->batch[iter->cur_sk++]); 33125788b3a0SYonghong Song } 33135788b3a0SYonghong Song 33145788b3a0SYonghong Song static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v) 33155788b3a0SYonghong Song { 3316c96dac8dSAditi Ghag struct bpf_udp_iter_state *iter = seq->private; 33175788b3a0SYonghong Song struct bpf_iter_meta meta; 33185788b3a0SYonghong Song struct bpf_prog *prog; 33195788b3a0SYonghong Song 33205788b3a0SYonghong Song if (!v) { 33215788b3a0SYonghong Song meta.seq = seq; 33225788b3a0SYonghong Song prog = bpf_iter_get_info(&meta, true); 33235788b3a0SYonghong Song if (prog) 33245788b3a0SYonghong Song (void)udp_prog_seq_show(prog, &meta, v, 0, 0); 33255788b3a0SYonghong Song } 33265788b3a0SYonghong Song 3327c96dac8dSAditi Ghag if (iter->cur_sk < iter->end_sk) { 3328c96dac8dSAditi Ghag bpf_iter_udp_put_batch(iter); 3329c96dac8dSAditi Ghag iter->st_bucket_done = false; 3330c96dac8dSAditi Ghag } 33315788b3a0SYonghong Song } 33325788b3a0SYonghong Song 33335788b3a0SYonghong Song static const struct seq_operations bpf_iter_udp_seq_ops = { 3334c96dac8dSAditi Ghag .start = bpf_iter_udp_seq_start, 3335c96dac8dSAditi Ghag .next = 
bpf_iter_udp_seq_next, 33365788b3a0SYonghong Song .stop = bpf_iter_udp_seq_stop, 33375788b3a0SYonghong Song .show = bpf_iter_udp_seq_show, 33385788b3a0SYonghong Song }; 33395788b3a0SYonghong Song #endif 33405788b3a0SYonghong Song 3341f44b1c51SAditi Ghag static unsigned short seq_file_family(const struct seq_file *seq) 3342f44b1c51SAditi Ghag { 3343f44b1c51SAditi Ghag const struct udp_seq_afinfo *afinfo; 3344f44b1c51SAditi Ghag 3345f44b1c51SAditi Ghag #ifdef CONFIG_BPF_SYSCALL 3346f44b1c51SAditi Ghag /* BPF iterator: bpf programs to filter sockets. */ 3347f44b1c51SAditi Ghag if (seq->op == &bpf_iter_udp_seq_ops) 3348f44b1c51SAditi Ghag return AF_UNSPEC; 3349f44b1c51SAditi Ghag #endif 3350f44b1c51SAditi Ghag 3351f44b1c51SAditi Ghag /* Proc fs iterator */ 3352f44b1c51SAditi Ghag afinfo = pde_data(file_inode(seq->file)); 3353f44b1c51SAditi Ghag return afinfo->family; 3354f44b1c51SAditi Ghag } 3355f44b1c51SAditi Ghag 3356c3506372SChristoph Hellwig const struct seq_operations udp_seq_ops = { 3357a3d2599bSChristoph Hellwig .start = udp_seq_start, 3358a3d2599bSChristoph Hellwig .next = udp_seq_next, 3359a3d2599bSChristoph Hellwig .stop = udp_seq_stop, 3360a3d2599bSChristoph Hellwig .show = udp4_seq_show, 3361a3d2599bSChristoph Hellwig }; 3362c3506372SChristoph Hellwig EXPORT_SYMBOL(udp_seq_ops); 336373cb88ecSArjan van de Ven 3364db8dac20SDavid S. Miller static struct udp_seq_afinfo udp4_seq_afinfo = { 3365db8dac20SDavid S. Miller .family = AF_INET, 3366478aee5dSKuniyuki Iwashima .udp_table = NULL, 3367db8dac20SDavid S. Miller }; 3368db8dac20SDavid S. Miller 33692c8c1e72SAlexey Dobriyan static int __net_init udp4_proc_init_net(struct net *net) 337015439febSPavel Emelyanov { 3371c3506372SChristoph Hellwig if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops, 3372c3506372SChristoph Hellwig sizeof(struct udp_iter_state), &udp4_seq_afinfo)) 3373a3d2599bSChristoph Hellwig return -ENOMEM; 3374a3d2599bSChristoph Hellwig return 0; 337515439febSPavel Emelyanov } 337615439febSPavel Emelyanov 33772c8c1e72SAlexey Dobriyan static void __net_exit udp4_proc_exit_net(struct net *net) 337815439febSPavel Emelyanov { 3379a3d2599bSChristoph Hellwig remove_proc_entry("udp", net->proc_net); 338015439febSPavel Emelyanov } 338115439febSPavel Emelyanov 338215439febSPavel Emelyanov static struct pernet_operations udp4_net_ops = { 338315439febSPavel Emelyanov .init = udp4_proc_init_net, 338415439febSPavel Emelyanov .exit = udp4_proc_exit_net, 338515439febSPavel Emelyanov }; 338615439febSPavel Emelyanov 3387db8dac20SDavid S. Miller int __init udp4_proc_init(void) 3388db8dac20SDavid S. Miller { 338915439febSPavel Emelyanov return register_pernet_subsys(&udp4_net_ops); 3390db8dac20SDavid S. Miller } 3391db8dac20SDavid S. Miller 3392db8dac20SDavid S. Miller void udp4_proc_exit(void) 3393db8dac20SDavid S. Miller { 339415439febSPavel Emelyanov unregister_pernet_subsys(&udp4_net_ops); 3395db8dac20SDavid S. 
Miller } 33961da177e4SLinus Torvalds #endif /* CONFIG_PROC_FS */ 33971da177e4SLinus Torvalds 3398f86dcc5aSEric Dumazet static __initdata unsigned long uhash_entries; 3399f86dcc5aSEric Dumazet static int __init set_uhash_entries(char *str) 3400645ca708SEric Dumazet { 3401413c27d8SEldad Zack ssize_t ret; 3402413c27d8SEldad Zack 3403f86dcc5aSEric Dumazet if (!str) 3404f86dcc5aSEric Dumazet return 0; 3405413c27d8SEldad Zack 3406413c27d8SEldad Zack ret = kstrtoul(str, 0, &uhash_entries); 3407413c27d8SEldad Zack if (ret) 3408413c27d8SEldad Zack return 0; 3409413c27d8SEldad Zack 3410f86dcc5aSEric Dumazet if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) 3411f86dcc5aSEric Dumazet uhash_entries = UDP_HTABLE_SIZE_MIN; 3412f86dcc5aSEric Dumazet return 1; 3413f86dcc5aSEric Dumazet } 3414f86dcc5aSEric Dumazet __setup("uhash_entries=", set_uhash_entries); 3415645ca708SEric Dumazet 3416f86dcc5aSEric Dumazet void __init udp_table_init(struct udp_table *table, const char *name) 3417f86dcc5aSEric Dumazet { 3418f86dcc5aSEric Dumazet unsigned int i; 3419f86dcc5aSEric Dumazet 3420f86dcc5aSEric Dumazet table->hash = alloc_large_system_hash(name, 3421512615b6SEric Dumazet 2 * sizeof(struct udp_hslot), 3422f86dcc5aSEric Dumazet uhash_entries, 3423f86dcc5aSEric Dumazet 21, /* one slot per 2 MB */ 3424f86dcc5aSEric Dumazet 0, 3425f86dcc5aSEric Dumazet &table->log, 3426f86dcc5aSEric Dumazet &table->mask, 342731fe62b9STim Bird UDP_HTABLE_SIZE_MIN, 34289804985bSKuniyuki Iwashima UDP_HTABLE_SIZE_MAX); 342931fe62b9STim Bird 3430512615b6SEric Dumazet table->hash2 = table->hash + (table->mask + 1); 3431f86dcc5aSEric Dumazet for (i = 0; i <= table->mask; i++) { 3432ca065d0cSEric Dumazet INIT_HLIST_HEAD(&table->hash[i].head); 3433fdcc8aa9SEric Dumazet table->hash[i].count = 0; 3434645ca708SEric Dumazet spin_lock_init(&table->hash[i].lock); 3435645ca708SEric Dumazet } 3436512615b6SEric Dumazet for (i = 0; i <= table->mask; i++) { 3437ca065d0cSEric Dumazet INIT_HLIST_HEAD(&table->hash2[i].head); 3438512615b6SEric Dumazet table->hash2[i].count = 0; 3439512615b6SEric Dumazet spin_lock_init(&table->hash2[i].lock); 3440512615b6SEric Dumazet } 3441645ca708SEric Dumazet } 3442645ca708SEric Dumazet 3443723b8e46STom Herbert u32 udp_flow_hashrnd(void) 3444723b8e46STom Herbert { 3445723b8e46STom Herbert static u32 hashrnd __read_mostly; 3446723b8e46STom Herbert 3447723b8e46STom Herbert net_get_random_once(&hashrnd, sizeof(hashrnd)); 3448723b8e46STom Herbert 3449723b8e46STom Herbert return hashrnd; 3450723b8e46STom Herbert } 3451723b8e46STom Herbert EXPORT_SYMBOL(udp_flow_hashrnd); 3452723b8e46STom Herbert 34539804985bSKuniyuki Iwashima static void __net_init udp_sysctl_init(struct net *net) 34541e802951STonghao Zhang { 3455100fdd1fSEric Dumazet net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE; 3456100fdd1fSEric Dumazet net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE; 34571e802951STonghao Zhang 34581e802951STonghao Zhang #ifdef CONFIG_NET_L3_MASTER_DEV 34591e802951STonghao Zhang net->ipv4.sysctl_udp_l3mdev_accept = 0; 34601e802951STonghao Zhang #endif 34619804985bSKuniyuki Iwashima } 34629804985bSKuniyuki Iwashima 34639804985bSKuniyuki Iwashima static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries) 34649804985bSKuniyuki Iwashima { 34659804985bSKuniyuki Iwashima struct udp_table *udptable; 34669804985bSKuniyuki Iwashima int i; 34679804985bSKuniyuki Iwashima 34689804985bSKuniyuki Iwashima udptable = kmalloc(sizeof(*udptable), GFP_KERNEL); 34699804985bSKuniyuki Iwashima if (!udptable) 34709804985bSKuniyuki 
Iwashima goto out; 34719804985bSKuniyuki Iwashima 34729804985bSKuniyuki Iwashima udptable->hash = vmalloc_huge(hash_entries * 2 * sizeof(struct udp_hslot), 34739804985bSKuniyuki Iwashima GFP_KERNEL_ACCOUNT); 34749804985bSKuniyuki Iwashima if (!udptable->hash) 34759804985bSKuniyuki Iwashima goto free_table; 34769804985bSKuniyuki Iwashima 34779804985bSKuniyuki Iwashima udptable->hash2 = udptable->hash + hash_entries; 34789804985bSKuniyuki Iwashima udptable->mask = hash_entries - 1; 34799804985bSKuniyuki Iwashima udptable->log = ilog2(hash_entries); 34809804985bSKuniyuki Iwashima 34819804985bSKuniyuki Iwashima for (i = 0; i < hash_entries; i++) { 34829804985bSKuniyuki Iwashima INIT_HLIST_HEAD(&udptable->hash[i].head); 34839804985bSKuniyuki Iwashima udptable->hash[i].count = 0; 34849804985bSKuniyuki Iwashima spin_lock_init(&udptable->hash[i].lock); 34859804985bSKuniyuki Iwashima 34869804985bSKuniyuki Iwashima INIT_HLIST_HEAD(&udptable->hash2[i].head); 34879804985bSKuniyuki Iwashima udptable->hash2[i].count = 0; 34889804985bSKuniyuki Iwashima spin_lock_init(&udptable->hash2[i].lock); 34899804985bSKuniyuki Iwashima } 34909804985bSKuniyuki Iwashima 34919804985bSKuniyuki Iwashima return udptable; 34929804985bSKuniyuki Iwashima 34939804985bSKuniyuki Iwashima free_table: 34949804985bSKuniyuki Iwashima kfree(udptable); 34959804985bSKuniyuki Iwashima out: 34969804985bSKuniyuki Iwashima return NULL; 34979804985bSKuniyuki Iwashima } 34989804985bSKuniyuki Iwashima 34999804985bSKuniyuki Iwashima static void __net_exit udp_pernet_table_free(struct net *net) 35009804985bSKuniyuki Iwashima { 35019804985bSKuniyuki Iwashima struct udp_table *udptable = net->ipv4.udp_table; 35029804985bSKuniyuki Iwashima 35039804985bSKuniyuki Iwashima if (udptable == &udp_table) 35049804985bSKuniyuki Iwashima return; 35059804985bSKuniyuki Iwashima 35069804985bSKuniyuki Iwashima kvfree(udptable->hash); 35079804985bSKuniyuki Iwashima kfree(udptable); 35089804985bSKuniyuki Iwashima } 35099804985bSKuniyuki Iwashima 35109804985bSKuniyuki Iwashima static void __net_init udp_set_table(struct net *net) 35119804985bSKuniyuki Iwashima { 35129804985bSKuniyuki Iwashima struct udp_table *udptable; 35139804985bSKuniyuki Iwashima unsigned int hash_entries; 35149804985bSKuniyuki Iwashima struct net *old_net; 35159804985bSKuniyuki Iwashima 35169804985bSKuniyuki Iwashima if (net_eq(net, &init_net)) 35179804985bSKuniyuki Iwashima goto fallback; 35189804985bSKuniyuki Iwashima 35199804985bSKuniyuki Iwashima old_net = current->nsproxy->net_ns; 35209804985bSKuniyuki Iwashima hash_entries = READ_ONCE(old_net->ipv4.sysctl_udp_child_hash_entries); 35219804985bSKuniyuki Iwashima if (!hash_entries) 35229804985bSKuniyuki Iwashima goto fallback; 35239804985bSKuniyuki Iwashima 35249804985bSKuniyuki Iwashima /* Set min to keep the bitmap on stack in udp_lib_get_port() */ 35259804985bSKuniyuki Iwashima if (hash_entries < UDP_HTABLE_SIZE_MIN_PERNET) 35269804985bSKuniyuki Iwashima hash_entries = UDP_HTABLE_SIZE_MIN_PERNET; 35279804985bSKuniyuki Iwashima else 35289804985bSKuniyuki Iwashima hash_entries = roundup_pow_of_two(hash_entries); 35299804985bSKuniyuki Iwashima 35309804985bSKuniyuki Iwashima udptable = udp_pernet_table_alloc(hash_entries); 35319804985bSKuniyuki Iwashima if (udptable) { 35329804985bSKuniyuki Iwashima net->ipv4.udp_table = udptable; 35339804985bSKuniyuki Iwashima } else { 35349804985bSKuniyuki Iwashima pr_warn("Failed to allocate UDP hash table (entries: %u) " 35359804985bSKuniyuki Iwashima "for a netns, fallback to the global one\n", 
static int __net_init udp_pernet_init(struct net *net)
{
        udp_sysctl_init(net);
        udp_set_table(net);

        return 0;
}

static void __net_exit udp_pernet_exit(struct net *net)
{
        udp_pernet_table_free(net);
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
        .init = udp_pernet_init,
        .exit = udp_pernet_exit,
};

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
                     struct udp_sock *udp_sk, uid_t uid, int bucket)

static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
                                      unsigned int new_batch_sz)
{
        struct sock **new_batch;

        new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
                                   GFP_USER | __GFP_NOWARN);
        if (!new_batch)
                return -ENOMEM;

        bpf_iter_udp_put_batch(iter);
        kvfree(iter->batch);
        iter->batch = new_batch;
        iter->max_sk = new_batch_sz;

        return 0;
}

#define INIT_BATCH_SZ 16

static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
{
        struct bpf_udp_iter_state *iter = priv_data;
        int ret;

        ret = bpf_iter_init_seq_net(priv_data, aux);
        if (ret)
                return ret;

        ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ);
        if (ret)
                bpf_iter_fini_seq_net(priv_data);

        return ret;
}

static void bpf_iter_fini_udp(void *priv_data)
{
        struct bpf_udp_iter_state *iter = priv_data;

        bpf_iter_fini_seq_net(priv_data);
        kvfree(iter->batch);
}

static const struct bpf_iter_seq_info udp_seq_info = {
        .seq_ops                = &bpf_iter_udp_seq_ops,
        .init_seq_private       = bpf_iter_init_udp,
        .fini_seq_private       = bpf_iter_fini_udp,
        .seq_priv_size          = sizeof(struct bpf_udp_iter_state),
};

static struct bpf_iter_reg udp_reg_info = {
        .target                 = "udp",
        .ctx_arg_info_size      = 1,
        .ctx_arg_info           = {
                { offsetof(struct bpf_iter__udp, udp_sk),
                  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
        },
        .seq_info               = &udp_seq_info,
};

static void __init bpf_iter_register(void)
{
        udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP];
        if (bpf_iter_reg_target(&udp_reg_info))
                pr_warn("Warning: could not register bpf iterator udp\n");
}
#endif
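/*
 * Illustrative sketch, not part of the kernel sources: the registration above
 * is what allows a BPF "iter/udp" program to walk every UDP socket.  A
 * minimal program of that type (patterned after the bpf_iter selftests; the
 * exact includes depend on the libbpf setup) could look like the following.
 * It would typically be attached with "bpftool iter pin prog.o /sys/fs/bpf/udp"
 * and read back with "cat /sys/fs/bpf/udp".  Kept under #if 0 so it is never
 * built as part of this file:
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/udp")
int dump_udp(struct bpf_iter__udp *ctx)
{
        struct seq_file *seq = ctx->meta->seq;
        struct udp_sock *udp_sk = ctx->udp_sk;

        if (!udp_sk)
                return 0;

        /* One line per socket: hash bucket and owning uid. */
        BPF_SEQ_PRINTF(seq, "bucket %d uid %u\n", ctx->bucket, ctx->uid);
        return 0;
}
#endif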
void __init udp_init(void)
{
        unsigned long limit;
        unsigned int i;

        udp_table_init(&udp_table, "UDP");
        limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
        sysctl_udp_mem[0] = limit / 4 * 3;
        sysctl_udp_mem[1] = limit;
        sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

        /* 16 spinlocks per cpu */
        udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
        udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
                                GFP_KERNEL);
        if (!udp_busylocks)
                panic("UDP: failed to alloc udp_busylocks\n");
        for (i = 0; i < (1U << udp_busylocks_log); i++)
                spin_lock_init(udp_busylocks + i);

        if (register_pernet_subsys(&udp_sysctl_ops))
                panic("UDP: failed to init sysctl parameters.\n");

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
        bpf_iter_register();
#endif
}
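/*
 * Worked example for the sizing above (assuming 4 KiB pages and, say, about
 * 4 GiB of free buffer pages at boot): nr_free_buffer_pages() ~= 1048576, so
 * limit = 131072 pages and udp_mem becomes roughly 98304 / 131072 / 196608
 * pages, i.e. about 384 MiB / 512 MiB / 768 MiB for the min / pressure / max
 * thresholds.  The busylock array holds 2^(ilog2(nr_cpu_ids) + 4) spinlocks,
 * i.e. 16 per CPU when the CPU count is a power of two.
 */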