11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * INET An implementation of the TCP/IP protocol suite for the LINUX 31da177e4SLinus Torvalds * operating system. INET is implemented using the BSD Socket 41da177e4SLinus Torvalds * interface as the means of communication with the user level. 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * The User Datagram Protocol (UDP). 71da177e4SLinus Torvalds * 802c30a84SJesper Juhl * Authors: Ross Biro 91da177e4SLinus Torvalds * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 101da177e4SLinus Torvalds * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 11113aa838SAlan Cox * Alan Cox, <alan@lxorguk.ukuu.org.uk> 121da177e4SLinus Torvalds * Hirokazu Takahashi, <taka@valinux.co.jp> 131da177e4SLinus Torvalds * 141da177e4SLinus Torvalds * Fixes: 151da177e4SLinus Torvalds * Alan Cox : verify_area() calls 161da177e4SLinus Torvalds * Alan Cox : stopped close while in use off icmp 171da177e4SLinus Torvalds * messages. Not a fix but a botch that 181da177e4SLinus Torvalds * for udp at least is 'valid'. 191da177e4SLinus Torvalds * Alan Cox : Fixed icmp handling properly 201da177e4SLinus Torvalds * Alan Cox : Correct error for oversized datagrams 211da177e4SLinus Torvalds * Alan Cox : Tidied select() semantics. 221da177e4SLinus Torvalds * Alan Cox : udp_err() fixed properly, also now 231da177e4SLinus Torvalds * select and read wake correctly on errors 241da177e4SLinus Torvalds * Alan Cox : udp_send verify_area moved to avoid mem leak 251da177e4SLinus Torvalds * Alan Cox : UDP can count its memory 261da177e4SLinus Torvalds * Alan Cox : send to an unknown connection causes 271da177e4SLinus Torvalds * an ECONNREFUSED off the icmp, but 281da177e4SLinus Torvalds * does NOT close. 291da177e4SLinus Torvalds * Alan Cox : Switched to new sk_buff handlers. No more backlog! 301da177e4SLinus Torvalds * Alan Cox : Using generic datagram code. Even smaller and the PEEK 311da177e4SLinus Torvalds * bug no longer crashes it. 
321da177e4SLinus Torvalds * Fred Van Kempen : Net2e support for sk->broadcast. 331da177e4SLinus Torvalds * Alan Cox : Uses skb_free_datagram 341da177e4SLinus Torvalds * Alan Cox : Added get/set sockopt support. 351da177e4SLinus Torvalds * Alan Cox : Broadcasting without option set returns EACCES. 361da177e4SLinus Torvalds * Alan Cox : No wakeup calls. Instead we now use the callbacks. 371da177e4SLinus Torvalds * Alan Cox : Use ip_tos and ip_ttl 381da177e4SLinus Torvalds * Alan Cox : SNMP Mibs 391da177e4SLinus Torvalds * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support. 401da177e4SLinus Torvalds * Matt Dillon : UDP length checks. 411da177e4SLinus Torvalds * Alan Cox : Smarter af_inet used properly. 421da177e4SLinus Torvalds * Alan Cox : Use new kernel side addressing. 431da177e4SLinus Torvalds * Alan Cox : Incorrect return on truncated datagram receive. 441da177e4SLinus Torvalds * Arnt Gulbrandsen : New udp_send and stuff 451da177e4SLinus Torvalds * Alan Cox : Cache last socket 461da177e4SLinus Torvalds * Alan Cox : Route cache 471da177e4SLinus Torvalds * Jon Peatfield : Minor efficiency fix to sendto(). 481da177e4SLinus Torvalds * Mike Shaver : RFC1122 checks. 491da177e4SLinus Torvalds * Alan Cox : Nonblocking error fix. 501da177e4SLinus Torvalds * Willy Konynenberg : Transparent proxying support. 511da177e4SLinus Torvalds * Mike McLagan : Routing by source 521da177e4SLinus Torvalds * David S. Miller : New socket lookup architecture. 531da177e4SLinus Torvalds * Last socket cache retained as it 541da177e4SLinus Torvalds * does have a high hit rate. 551da177e4SLinus Torvalds * Olaf Kirch : Don't linearise iovec on sendmsg. 561da177e4SLinus Torvalds * Andi Kleen : Some cleanups, cache destination entry 571da177e4SLinus Torvalds * for connect. 581da177e4SLinus Torvalds * Vitaly E. Lavrov : Transparent proxy revived after year coma. 
591da177e4SLinus Torvalds * Melvin Smith : Check msg_name not msg_namelen in sendto(), 601da177e4SLinus Torvalds * return ENOTCONN for unconnected sockets (POSIX) 611da177e4SLinus Torvalds * Janos Farkas : don't deliver multi/broadcasts to a different 621da177e4SLinus Torvalds * bound-to-device socket 631da177e4SLinus Torvalds * Hirokazu Takahashi : HW checksumming for outgoing UDP 641da177e4SLinus Torvalds * datagrams. 651da177e4SLinus Torvalds * Hirokazu Takahashi : sendfile() on UDP works now. 661da177e4SLinus Torvalds * Arnaldo C. Melo : convert /proc/net/udp to seq_file 671da177e4SLinus Torvalds * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which 681da177e4SLinus Torvalds * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind 691da177e4SLinus Torvalds * a single port at the same time. 701da177e4SLinus Torvalds * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support 71342f0234SJames Chapman * James Chapman : Add L2TP encapsulation type. 721da177e4SLinus Torvalds * 731da177e4SLinus Torvalds * 741da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or 751da177e4SLinus Torvalds * modify it under the terms of the GNU General Public License 761da177e4SLinus Torvalds * as published by the Free Software Foundation; either version 771da177e4SLinus Torvalds * 2 of the License, or (at your option) any later version. 
781da177e4SLinus Torvalds */ 791da177e4SLinus Torvalds 80afd46503SJoe Perches #define pr_fmt(fmt) "UDP: " fmt 81afd46503SJoe Perches 821da177e4SLinus Torvalds #include <asm/uaccess.h> 831da177e4SLinus Torvalds #include <asm/ioctls.h> 8495766fffSHideo Aoki #include <linux/bootmem.h> 858203efb3SEric Dumazet #include <linux/highmem.h> 868203efb3SEric Dumazet #include <linux/swap.h> 871da177e4SLinus Torvalds #include <linux/types.h> 881da177e4SLinus Torvalds #include <linux/fcntl.h> 891da177e4SLinus Torvalds #include <linux/module.h> 901da177e4SLinus Torvalds #include <linux/socket.h> 911da177e4SLinus Torvalds #include <linux/sockios.h> 9214c85021SArnaldo Carvalho de Melo #include <linux/igmp.h> 936e540309SShawn Bohrer #include <linux/inetdevice.h> 941da177e4SLinus Torvalds #include <linux/in.h> 951da177e4SLinus Torvalds #include <linux/errno.h> 961da177e4SLinus Torvalds #include <linux/timer.h> 971da177e4SLinus Torvalds #include <linux/mm.h> 981da177e4SLinus Torvalds #include <linux/inet.h> 991da177e4SLinus Torvalds #include <linux/netdevice.h> 1005a0e3ad6STejun Heo #include <linux/slab.h> 101c752f073SArnaldo Carvalho de Melo #include <net/tcp_states.h> 1021da177e4SLinus Torvalds #include <linux/skbuff.h> 1031da177e4SLinus Torvalds #include <linux/proc_fs.h> 1041da177e4SLinus Torvalds #include <linux/seq_file.h> 105457c4cbcSEric W. 
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

/* Global UDP socket table: primary hash chains keyed on local port,
 * secondary ("portaddr") chains keyed on (local address, local port).
 */
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

/* Sysctl-tunable memory accounting limits shared by all UDP sockets. */
long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

/* Running total of memory charged to UDP sockets, checked against
 * sysctl_udp_mem elsewhere. NOTE(review): units are per the generic socket
 * memory accounting (pages) — confirm against sk_memory_allocated users.
 */
atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

/* Scan one primary-hash chain for sockets conflicting with @sk binding to
 * local port @num.  Caller holds hslot->lock (see udp_lib_get_port()).
 *
 * Two modes:
 *  - @bitmap == NULL: return 1 as soon as a socket bound to exactly @num
 *    conflicts with @sk, 0 otherwise.
 *  - @bitmap != NULL: never return early; instead mark the bit
 *    (udp_port_hash >> log) for every conflicting socket on the chain so
 *    the caller can test all candidate ports of this chain in one pass.
 *    Always returns 0 in this mode.
 *
 * A socket sk2 does NOT conflict when: both ends set SO_REUSEADDR, or they
 * are bound to different devices, or both set SO_REUSEPORT with matching
 * uid while @sk has no reuseport group attached yet, or @saddr_comp says
 * the bound local addresses differ (wildcard matches anything here).
 */
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2,
						 bool match_wildcard),
			       unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			if (!bitmap)
				return 1;
			__set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
		}
	}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num.
 *
 * Same conflict rules as udp_lib_lport_inuse(), but walks a secondary
 * (addr, port) chain, which is usually much shorter.  Takes hslot2->lock
 * itself.  Returns 1 on conflict, 0 otherwise.
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk,
				int (*saddr_comp)(const struct sock *sk1,
						  const struct sock *sk2,
						  bool match_wildcard))
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			res = 1;
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

/* Join @sk to an SO_REUSEPORT group.  If a fully compatible socket (same
 * family, v6only flag, port hash, bound device, owning uid, and exact —
 * non-wildcard — local address per @saddr_same) already sits on this
 * primary chain, attach to its group; otherwise allocate a new group,
 * unless one was already created via setsockopt.  Propagates the return
 * value of reuseport_add_sock()/reuseport_alloc(), 0 on success.
 */
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
				  int (*saddr_same)(const struct sock *sk1,
						    const struct sock *sk2,
						    bool match_wildcard))
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    (*saddr_same)(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up (0 requests an ephemeral port)
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 *
 *  Returns 0 and hashes @sk into both the primary and secondary tables on
 *  success, non-zero when no port could be bound.
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     int (*saddr_comp)(const struct sock *sk1,
				       const struct sock *sk2,
				       bool match_wildcard),
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		/* Ephemeral port: pick a random chain, then probe every
		 * port of that chain (collected into @bitmap in one scan)
		 * before moving on to the next chain.
		 */
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    saddr_comp, udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			/* Long primary chain: prefer checking the (usually
			 * shorter) secondary chains — the exact-address one
			 * and, if different, the wildcard-address one.
			 */
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2 &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2,
						     sk, saddr_comp);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk, saddr_comp);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
					saddr_comp, 0))
			goto fail_unlock;
	}
found:
	/* Commit the binding.  udp_portaddr_hash was precomputed with
	 * port 0 (see udp_v4_get_port()); XOR folds the real port in.
	 */
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot, saddr_comp)) {
			/* Roll the binding back (XOR removes the port from
			 * the portaddr hash again) and report failure.
			 */
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		/* NOTE(review): IPv6 reuseport sockets are appended at the
		 * tail while all others go to the head — presumably so
		 * lookups encounter sockets in a deliberate order; confirm
		 * against the commit introducing the IS_ENABLED() branch.
		 */
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	/* Lookups traverse these chains under rcu_read_lock(), so the
	 * socket must be freed only after a grace period.
	 */
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
 * match_wildcard == false: addresses must be exactly the same, i.e.
 * 0.0.0.0 only equals to 0.0.0.0
 *
 * Always returns 0 (no match) when sk2 is an IPv6-only socket.
 */
int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2,
			 bool match_wildcard)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	if (!ipv6_only_sock(sk2)) {
		if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)
			return 1;
		if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr)
			return match_wildcard;
	}
	return 0;
}
/* Secondary ("portaddr") hash: local address mixed with a per-netns salt,
 * XORed with the port.  The XOR lets udp_lib_get_port() fold the chosen
 * port into a hash precomputed with port 0.
 */
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

/* IPv4 bind: precompute the secondary hashes (wildcard-address variant for
 * conflict checks, partial exact-address variant for the socket itself)
 * and defer to the family-independent udp_lib_get_port().
 */
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}

/* Score @sk as a candidate receiver for a packet from saddr:sport to
 * daddr:hnum arriving on device @dif.
 *
 * Returns -1 when the socket cannot match at all (wrong netns, wrong local
 * port, IPv6-only, or any bound/connected field that disagrees with the
 * packet).  Otherwise: base 2 for an AF_INET socket (1 for others), +4 for
 * each exact match on bound local address, connected remote address,
 * connected remote port, and bound device, +1 when the socket last ran on
 * this CPU.  Higher score == more specific match.
 */
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum, int dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

/* Per-flow hash used to pick one socket out of an SO_REUSEPORT group.
 * The secret is initialized lazily, once, on first use.
 */
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock()
 *
 * Walk one secondary-hash chain and return the best-scoring socket, or
 * NULL.  If the best candidate has SO_REUSEPORT, reuseport_select_sock()
 * may pick a group member directly; otherwise equal-scoring reuseport
 * sockets are sampled via reciprocal_scale() so each gets a fair chance
 * (@matches counts the ties seen so far).
 */
static struct sock *udp4_lib_lookup2(struct net *net,
		__be32 saddr, __be16 sport,
		__be32 daddr, unsigned int hnum, int dif,
		struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			badness = score;
			result = sk;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 *
 * Main IPv4 receive lookup.  When the primary chain is long (> 10), search
 * the secondary (addr, port) chains instead: first the exact-daddr slot,
 * then the wildcard (INADDR_ANY) slot — each only if it is actually shorter
 * than the primary chain, and skipping the wildcard slot when it aliases
 * the exact one.  Otherwise fall through to a linear scan of the primary
 * chain with the same best-score / reuseport selection as above.
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif,
						  hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
Miller 561607c4aafSKOVACS Krisztian static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, 562607c4aafSKOVACS Krisztian __be16 sport, __be16 dport, 563645ca708SEric Dumazet struct udp_table *udptable) 564607c4aafSKOVACS Krisztian { 565607c4aafSKOVACS Krisztian const struct iphdr *iph = ip_hdr(skb); 566607c4aafSKOVACS Krisztian 567ed7cbbceSAlexander Duyck return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport, 568607c4aafSKOVACS Krisztian iph->daddr, dport, inet_iif(skb), 569538950a1SCraig Gallek udptable, skb); 570607c4aafSKOVACS Krisztian } 571607c4aafSKOVACS Krisztian 57263058308STom Herbert struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, 57363058308STom Herbert __be16 sport, __be16 dport) 57463058308STom Herbert { 575ed7cbbceSAlexander Duyck return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table); 57663058308STom Herbert } 57763058308STom Herbert EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb); 57863058308STom Herbert 579ca065d0cSEric Dumazet /* Must be called under rcu_read_lock(). 580ca065d0cSEric Dumazet * Does increment socket refcount. 
581ca065d0cSEric Dumazet */ 582ca065d0cSEric Dumazet #if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \ 58330f58158SArnd Bergmann IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \ 58430f58158SArnd Bergmann IS_ENABLED(CONFIG_NF_SOCKET_IPV4) 585bcd41303SKOVACS Krisztian struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, 586bcd41303SKOVACS Krisztian __be32 daddr, __be16 dport, int dif) 587bcd41303SKOVACS Krisztian { 588ca065d0cSEric Dumazet struct sock *sk; 589ca065d0cSEric Dumazet 590ca065d0cSEric Dumazet sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport, 591ca065d0cSEric Dumazet dif, &udp_table, NULL); 592ca065d0cSEric Dumazet if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) 593ca065d0cSEric Dumazet sk = NULL; 594ca065d0cSEric Dumazet return sk; 595bcd41303SKOVACS Krisztian } 596bcd41303SKOVACS Krisztian EXPORT_SYMBOL_GPL(udp4_lib_lookup); 597ca065d0cSEric Dumazet #endif 598bcd41303SKOVACS Krisztian 599421b3885SShawn Bohrer static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, 600421b3885SShawn Bohrer __be16 loc_port, __be32 loc_addr, 601421b3885SShawn Bohrer __be16 rmt_port, __be32 rmt_addr, 602421b3885SShawn Bohrer int dif, unsigned short hnum) 603421b3885SShawn Bohrer { 604421b3885SShawn Bohrer struct inet_sock *inet = inet_sk(sk); 605421b3885SShawn Bohrer 606421b3885SShawn Bohrer if (!net_eq(sock_net(sk), net) || 607421b3885SShawn Bohrer udp_sk(sk)->udp_port_hash != hnum || 608421b3885SShawn Bohrer (inet->inet_daddr && inet->inet_daddr != rmt_addr) || 609421b3885SShawn Bohrer (inet->inet_dport != rmt_port && inet->inet_dport) || 610421b3885SShawn Bohrer (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || 611421b3885SShawn Bohrer ipv6_only_sock(sk) || 612421b3885SShawn Bohrer (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) 613421b3885SShawn Bohrer return false; 614421b3885SShawn Bohrer if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) 615421b3885SShawn Bohrer return false; 616421b3885SShawn Bohrer 
return true; 617421b3885SShawn Bohrer } 618421b3885SShawn Bohrer 619db8dac20SDavid S. Miller /* 620db8dac20SDavid S. Miller * This routine is called by the ICMP module when it gets some 621db8dac20SDavid S. Miller * sort of error condition. If err < 0 then the socket should 622db8dac20SDavid S. Miller * be closed and the error returned to the user. If err > 0 623db8dac20SDavid S. Miller * it's just the icmp type << 8 | icmp code. 624db8dac20SDavid S. Miller * Header points to the ip header of the error packet. We move 625db8dac20SDavid S. Miller * on past this. Then (as it used to claim before adjustment) 626db8dac20SDavid S. Miller * header points to the first 8 bytes of the udp header. We need 627db8dac20SDavid S. Miller * to find the appropriate port. 628db8dac20SDavid S. Miller */ 629db8dac20SDavid S. Miller 630645ca708SEric Dumazet void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) 631db8dac20SDavid S. Miller { 632db8dac20SDavid S. Miller struct inet_sock *inet; 633b71d1d42SEric Dumazet const struct iphdr *iph = (const struct iphdr *)skb->data; 634db8dac20SDavid S. Miller struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); 635db8dac20SDavid S. Miller const int type = icmp_hdr(skb)->type; 636db8dac20SDavid S. Miller const int code = icmp_hdr(skb)->code; 637db8dac20SDavid S. Miller struct sock *sk; 638db8dac20SDavid S. Miller int harderr; 639db8dac20SDavid S. Miller int err; 640fd54d716SPavel Emelyanov struct net *net = dev_net(skb->dev); 641db8dac20SDavid S. Miller 642fd54d716SPavel Emelyanov sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, 643538950a1SCraig Gallek iph->saddr, uh->source, skb->dev->ifindex, udptable, 644538950a1SCraig Gallek NULL); 64551456b29SIan Morris if (!sk) { 6465d3848bcSEric Dumazet __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); 647db8dac20SDavid S. Miller return; /* No socket for error */ 648db8dac20SDavid S. Miller } 649db8dac20SDavid S. Miller 650db8dac20SDavid S. 
Miller err = 0; 651db8dac20SDavid S. Miller harderr = 0; 652db8dac20SDavid S. Miller inet = inet_sk(sk); 653db8dac20SDavid S. Miller 654db8dac20SDavid S. Miller switch (type) { 655db8dac20SDavid S. Miller default: 656db8dac20SDavid S. Miller case ICMP_TIME_EXCEEDED: 657db8dac20SDavid S. Miller err = EHOSTUNREACH; 658db8dac20SDavid S. Miller break; 659db8dac20SDavid S. Miller case ICMP_SOURCE_QUENCH: 660db8dac20SDavid S. Miller goto out; 661db8dac20SDavid S. Miller case ICMP_PARAMETERPROB: 662db8dac20SDavid S. Miller err = EPROTO; 663db8dac20SDavid S. Miller harderr = 1; 664db8dac20SDavid S. Miller break; 665db8dac20SDavid S. Miller case ICMP_DEST_UNREACH: 666db8dac20SDavid S. Miller if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ 66736393395SDavid S. Miller ipv4_sk_update_pmtu(skb, sk, info); 668db8dac20SDavid S. Miller if (inet->pmtudisc != IP_PMTUDISC_DONT) { 669db8dac20SDavid S. Miller err = EMSGSIZE; 670db8dac20SDavid S. Miller harderr = 1; 671db8dac20SDavid S. Miller break; 672db8dac20SDavid S. Miller } 673db8dac20SDavid S. Miller goto out; 674db8dac20SDavid S. Miller } 675db8dac20SDavid S. Miller err = EHOSTUNREACH; 676db8dac20SDavid S. Miller if (code <= NR_ICMP_UNREACH) { 677db8dac20SDavid S. Miller harderr = icmp_err_convert[code].fatal; 678db8dac20SDavid S. Miller err = icmp_err_convert[code].errno; 679db8dac20SDavid S. Miller } 680db8dac20SDavid S. Miller break; 68155be7a9cSDavid S. Miller case ICMP_REDIRECT: 68255be7a9cSDavid S. Miller ipv4_sk_redirect(skb, sk); 6831a462d18SDuan Jiong goto out; 684db8dac20SDavid S. Miller } 685db8dac20SDavid S. Miller 686db8dac20SDavid S. Miller /* 687db8dac20SDavid S. Miller * RFC1122: OK. Passes ICMP errors back to application, as per 688db8dac20SDavid S. Miller * 4.1.3.3. 689db8dac20SDavid S. Miller */ 690db8dac20SDavid S. Miller if (!inet->recverr) { 691db8dac20SDavid S. Miller if (!harderr || sk->sk_state != TCP_ESTABLISHED) 692db8dac20SDavid S. 
Miller goto out; 693b1faf566SEric Dumazet } else 694db8dac20SDavid S. Miller ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); 695b1faf566SEric Dumazet 696db8dac20SDavid S. Miller sk->sk_err = err; 697db8dac20SDavid S. Miller sk->sk_error_report(sk); 698db8dac20SDavid S. Miller out: 699ca065d0cSEric Dumazet return; 700db8dac20SDavid S. Miller } 701db8dac20SDavid S. Miller 702db8dac20SDavid S. Miller void udp_err(struct sk_buff *skb, u32 info) 703db8dac20SDavid S. Miller { 704645ca708SEric Dumazet __udp4_lib_err(skb, info, &udp_table); 705db8dac20SDavid S. Miller } 706db8dac20SDavid S. Miller 707db8dac20SDavid S. Miller /* 708db8dac20SDavid S. Miller * Throw away all pending data and cancel the corking. Socket is locked. 709db8dac20SDavid S. Miller */ 71036d926b9SDenis V. Lunev void udp_flush_pending_frames(struct sock *sk) 711db8dac20SDavid S. Miller { 712db8dac20SDavid S. Miller struct udp_sock *up = udp_sk(sk); 713db8dac20SDavid S. Miller 714db8dac20SDavid S. Miller if (up->pending) { 715db8dac20SDavid S. Miller up->len = 0; 716db8dac20SDavid S. Miller up->pending = 0; 717db8dac20SDavid S. Miller ip_flush_pending_frames(sk); 718db8dac20SDavid S. Miller } 719db8dac20SDavid S. Miller } 72036d926b9SDenis V. Lunev EXPORT_SYMBOL(udp_flush_pending_frames); 721db8dac20SDavid S. Miller 722db8dac20SDavid S. Miller /** 723f6b9664fSHerbert Xu * udp4_hwcsum - handle outgoing HW checksumming 724db8dac20SDavid S. Miller * @skb: sk_buff containing the filled-in UDP header 725db8dac20SDavid S. Miller * (checksum field must be zeroed out) 726f6b9664fSHerbert Xu * @src: source IP address 727f6b9664fSHerbert Xu * @dst: destination IP address 728db8dac20SDavid S. Miller */ 729c26bf4a5SThomas Graf void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) 730db8dac20SDavid S. Miller { 731db8dac20SDavid S. 
Miller struct udphdr *uh = udp_hdr(skb); 732f6b9664fSHerbert Xu int offset = skb_transport_offset(skb); 733f6b9664fSHerbert Xu int len = skb->len - offset; 734f6b9664fSHerbert Xu int hlen = len; 735db8dac20SDavid S. Miller __wsum csum = 0; 736db8dac20SDavid S. Miller 737ebbe495fSWANG Cong if (!skb_has_frag_list(skb)) { 738db8dac20SDavid S. Miller /* 739db8dac20SDavid S. Miller * Only one fragment on the socket. 740db8dac20SDavid S. Miller */ 741db8dac20SDavid S. Miller skb->csum_start = skb_transport_header(skb) - skb->head; 742db8dac20SDavid S. Miller skb->csum_offset = offsetof(struct udphdr, check); 743f6b9664fSHerbert Xu uh->check = ~csum_tcpudp_magic(src, dst, len, 744f6b9664fSHerbert Xu IPPROTO_UDP, 0); 745db8dac20SDavid S. Miller } else { 746ebbe495fSWANG Cong struct sk_buff *frags; 747ebbe495fSWANG Cong 748db8dac20SDavid S. Miller /* 749db8dac20SDavid S. Miller * HW-checksum won't work as there are two or more 750db8dac20SDavid S. Miller * fragments on the socket so that all csums of sk_buffs 751db8dac20SDavid S. Miller * should be together 752db8dac20SDavid S. Miller */ 753ebbe495fSWANG Cong skb_walk_frags(skb, frags) { 754f6b9664fSHerbert Xu csum = csum_add(csum, frags->csum); 755f6b9664fSHerbert Xu hlen -= frags->len; 756ebbe495fSWANG Cong } 757db8dac20SDavid S. Miller 758f6b9664fSHerbert Xu csum = skb_checksum(skb, offset, hlen, csum); 759db8dac20SDavid S. Miller skb->ip_summed = CHECKSUM_NONE; 760db8dac20SDavid S. Miller 761db8dac20SDavid S. Miller uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); 762db8dac20SDavid S. Miller if (uh->check == 0) 763db8dac20SDavid S. Miller uh->check = CSUM_MANGLED_0; 764db8dac20SDavid S. Miller } 765db8dac20SDavid S. Miller } 766c26bf4a5SThomas Graf EXPORT_SYMBOL_GPL(udp4_hwcsum); 767db8dac20SDavid S. Miller 768af5fcba7STom Herbert /* Function to set UDP checksum for an IPv4 UDP packet. This is intended 769af5fcba7STom Herbert * for the simple case like when setting the checksum for a UDP tunnel. 
770af5fcba7STom Herbert */ 771af5fcba7STom Herbert void udp_set_csum(bool nocheck, struct sk_buff *skb, 772af5fcba7STom Herbert __be32 saddr, __be32 daddr, int len) 773af5fcba7STom Herbert { 774af5fcba7STom Herbert struct udphdr *uh = udp_hdr(skb); 775af5fcba7STom Herbert 776179bc67fSEdward Cree if (nocheck) { 777af5fcba7STom Herbert uh->check = 0; 778179bc67fSEdward Cree } else if (skb_is_gso(skb)) { 779af5fcba7STom Herbert uh->check = ~udp_v4_check(len, saddr, daddr, 0); 780179bc67fSEdward Cree } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 781179bc67fSEdward Cree uh->check = 0; 782179bc67fSEdward Cree uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb)); 783179bc67fSEdward Cree if (uh->check == 0) 784179bc67fSEdward Cree uh->check = CSUM_MANGLED_0; 785d75f1306SEdward Cree } else { 786af5fcba7STom Herbert skb->ip_summed = CHECKSUM_PARTIAL; 787af5fcba7STom Herbert skb->csum_start = skb_transport_header(skb) - skb->head; 788af5fcba7STom Herbert skb->csum_offset = offsetof(struct udphdr, check); 789af5fcba7STom Herbert uh->check = ~udp_v4_check(len, saddr, daddr, 0); 790af5fcba7STom Herbert } 791af5fcba7STom Herbert } 792af5fcba7STom Herbert EXPORT_SYMBOL(udp_set_csum); 793af5fcba7STom Herbert 79479ab0531SDavid S. Miller static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) 795f6b9664fSHerbert Xu { 796f6b9664fSHerbert Xu struct sock *sk = skb->sk; 797f6b9664fSHerbert Xu struct inet_sock *inet = inet_sk(sk); 798f6b9664fSHerbert Xu struct udphdr *uh; 799f6b9664fSHerbert Xu int err = 0; 800f6b9664fSHerbert Xu int is_udplite = IS_UDPLITE(sk); 801f6b9664fSHerbert Xu int offset = skb_transport_offset(skb); 802f6b9664fSHerbert Xu int len = skb->len - offset; 803f6b9664fSHerbert Xu __wsum csum = 0; 804f6b9664fSHerbert Xu 805f6b9664fSHerbert Xu /* 806f6b9664fSHerbert Xu * Create a UDP header 807f6b9664fSHerbert Xu */ 808f6b9664fSHerbert Xu uh = udp_hdr(skb); 809f6b9664fSHerbert Xu uh->source = inet->inet_sport; 81079ab0531SDavid S. 
Miller uh->dest = fl4->fl4_dport; 811f6b9664fSHerbert Xu uh->len = htons(len); 812f6b9664fSHerbert Xu uh->check = 0; 813f6b9664fSHerbert Xu 814f6b9664fSHerbert Xu if (is_udplite) /* UDP-Lite */ 815f6b9664fSHerbert Xu csum = udplite_csum(skb); 816f6b9664fSHerbert Xu 81728448b80STom Herbert else if (sk->sk_no_check_tx) { /* UDP csum disabled */ 818f6b9664fSHerbert Xu 819f6b9664fSHerbert Xu skb->ip_summed = CHECKSUM_NONE; 820f6b9664fSHerbert Xu goto send; 821f6b9664fSHerbert Xu 822f6b9664fSHerbert Xu } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 823f6b9664fSHerbert Xu 82479ab0531SDavid S. Miller udp4_hwcsum(skb, fl4->saddr, fl4->daddr); 825f6b9664fSHerbert Xu goto send; 826f6b9664fSHerbert Xu 827f6b9664fSHerbert Xu } else 828f6b9664fSHerbert Xu csum = udp_csum(skb); 829f6b9664fSHerbert Xu 830f6b9664fSHerbert Xu /* add protocol-dependent pseudo-header */ 83179ab0531SDavid S. Miller uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len, 832f6b9664fSHerbert Xu sk->sk_protocol, csum); 833f6b9664fSHerbert Xu if (uh->check == 0) 834f6b9664fSHerbert Xu uh->check = CSUM_MANGLED_0; 835f6b9664fSHerbert Xu 836f6b9664fSHerbert Xu send: 837b5ec8eeaSEric Dumazet err = ip_send_skb(sock_net(sk), skb); 838f6b9664fSHerbert Xu if (err) { 839f6b9664fSHerbert Xu if (err == -ENOBUFS && !inet->recverr) { 8406aef70a8SEric Dumazet UDP_INC_STATS(sock_net(sk), 841f6b9664fSHerbert Xu UDP_MIB_SNDBUFERRORS, is_udplite); 842f6b9664fSHerbert Xu err = 0; 843f6b9664fSHerbert Xu } 844f6b9664fSHerbert Xu } else 8456aef70a8SEric Dumazet UDP_INC_STATS(sock_net(sk), 846f6b9664fSHerbert Xu UDP_MIB_OUTDATAGRAMS, is_udplite); 847f6b9664fSHerbert Xu return err; 848f6b9664fSHerbert Xu } 849f6b9664fSHerbert Xu 850db8dac20SDavid S. Miller /* 851db8dac20SDavid S. Miller * Push out all pending data as one UDP datagram. Socket is locked. 852db8dac20SDavid S. Miller */ 8538822b64aSHannes Frederic Sowa int udp_push_pending_frames(struct sock *sk) 854db8dac20SDavid S. 
Miller { 855db8dac20SDavid S. Miller struct udp_sock *up = udp_sk(sk); 856db8dac20SDavid S. Miller struct inet_sock *inet = inet_sk(sk); 857b6f21b26SDavid S. Miller struct flowi4 *fl4 = &inet->cork.fl.u.ip4; 858db8dac20SDavid S. Miller struct sk_buff *skb; 859db8dac20SDavid S. Miller int err = 0; 860db8dac20SDavid S. Miller 86177968b78SDavid S. Miller skb = ip_finish_skb(sk, fl4); 862f6b9664fSHerbert Xu if (!skb) 863db8dac20SDavid S. Miller goto out; 864db8dac20SDavid S. Miller 86579ab0531SDavid S. Miller err = udp_send_skb(skb, fl4); 866db8dac20SDavid S. Miller 867db8dac20SDavid S. Miller out: 868db8dac20SDavid S. Miller up->len = 0; 869db8dac20SDavid S. Miller up->pending = 0; 870db8dac20SDavid S. Miller return err; 871db8dac20SDavid S. Miller } 8728822b64aSHannes Frederic Sowa EXPORT_SYMBOL(udp_push_pending_frames); 873db8dac20SDavid S. Miller 8741b784140SYing Xue int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 875db8dac20SDavid S. Miller { 876db8dac20SDavid S. Miller struct inet_sock *inet = inet_sk(sk); 877db8dac20SDavid S. Miller struct udp_sock *up = udp_sk(sk); 878e474995fSDavid S. Miller struct flowi4 fl4_stack; 879b6f21b26SDavid S. Miller struct flowi4 *fl4; 880db8dac20SDavid S. Miller int ulen = len; 881db8dac20SDavid S. Miller struct ipcm_cookie ipc; 882db8dac20SDavid S. Miller struct rtable *rt = NULL; 883db8dac20SDavid S. Miller int free = 0; 884db8dac20SDavid S. Miller int connected = 0; 885db8dac20SDavid S. Miller __be32 daddr, faddr, saddr; 886db8dac20SDavid S. Miller __be16 dport; 887db8dac20SDavid S. Miller u8 tos; 888db8dac20SDavid S. Miller int err, is_udplite = IS_UDPLITE(sk); 889db8dac20SDavid S. Miller int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 890db8dac20SDavid S. Miller int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 891903ab86dSHerbert Xu struct sk_buff *skb; 892f6d8bd05SEric Dumazet struct ip_options_data opt_copy; 893db8dac20SDavid S. Miller 894db8dac20SDavid S. 
Miller if (len > 0xFFFF) 895db8dac20SDavid S. Miller return -EMSGSIZE; 896db8dac20SDavid S. Miller 897db8dac20SDavid S. Miller /* 898db8dac20SDavid S. Miller * Check the flags. 899db8dac20SDavid S. Miller */ 900db8dac20SDavid S. Miller 901db8dac20SDavid S. Miller if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ 902db8dac20SDavid S. Miller return -EOPNOTSUPP; 903db8dac20SDavid S. Miller 904db8dac20SDavid S. Miller ipc.opt = NULL; 9052244d07bSOliver Hartkopp ipc.tx_flags = 0; 906aa661581SFrancesco Fusco ipc.ttl = 0; 907aa661581SFrancesco Fusco ipc.tos = -1; 908db8dac20SDavid S. Miller 909903ab86dSHerbert Xu getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 910903ab86dSHerbert Xu 911f5fca608SDavid S. Miller fl4 = &inet->cork.fl.u.ip4; 912db8dac20SDavid S. Miller if (up->pending) { 913db8dac20SDavid S. Miller /* 914db8dac20SDavid S. Miller * There are pending frames. 915db8dac20SDavid S. Miller * The socket lock must be held while it's corked. 916db8dac20SDavid S. Miller */ 917db8dac20SDavid S. Miller lock_sock(sk); 918db8dac20SDavid S. Miller if (likely(up->pending)) { 919db8dac20SDavid S. Miller if (unlikely(up->pending != AF_INET)) { 920db8dac20SDavid S. Miller release_sock(sk); 921db8dac20SDavid S. Miller return -EINVAL; 922db8dac20SDavid S. Miller } 923db8dac20SDavid S. Miller goto do_append_data; 924db8dac20SDavid S. Miller } 925db8dac20SDavid S. Miller release_sock(sk); 926db8dac20SDavid S. Miller } 927db8dac20SDavid S. Miller ulen += sizeof(struct udphdr); 928db8dac20SDavid S. Miller 929db8dac20SDavid S. Miller /* 930db8dac20SDavid S. Miller * Get and verify the address. 931db8dac20SDavid S. Miller */ 932db8dac20SDavid S. Miller if (msg->msg_name) { 933342dfc30SSteffen Hurrle DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); 934db8dac20SDavid S. Miller if (msg->msg_namelen < sizeof(*usin)) 935db8dac20SDavid S. Miller return -EINVAL; 936db8dac20SDavid S. 
Miller if (usin->sin_family != AF_INET) { 937db8dac20SDavid S. Miller if (usin->sin_family != AF_UNSPEC) 938db8dac20SDavid S. Miller return -EAFNOSUPPORT; 939db8dac20SDavid S. Miller } 940db8dac20SDavid S. Miller 941db8dac20SDavid S. Miller daddr = usin->sin_addr.s_addr; 942db8dac20SDavid S. Miller dport = usin->sin_port; 943db8dac20SDavid S. Miller if (dport == 0) 944db8dac20SDavid S. Miller return -EINVAL; 945db8dac20SDavid S. Miller } else { 946db8dac20SDavid S. Miller if (sk->sk_state != TCP_ESTABLISHED) 947db8dac20SDavid S. Miller return -EDESTADDRREQ; 948c720c7e8SEric Dumazet daddr = inet->inet_daddr; 949c720c7e8SEric Dumazet dport = inet->inet_dport; 950db8dac20SDavid S. Miller /* Open fast path for connected socket. 951db8dac20SDavid S. Miller Route will not be used, if at least one option is set. 952db8dac20SDavid S. Miller */ 953db8dac20SDavid S. Miller connected = 1; 954db8dac20SDavid S. Miller } 955c14ac945SSoheil Hassas Yeganeh 956c14ac945SSoheil Hassas Yeganeh ipc.sockc.tsflags = sk->sk_tsflags; 957c720c7e8SEric Dumazet ipc.addr = inet->inet_saddr; 958db8dac20SDavid S. Miller ipc.oif = sk->sk_bound_dev_if; 959bf84a010SDaniel Borkmann 960db8dac20SDavid S. Miller if (msg->msg_controllen) { 96124025c46SSoheil Hassas Yeganeh err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6); 96291948309SEric Dumazet if (unlikely(err)) { 96391948309SEric Dumazet kfree(ipc.opt); 964db8dac20SDavid S. Miller return err; 96591948309SEric Dumazet } 966db8dac20SDavid S. Miller if (ipc.opt) 967db8dac20SDavid S. Miller free = 1; 968db8dac20SDavid S. Miller connected = 0; 969db8dac20SDavid S. 
Miller } 970f6d8bd05SEric Dumazet if (!ipc.opt) { 971f6d8bd05SEric Dumazet struct ip_options_rcu *inet_opt; 972f6d8bd05SEric Dumazet 973f6d8bd05SEric Dumazet rcu_read_lock(); 974f6d8bd05SEric Dumazet inet_opt = rcu_dereference(inet->inet_opt); 975f6d8bd05SEric Dumazet if (inet_opt) { 976f6d8bd05SEric Dumazet memcpy(&opt_copy, inet_opt, 977f6d8bd05SEric Dumazet sizeof(*inet_opt) + inet_opt->opt.optlen); 978f6d8bd05SEric Dumazet ipc.opt = &opt_copy.opt; 979f6d8bd05SEric Dumazet } 980f6d8bd05SEric Dumazet rcu_read_unlock(); 981f6d8bd05SEric Dumazet } 982db8dac20SDavid S. Miller 983db8dac20SDavid S. Miller saddr = ipc.addr; 984db8dac20SDavid S. Miller ipc.addr = faddr = daddr; 985db8dac20SDavid S. Miller 986c14ac945SSoheil Hassas Yeganeh sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); 987c14ac945SSoheil Hassas Yeganeh 988f6d8bd05SEric Dumazet if (ipc.opt && ipc.opt->opt.srr) { 989db8dac20SDavid S. Miller if (!daddr) 990db8dac20SDavid S. Miller return -EINVAL; 991f6d8bd05SEric Dumazet faddr = ipc.opt->opt.faddr; 992db8dac20SDavid S. Miller connected = 0; 993db8dac20SDavid S. Miller } 994aa661581SFrancesco Fusco tos = get_rttos(&ipc, inet); 995db8dac20SDavid S. Miller if (sock_flag(sk, SOCK_LOCALROUTE) || 996db8dac20SDavid S. Miller (msg->msg_flags & MSG_DONTROUTE) || 997f6d8bd05SEric Dumazet (ipc.opt && ipc.opt->opt.is_strictroute)) { 998db8dac20SDavid S. Miller tos |= RTO_ONLINK; 999db8dac20SDavid S. Miller connected = 0; 1000db8dac20SDavid S. Miller } 1001db8dac20SDavid S. Miller 1002db8dac20SDavid S. Miller if (ipv4_is_multicast(daddr)) { 1003db8dac20SDavid S. Miller if (!ipc.oif) 1004db8dac20SDavid S. Miller ipc.oif = inet->mc_index; 1005db8dac20SDavid S. Miller if (!saddr) 1006db8dac20SDavid S. Miller saddr = inet->mc_addr; 1007db8dac20SDavid S. Miller connected = 0; 100876e21053SErich E. Hoover } else if (!ipc.oif) 100976e21053SErich E. Hoover ipc.oif = inet->uc_index; 1010db8dac20SDavid S. Miller 1011db8dac20SDavid S. 
Miller if (connected) 1012db8dac20SDavid S. Miller rt = (struct rtable *)sk_dst_check(sk, 0); 1013db8dac20SDavid S. Miller 101451456b29SIan Morris if (!rt) { 101584a3aa00SPavel Emelyanov struct net *net = sock_net(sk); 10169a24abfaSDavid Ahern __u8 flow_flags = inet_sk_flowi_flags(sk); 101784a3aa00SPavel Emelyanov 1018e474995fSDavid S. Miller fl4 = &fl4_stack; 10199a24abfaSDavid Ahern 10209a24abfaSDavid Ahern flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, 10219a24abfaSDavid Ahern RT_SCOPE_UNIVERSE, sk->sk_protocol, 10229a24abfaSDavid Ahern flow_flags, 1023e2d118a1SLorenzo Colitti faddr, saddr, dport, inet->inet_sport, 1024e2d118a1SLorenzo Colitti sk->sk_uid); 1025c0951cbcSDavid S. Miller 1026e474995fSDavid S. Miller security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 1027e474995fSDavid S. Miller rt = ip_route_output_flow(net, fl4, sk); 1028b23dd4feSDavid S. Miller if (IS_ERR(rt)) { 1029b23dd4feSDavid S. Miller err = PTR_ERR(rt); 103006dc94b1SDavid S. Miller rt = NULL; 1031db8dac20SDavid S. Miller if (err == -ENETUNREACH) 1032f1d8cba6SEric Dumazet IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 1033db8dac20SDavid S. Miller goto out; 1034db8dac20SDavid S. Miller } 1035db8dac20SDavid S. Miller 1036db8dac20SDavid S. Miller err = -EACCES; 1037db8dac20SDavid S. Miller if ((rt->rt_flags & RTCF_BROADCAST) && 1038db8dac20SDavid S. Miller !sock_flag(sk, SOCK_BROADCAST)) 1039db8dac20SDavid S. Miller goto out; 1040db8dac20SDavid S. Miller if (connected) 1041d8d1f30bSChangli Gao sk_dst_set(sk, dst_clone(&rt->dst)); 1042db8dac20SDavid S. Miller } 1043db8dac20SDavid S. Miller 1044db8dac20SDavid S. Miller if (msg->msg_flags&MSG_CONFIRM) 1045db8dac20SDavid S. Miller goto do_confirm; 1046db8dac20SDavid S. Miller back_from_confirm: 1047db8dac20SDavid S. Miller 1048e474995fSDavid S. Miller saddr = fl4->saddr; 1049db8dac20SDavid S. Miller if (!ipc.addr) 1050e474995fSDavid S. Miller daddr = ipc.addr = fl4->daddr; 1051db8dac20SDavid S. 
Miller 1052903ab86dSHerbert Xu /* Lockless fast path for the non-corking case. */ 1053903ab86dSHerbert Xu if (!corkreq) { 1054f69e6d13SAl Viro skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, 1055903ab86dSHerbert Xu sizeof(struct udphdr), &ipc, &rt, 1056903ab86dSHerbert Xu msg->msg_flags); 1057903ab86dSHerbert Xu err = PTR_ERR(skb); 105850c3a487SYOSHIFUJI Hideaki / 吉藤英明 if (!IS_ERR_OR_NULL(skb)) 105979ab0531SDavid S. Miller err = udp_send_skb(skb, fl4); 1060903ab86dSHerbert Xu goto out; 1061903ab86dSHerbert Xu } 1062903ab86dSHerbert Xu 1063db8dac20SDavid S. Miller lock_sock(sk); 1064db8dac20SDavid S. Miller if (unlikely(up->pending)) { 1065db8dac20SDavid S. Miller /* The socket is already corked while preparing it. */ 1066db8dac20SDavid S. Miller /* ... which is an evident application bug. --ANK */ 1067db8dac20SDavid S. Miller release_sock(sk); 1068db8dac20SDavid S. Miller 1069ba7a46f1SJoe Perches net_dbg_ratelimited("cork app bug 2\n"); 1070db8dac20SDavid S. Miller err = -EINVAL; 1071db8dac20SDavid S. Miller goto out; 1072db8dac20SDavid S. Miller } 1073db8dac20SDavid S. Miller /* 1074db8dac20SDavid S. Miller * Now cork the socket to pend data. 1075db8dac20SDavid S. Miller */ 1076b6f21b26SDavid S. Miller fl4 = &inet->cork.fl.u.ip4; 1077b6f21b26SDavid S. Miller fl4->daddr = daddr; 1078b6f21b26SDavid S. Miller fl4->saddr = saddr; 10799cce96dfSDavid S. Miller fl4->fl4_dport = dport; 10809cce96dfSDavid S. Miller fl4->fl4_sport = inet->inet_sport; 1081db8dac20SDavid S. Miller up->pending = AF_INET; 1082db8dac20SDavid S. Miller 1083db8dac20SDavid S. Miller do_append_data: 1084db8dac20SDavid S. Miller up->len += ulen; 1085f69e6d13SAl Viro err = ip_append_data(sk, fl4, getfrag, msg, ulen, 10862e77d89bSEric Dumazet sizeof(struct udphdr), &ipc, &rt, 1087db8dac20SDavid S. Miller corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 1088db8dac20SDavid S. Miller if (err) 1089db8dac20SDavid S. Miller udp_flush_pending_frames(sk); 1090db8dac20SDavid S. 
Miller else if (!corkreq) 1091db8dac20SDavid S. Miller err = udp_push_pending_frames(sk); 1092db8dac20SDavid S. Miller else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) 1093db8dac20SDavid S. Miller up->pending = 0; 1094db8dac20SDavid S. Miller release_sock(sk); 1095db8dac20SDavid S. Miller 1096db8dac20SDavid S. Miller out: 1097db8dac20SDavid S. Miller ip_rt_put(rt); 1098db8dac20SDavid S. Miller if (free) 1099db8dac20SDavid S. Miller kfree(ipc.opt); 1100db8dac20SDavid S. Miller if (!err) 1101db8dac20SDavid S. Miller return len; 1102db8dac20SDavid S. Miller /* 1103db8dac20SDavid S. Miller * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting 1104db8dac20SDavid S. Miller * ENOBUFS might not be good (it's not tunable per se), but otherwise 1105db8dac20SDavid S. Miller * we don't have a good statistic (IpOutDiscards but it can be too many 1106db8dac20SDavid S. Miller * things). We could add another new stat but at least for now that 1107db8dac20SDavid S. Miller * seems like overkill. 1108db8dac20SDavid S. Miller */ 1109db8dac20SDavid S. Miller if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 11106aef70a8SEric Dumazet UDP_INC_STATS(sock_net(sk), 1111629ca23cSPavel Emelyanov UDP_MIB_SNDBUFERRORS, is_udplite); 1112db8dac20SDavid S. Miller } 1113db8dac20SDavid S. Miller return err; 1114db8dac20SDavid S. Miller 1115db8dac20SDavid S. Miller do_confirm: 1116d8d1f30bSChangli Gao dst_confirm(&rt->dst); 1117db8dac20SDavid S. Miller if (!(msg->msg_flags&MSG_PROBE) || len) 1118db8dac20SDavid S. Miller goto back_from_confirm; 1119db8dac20SDavid S. Miller err = 0; 1120db8dac20SDavid S. Miller goto out; 1121db8dac20SDavid S. Miller } 1122c482c568SEric Dumazet EXPORT_SYMBOL(udp_sendmsg); 1123db8dac20SDavid S. Miller 1124db8dac20SDavid S. Miller int udp_sendpage(struct sock *sk, struct page *page, int offset, 1125db8dac20SDavid S. Miller size_t size, int flags) 1126db8dac20SDavid S. Miller { 1127f5fca608SDavid S. 
Miller struct inet_sock *inet = inet_sk(sk); 1128db8dac20SDavid S. Miller struct udp_sock *up = udp_sk(sk); 1129db8dac20SDavid S. Miller int ret; 1130db8dac20SDavid S. Miller 1131d3f7d56aSShawn Landden if (flags & MSG_SENDPAGE_NOTLAST) 1132d3f7d56aSShawn Landden flags |= MSG_MORE; 1133d3f7d56aSShawn Landden 1134db8dac20SDavid S. Miller if (!up->pending) { 1135db8dac20SDavid S. Miller struct msghdr msg = { .msg_flags = flags|MSG_MORE }; 1136db8dac20SDavid S. Miller 1137db8dac20SDavid S. Miller /* Call udp_sendmsg to specify destination address which 1138db8dac20SDavid S. Miller * sendpage interface can't pass. 1139db8dac20SDavid S. Miller * This will succeed only when the socket is connected. 1140db8dac20SDavid S. Miller */ 11411b784140SYing Xue ret = udp_sendmsg(sk, &msg, 0); 1142db8dac20SDavid S. Miller if (ret < 0) 1143db8dac20SDavid S. Miller return ret; 1144db8dac20SDavid S. Miller } 1145db8dac20SDavid S. Miller 1146db8dac20SDavid S. Miller lock_sock(sk); 1147db8dac20SDavid S. Miller 1148db8dac20SDavid S. Miller if (unlikely(!up->pending)) { 1149db8dac20SDavid S. Miller release_sock(sk); 1150db8dac20SDavid S. Miller 1151ba7a46f1SJoe Perches net_dbg_ratelimited("udp cork app bug 3\n"); 1152db8dac20SDavid S. Miller return -EINVAL; 1153db8dac20SDavid S. Miller } 1154db8dac20SDavid S. Miller 1155f5fca608SDavid S. Miller ret = ip_append_page(sk, &inet->cork.fl.u.ip4, 1156f5fca608SDavid S. Miller page, offset, size, flags); 1157db8dac20SDavid S. Miller if (ret == -EOPNOTSUPP) { 1158db8dac20SDavid S. Miller release_sock(sk); 1159db8dac20SDavid S. Miller return sock_no_sendpage(sk->sk_socket, page, offset, 1160db8dac20SDavid S. Miller size, flags); 1161db8dac20SDavid S. Miller } 1162db8dac20SDavid S. Miller if (ret < 0) { 1163db8dac20SDavid S. Miller udp_flush_pending_frames(sk); 1164db8dac20SDavid S. Miller goto out; 1165db8dac20SDavid S. Miller } 1166db8dac20SDavid S. Miller 1167db8dac20SDavid S. Miller up->len += size; 1168db8dac20SDavid S. 
Miller if (!(up->corkflag || (flags&MSG_MORE))) 1169db8dac20SDavid S. Miller ret = udp_push_pending_frames(sk); 1170db8dac20SDavid S. Miller if (!ret) 1171db8dac20SDavid S. Miller ret = size; 1172db8dac20SDavid S. Miller out: 1173db8dac20SDavid S. Miller release_sock(sk); 1174db8dac20SDavid S. Miller return ret; 1175db8dac20SDavid S. Miller } 1176db8dac20SDavid S. Miller 11777c13f97fSPaolo Abeni /* fully reclaim rmem/fwd memory allocated for skb */ 1178f970bd9eSPaolo Abeni static void udp_rmem_release(struct sock *sk, int size, int partial) 1179f970bd9eSPaolo Abeni { 11806b229cf7SEric Dumazet struct udp_sock *up = udp_sk(sk); 1181f970bd9eSPaolo Abeni int amt; 1182f970bd9eSPaolo Abeni 11836b229cf7SEric Dumazet if (likely(partial)) { 11846b229cf7SEric Dumazet up->forward_deficit += size; 11856b229cf7SEric Dumazet size = up->forward_deficit; 11866b229cf7SEric Dumazet if (size < (sk->sk_rcvbuf >> 2) && 11876b229cf7SEric Dumazet !skb_queue_empty(&sk->sk_receive_queue)) 11886b229cf7SEric Dumazet return; 11896b229cf7SEric Dumazet } else { 11906b229cf7SEric Dumazet size += up->forward_deficit; 11916b229cf7SEric Dumazet } 11926b229cf7SEric Dumazet up->forward_deficit = 0; 11936b229cf7SEric Dumazet 1194f970bd9eSPaolo Abeni sk->sk_forward_alloc += size; 1195f970bd9eSPaolo Abeni amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1); 1196f970bd9eSPaolo Abeni sk->sk_forward_alloc -= amt; 1197f970bd9eSPaolo Abeni 1198f970bd9eSPaolo Abeni if (amt) 1199f970bd9eSPaolo Abeni __sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT); 1200*02ab0d13SEric Dumazet 1201*02ab0d13SEric Dumazet atomic_sub(size, &sk->sk_rmem_alloc); 1202f970bd9eSPaolo Abeni } 1203f970bd9eSPaolo Abeni 1204c84d9490SEric Dumazet /* Note: called with sk_receive_queue.lock held. 1205c84d9490SEric Dumazet * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch 1206c84d9490SEric Dumazet * This avoids a cache line miss while receive_queue lock is held. 
1207c84d9490SEric Dumazet * Look at __udp_enqueue_schedule_skb() to find where this copy is done. 1208c84d9490SEric Dumazet */ 12097c13f97fSPaolo Abeni void udp_skb_destructor(struct sock *sk, struct sk_buff *skb) 1210f970bd9eSPaolo Abeni { 1211c84d9490SEric Dumazet udp_rmem_release(sk, skb->dev_scratch, 1); 1212f970bd9eSPaolo Abeni } 12137c13f97fSPaolo Abeni EXPORT_SYMBOL(udp_skb_destructor); 1214f970bd9eSPaolo Abeni 12154b272750SEric Dumazet /* Idea of busylocks is to let producers grab an extra spinlock 12164b272750SEric Dumazet * to relieve pressure on the receive_queue spinlock shared by consumer. 12174b272750SEric Dumazet * Under flood, this means that only one producer can be in line 12184b272750SEric Dumazet * trying to acquire the receive_queue spinlock. 12194b272750SEric Dumazet * These busylock can be allocated on a per cpu manner, instead of a 12204b272750SEric Dumazet * per socket one (that would consume a cache line per socket) 12214b272750SEric Dumazet */ 12224b272750SEric Dumazet static int udp_busylocks_log __read_mostly; 12234b272750SEric Dumazet static spinlock_t *udp_busylocks __read_mostly; 12244b272750SEric Dumazet 12254b272750SEric Dumazet static spinlock_t *busylock_acquire(void *ptr) 12264b272750SEric Dumazet { 12274b272750SEric Dumazet spinlock_t *busy; 12284b272750SEric Dumazet 12294b272750SEric Dumazet busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log); 12304b272750SEric Dumazet spin_lock(busy); 12314b272750SEric Dumazet return busy; 12324b272750SEric Dumazet } 12334b272750SEric Dumazet 12344b272750SEric Dumazet static void busylock_release(spinlock_t *busy) 12354b272750SEric Dumazet { 12364b272750SEric Dumazet if (busy) 12374b272750SEric Dumazet spin_unlock(busy); 12384b272750SEric Dumazet } 12394b272750SEric Dumazet 1240f970bd9eSPaolo Abeni int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) 1241f970bd9eSPaolo Abeni { 1242f970bd9eSPaolo Abeni struct sk_buff_head *list = &sk->sk_receive_queue; 1243f970bd9eSPaolo 
	/* NOTE(review): this chunk starts inside __udp_enqueue_schedule_skb();
	 * the function signature lies above the visible region.
	 */
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	/* Copy skb->truesize into skb->dev_scratch to avoid a cache line miss
	 * in udp_skb_destructor()
	 */
	skb->dev_scratch = size;

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		/* not enough forward-allocated memory: raise the accounting
		 * in SK_MEM_QUANTUM-sized steps while holding the queue lock
		 */
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

/* Socket destructor for UDP: drain the receive queue and hand back all
 * forward-allocated memory before the generic inet teardown.
 */
void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	unsigned int total = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		total
 += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

/* Initialise a freshly created UDP socket: install the UDP-specific
 * destructor.  Returns 0.
 */
int udp_init_sock(struct sock *sk)
{
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

/* Release an skb consumed by the receive path.  When a peek offset is
 * active (sk_peek_off >= 0), rewind it by @len under the socket lock
 * before freeing the skb.
 */
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}
	consume_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	while ((skb = skb_peek(rcvq)) != NULL &&
		udp_lib_checksum_complete(skb)) {
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
				IS_UDPLITE(sk));
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
				IS_UDPLITE(sk));
		atomic_inc(&sk->sk_drops);
		__skb_unlink(skb, rcvq);
		/* the rmem charge of all dropped skbs is given back in
		 * one batch below
		 */
		total += skb->truesize;
		kfree_skb(skb);
	}
	res = skb ?
 skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		/* bytes currently charged to the send side */
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		/* length of the first queued datagram, clamped to 0 on an
		 * empty queue (first_packet_length() returns -1 then)
		 */
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	/* peeking is non-zero when a peek offset is in effect; off is the
	 * byte offset to start copying from
	 */
	peeking = off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = skb->len;
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = !udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || skb_csum_unnecessary(skb))
		err = skb_copy_datagram_msg(skb, off, msg, copied);
	else {
		/* copy and verify the checksum in a single pass */
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	/* a negative len rewinds the peek offset in skb_consume_udp() */
	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, skb, flags)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		/* port was not explicitly bound: release it too */
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

/* Socket-locked wrapper around __udp_disconnect() */
int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);

/* Remove the socket from both the primary (port) and secondary
 * (addr+port) hash chains.  The primary chain lock is taken first and
 * held across the secondary-chain update.
 */
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot = udp_hashslot(udptable, sock_net(sk),
				     udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		/* nothing to move when the slot is unchanged and no
		 * reuseport group has to be detached
		 */
		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				/* move between the old and new secondary
				 * chains under their respective locks
				 */
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

/* IPv4 wrapper: recompute the secondary (addr+port) hash and rehash */
static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

/* Enqueue the skb on the socket receive queue, maintaining RPS/NAPI
 * hints; on failure the skb is freed and MIB counters are bumped.
 * Returns 0 on success, -1 on error.
 */
int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		/* connected socket */
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static struct static_key udp_encap_needed __read_mostly;
/* Enable the static-key guarded encapsulation receive path; the key is
 * only ever incremented, never taken back down here.
 */
void udp_encap_enable(void)
{
	if (!static_key_enabled(&udp_encap_needed))
		static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);
	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	/* a socket filter will read the payload: the checksum must be
	 * verified before it runs
	 */
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	dst_hold(dst);
	old = xchg(&sk->sk_rx_dst, dst);
	dst_release(old);
}

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
Miller */ 1790e3163493SPavel Emelyanov static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 1791db8dac20SDavid S. Miller struct udphdr *uh, 1792db8dac20SDavid S. Miller __be32 saddr, __be32 daddr, 179336cbb245SRick Jones struct udp_table *udptable, 179436cbb245SRick Jones int proto) 1795db8dac20SDavid S. Miller { 1796ca065d0cSEric Dumazet struct sock *sk, *first = NULL; 17975cf3d461SDavid Held unsigned short hnum = ntohs(uh->dest); 17985cf3d461SDavid Held struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); 17992dc41cffSDavid Held unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); 1800ca065d0cSEric Dumazet unsigned int offset = offsetof(typeof(*sk), sk_node); 1801ca065d0cSEric Dumazet int dif = skb->dev->ifindex; 1802ca065d0cSEric Dumazet struct hlist_node *node; 1803ca065d0cSEric Dumazet struct sk_buff *nskb; 18042dc41cffSDavid Held 18052dc41cffSDavid Held if (use_hash2) { 18062dc41cffSDavid Held hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & 180773e2d5e3SPablo Neira udptable->mask; 180873e2d5e3SPablo Neira hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask; 18092dc41cffSDavid Held start_lookup: 181073e2d5e3SPablo Neira hslot = &udptable->hash2[hash2]; 18112dc41cffSDavid Held offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 18122dc41cffSDavid Held } 1813db8dac20SDavid S. 
Miller 1814ca065d0cSEric Dumazet sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { 1815ca065d0cSEric Dumazet if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr, 1816ca065d0cSEric Dumazet uh->source, saddr, dif, hnum)) 1817ca065d0cSEric Dumazet continue; 18181240d137SEric Dumazet 1819ca065d0cSEric Dumazet if (!first) { 1820ca065d0cSEric Dumazet first = sk; 1821ca065d0cSEric Dumazet continue; 1822ca065d0cSEric Dumazet } 1823ca065d0cSEric Dumazet nskb = skb_clone(skb, GFP_ATOMIC); 1824ca065d0cSEric Dumazet 1825ca065d0cSEric Dumazet if (unlikely(!nskb)) { 1826ca065d0cSEric Dumazet atomic_inc(&sk->sk_drops); 182702c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 1828ca065d0cSEric Dumazet IS_UDPLITE(sk)); 182902c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_INERRORS, 1830ca065d0cSEric Dumazet IS_UDPLITE(sk)); 1831ca065d0cSEric Dumazet continue; 1832ca065d0cSEric Dumazet } 1833ca065d0cSEric Dumazet if (udp_queue_rcv_skb(sk, nskb) > 0) 1834ca065d0cSEric Dumazet consume_skb(nskb); 1835ca065d0cSEric Dumazet } 18361240d137SEric Dumazet 18372dc41cffSDavid Held /* Also lookup *:port if we are using hash2 and haven't done so yet. */ 18382dc41cffSDavid Held if (use_hash2 && hash2 != hash2_any) { 18392dc41cffSDavid Held hash2 = hash2_any; 18402dc41cffSDavid Held goto start_lookup; 18412dc41cffSDavid Held } 18422dc41cffSDavid Held 1843ca065d0cSEric Dumazet if (first) { 1844ca065d0cSEric Dumazet if (udp_queue_rcv_skb(first, skb) > 0) 1845ca065d0cSEric Dumazet consume_skb(skb); 18461240d137SEric Dumazet } else { 1847ca065d0cSEric Dumazet kfree_skb(skb); 184802c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI, 184936cbb245SRick Jones proto == IPPROTO_UDPLITE); 18501240d137SEric Dumazet } 1851db8dac20SDavid S. Miller return 0; 1852db8dac20SDavid S. Miller } 1853db8dac20SDavid S. Miller 1854db8dac20SDavid S. Miller /* Initialize UDP checksum. If exited with zero value (success), 1855db8dac20SDavid S. 
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		/* trimming may have reallocated the header */
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;
Miller 19188afdd99aSEric Dumazet sk = skb_steal_sock(skb); 19198afdd99aSEric Dumazet if (sk) { 192097502231SEric Dumazet struct dst_entry *dst = skb_dst(skb); 1921421b3885SShawn Bohrer int ret; 1922421b3885SShawn Bohrer 192397502231SEric Dumazet if (unlikely(sk->sk_rx_dst != dst)) 192497502231SEric Dumazet udp_sk_rx_dst_set(sk, dst); 1925421b3885SShawn Bohrer 1926421b3885SShawn Bohrer ret = udp_queue_rcv_skb(sk, skb); 19278afdd99aSEric Dumazet sock_put(sk); 1928421b3885SShawn Bohrer /* a return value > 0 means to resubmit the input, but 1929421b3885SShawn Bohrer * it wants the return to be -protocol, or 0 1930421b3885SShawn Bohrer */ 1931421b3885SShawn Bohrer if (ret > 0) 1932421b3885SShawn Bohrer return -ret; 1933421b3885SShawn Bohrer return 0; 1934c18450a5SFabian Frederick } 1935c18450a5SFabian Frederick 1936db8dac20SDavid S. Miller if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 1937e3163493SPavel Emelyanov return __udp4_lib_mcast_deliver(net, skb, uh, 193836cbb245SRick Jones saddr, daddr, udptable, proto); 1939db8dac20SDavid S. Miller 1940607c4aafSKOVACS Krisztian sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 194100db4124SIan Morris if (sk) { 1942a5b50476SEliezer Tamir int ret; 1943a5b50476SEliezer Tamir 1944224d019cSTom Herbert if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) 19452abb7cdcSTom Herbert skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, 19462abb7cdcSTom Herbert inet_compute_pseudo); 19472abb7cdcSTom Herbert 1948a5b50476SEliezer Tamir ret = udp_queue_rcv_skb(sk, skb); 1949db8dac20SDavid S. Miller 1950db8dac20SDavid S. Miller /* a return value > 0 means to resubmit the input, but 1951db8dac20SDavid S. Miller * it wants the return to be -protocol, or 0 1952db8dac20SDavid S. Miller */ 1953db8dac20SDavid S. Miller if (ret > 0) 1954db8dac20SDavid S. Miller return -ret; 1955db8dac20SDavid S. Miller return 0; 1956db8dac20SDavid S. Miller } 1957db8dac20SDavid S. Miller 1958db8dac20SDavid S. 
Miller if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 1959db8dac20SDavid S. Miller goto drop; 1960db8dac20SDavid S. Miller nf_reset(skb); 1961db8dac20SDavid S. Miller 1962db8dac20SDavid S. Miller /* No socket. Drop packet silently, if checksum is wrong */ 1963db8dac20SDavid S. Miller if (udp_lib_checksum_complete(skb)) 1964db8dac20SDavid S. Miller goto csum_error; 1965db8dac20SDavid S. Miller 196602c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 1967db8dac20SDavid S. Miller icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 1968db8dac20SDavid S. Miller 1969db8dac20SDavid S. Miller /* 1970db8dac20SDavid S. Miller * Hmm. We got an UDP packet to a port to which we 1971db8dac20SDavid S. Miller * don't wanna listen. Ignore it. 1972db8dac20SDavid S. Miller */ 1973db8dac20SDavid S. Miller kfree_skb(skb); 1974db8dac20SDavid S. Miller return 0; 1975db8dac20SDavid S. Miller 1976db8dac20SDavid S. Miller short_packet: 1977ba7a46f1SJoe Perches net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", 1978afd46503SJoe Perches proto == IPPROTO_UDPLITE ? "Lite" : "", 1979afd46503SJoe Perches &saddr, ntohs(uh->source), 1980afd46503SJoe Perches ulen, skb->len, 1981afd46503SJoe Perches &daddr, ntohs(uh->dest)); 1982db8dac20SDavid S. Miller goto drop; 1983db8dac20SDavid S. Miller 1984db8dac20SDavid S. Miller csum_error: 1985db8dac20SDavid S. Miller /* 1986db8dac20SDavid S. Miller * RFC1122: OK. Discards the bad packet silently (as far as 1987db8dac20SDavid S. Miller * the network is concerned, anyway) as per 4.1.3.4 (MUST). 1988db8dac20SDavid S. Miller */ 1989ba7a46f1SJoe Perches net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", 1990afd46503SJoe Perches proto == IPPROTO_UDPLITE ? "Lite" : "", 1991afd46503SJoe Perches &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), 1992db8dac20SDavid S. 
Miller ulen); 199302c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 1994db8dac20SDavid S. Miller drop: 199502c22347SEric Dumazet __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1996db8dac20SDavid S. Miller kfree_skb(skb); 1997db8dac20SDavid S. Miller return 0; 1998db8dac20SDavid S. Miller } 1999db8dac20SDavid S. Miller 2000421b3885SShawn Bohrer /* We can only early demux multicast if there is a single matching socket. 2001421b3885SShawn Bohrer * If more than one socket found returns NULL 2002421b3885SShawn Bohrer */ 2003421b3885SShawn Bohrer static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, 2004421b3885SShawn Bohrer __be16 loc_port, __be32 loc_addr, 2005421b3885SShawn Bohrer __be16 rmt_port, __be32 rmt_addr, 2006421b3885SShawn Bohrer int dif) 2007421b3885SShawn Bohrer { 2008421b3885SShawn Bohrer struct sock *sk, *result; 2009421b3885SShawn Bohrer unsigned short hnum = ntohs(loc_port); 2010ca065d0cSEric Dumazet unsigned int slot = udp_hashfn(net, hnum, udp_table.mask); 2011421b3885SShawn Bohrer struct udp_hslot *hslot = &udp_table.hash[slot]; 2012421b3885SShawn Bohrer 201363c6f81cSEric Dumazet /* Do not bother scanning a too big list */ 201463c6f81cSEric Dumazet if (hslot->count > 10) 201563c6f81cSEric Dumazet return NULL; 201663c6f81cSEric Dumazet 2017421b3885SShawn Bohrer result = NULL; 2018ca065d0cSEric Dumazet sk_for_each_rcu(sk, &hslot->head) { 2019ca065d0cSEric Dumazet if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr, 2020ca065d0cSEric Dumazet rmt_port, rmt_addr, dif, hnum)) { 2021ca065d0cSEric Dumazet if (result) 2022ca065d0cSEric Dumazet return NULL; 2023421b3885SShawn Bohrer result = sk; 2024421b3885SShawn Bohrer } 2025421b3885SShawn Bohrer } 2026421b3885SShawn Bohrer 2027421b3885SShawn Bohrer return result; 2028421b3885SShawn Bohrer } 2029421b3885SShawn Bohrer 2030421b3885SShawn Bohrer /* For unicast we should only early demux connected sockets or we can 2031421b3885SShawn Bohrer 
* break forwarding setups. The chains here can be long so only check 2032421b3885SShawn Bohrer * if the first socket is an exact match and if not move on. 2033421b3885SShawn Bohrer */ 2034421b3885SShawn Bohrer static struct sock *__udp4_lib_demux_lookup(struct net *net, 2035421b3885SShawn Bohrer __be16 loc_port, __be32 loc_addr, 2036421b3885SShawn Bohrer __be16 rmt_port, __be32 rmt_addr, 2037421b3885SShawn Bohrer int dif) 2038421b3885SShawn Bohrer { 2039421b3885SShawn Bohrer unsigned short hnum = ntohs(loc_port); 2040421b3885SShawn Bohrer unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum); 2041421b3885SShawn Bohrer unsigned int slot2 = hash2 & udp_table.mask; 2042421b3885SShawn Bohrer struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; 2043c7228317SJoe Perches INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr); 2044421b3885SShawn Bohrer const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); 2045ca065d0cSEric Dumazet struct sock *sk; 2046421b3885SShawn Bohrer 2047ca065d0cSEric Dumazet udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 2048ca065d0cSEric Dumazet if (INET_MATCH(sk, net, acookie, rmt_addr, 2049ca065d0cSEric Dumazet loc_addr, ports, dif)) 2050ca065d0cSEric Dumazet return sk; 2051421b3885SShawn Bohrer /* Only check first socket in chain */ 2052421b3885SShawn Bohrer break; 2053421b3885SShawn Bohrer } 2054ca065d0cSEric Dumazet return NULL; 2055421b3885SShawn Bohrer } 2056421b3885SShawn Bohrer 2057421b3885SShawn Bohrer void udp_v4_early_demux(struct sk_buff *skb) 2058421b3885SShawn Bohrer { 2059610438b7SEric Dumazet struct net *net = dev_net(skb->dev); 2060610438b7SEric Dumazet const struct iphdr *iph; 2061610438b7SEric Dumazet const struct udphdr *uh; 2062ca065d0cSEric Dumazet struct sock *sk = NULL; 2063421b3885SShawn Bohrer struct dst_entry *dst; 2064421b3885SShawn Bohrer int dif = skb->dev->ifindex; 20656e540309SShawn Bohrer int ours; 2066421b3885SShawn Bohrer 2067421b3885SShawn Bohrer /* validate the packet */ 2068421b3885SShawn Bohrer if 
(!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) 2069421b3885SShawn Bohrer return; 2070421b3885SShawn Bohrer 2071610438b7SEric Dumazet iph = ip_hdr(skb); 2072610438b7SEric Dumazet uh = udp_hdr(skb); 2073610438b7SEric Dumazet 2074421b3885SShawn Bohrer if (skb->pkt_type == PACKET_BROADCAST || 20756e540309SShawn Bohrer skb->pkt_type == PACKET_MULTICAST) { 20766e540309SShawn Bohrer struct in_device *in_dev = __in_dev_get_rcu(skb->dev); 20776e540309SShawn Bohrer 20786e540309SShawn Bohrer if (!in_dev) 20796e540309SShawn Bohrer return; 20806e540309SShawn Bohrer 2081ad0ea198SPaolo Abeni /* we are supposed to accept bcast packets */ 2082ad0ea198SPaolo Abeni if (skb->pkt_type == PACKET_MULTICAST) { 20836e540309SShawn Bohrer ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, 20846e540309SShawn Bohrer iph->protocol); 20856e540309SShawn Bohrer if (!ours) 20866e540309SShawn Bohrer return; 2087ad0ea198SPaolo Abeni } 2088ad0ea198SPaolo Abeni 2089421b3885SShawn Bohrer sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, 2090421b3885SShawn Bohrer uh->source, iph->saddr, dif); 20916e540309SShawn Bohrer } else if (skb->pkt_type == PACKET_HOST) { 2092421b3885SShawn Bohrer sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, 2093421b3885SShawn Bohrer uh->source, iph->saddr, dif); 20946e540309SShawn Bohrer } 2095421b3885SShawn Bohrer 2096ca065d0cSEric Dumazet if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2)) 2097421b3885SShawn Bohrer return; 2098421b3885SShawn Bohrer 2099421b3885SShawn Bohrer skb->sk = sk; 210082eabd9eSAlexander Duyck skb->destructor = sock_efree; 210110e2eb87SEric Dumazet dst = READ_ONCE(sk->sk_rx_dst); 2102421b3885SShawn Bohrer 2103421b3885SShawn Bohrer if (dst) 2104421b3885SShawn Bohrer dst = dst_check(dst, 0); 210510e2eb87SEric Dumazet if (dst) { 210610e2eb87SEric Dumazet /* DST_NOCACHE can not be used without taking a reference */ 210710e2eb87SEric Dumazet if (dst->flags & DST_NOCACHE) { 210810e2eb87SEric Dumazet if 
(likely(atomic_inc_not_zero(&dst->__refcnt))) 210910e2eb87SEric Dumazet skb_dst_set(skb, dst); 211010e2eb87SEric Dumazet } else { 2111421b3885SShawn Bohrer skb_dst_set_noref(skb, dst); 2112421b3885SShawn Bohrer } 211310e2eb87SEric Dumazet } 211410e2eb87SEric Dumazet } 2115421b3885SShawn Bohrer 2116db8dac20SDavid S. Miller int udp_rcv(struct sk_buff *skb) 2117db8dac20SDavid S. Miller { 2118645ca708SEric Dumazet return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); 2119db8dac20SDavid S. Miller } 2120db8dac20SDavid S. Miller 21217d06b2e0SBrian Haley void udp_destroy_sock(struct sock *sk) 2122db8dac20SDavid S. Miller { 212344046a59STom Parkin struct udp_sock *up = udp_sk(sk); 21248a74ad60SEric Dumazet bool slow = lock_sock_fast(sk); 2125db8dac20SDavid S. Miller udp_flush_pending_frames(sk); 21268a74ad60SEric Dumazet unlock_sock_fast(sk, slow); 212744046a59STom Parkin if (static_key_false(&udp_encap_needed) && up->encap_type) { 212844046a59STom Parkin void (*encap_destroy)(struct sock *sk); 212944046a59STom Parkin encap_destroy = ACCESS_ONCE(up->encap_destroy); 213044046a59STom Parkin if (encap_destroy) 213144046a59STom Parkin encap_destroy(sk); 213244046a59STom Parkin } 2133db8dac20SDavid S. Miller } 2134db8dac20SDavid S. Miller 21351da177e4SLinus Torvalds /* 21361da177e4SLinus Torvalds * Socket option code for UDP 21371da177e4SLinus Torvalds */ 21384c0a6cb0SGerrit Renker int udp_lib_setsockopt(struct sock *sk, int level, int optname, 2139b7058842SDavid S. 
Miller char __user *optval, unsigned int optlen, 21404c0a6cb0SGerrit Renker int (*push_pending_frames)(struct sock *)) 21411da177e4SLinus Torvalds { 21421da177e4SLinus Torvalds struct udp_sock *up = udp_sk(sk); 21431c19448cSTom Herbert int val, valbool; 21441da177e4SLinus Torvalds int err = 0; 2145b2bf1e26SWang Chen int is_udplite = IS_UDPLITE(sk); 21461da177e4SLinus Torvalds 21471da177e4SLinus Torvalds if (optlen < sizeof(int)) 21481da177e4SLinus Torvalds return -EINVAL; 21491da177e4SLinus Torvalds 21501da177e4SLinus Torvalds if (get_user(val, (int __user *)optval)) 21511da177e4SLinus Torvalds return -EFAULT; 21521da177e4SLinus Torvalds 21531c19448cSTom Herbert valbool = val ? 1 : 0; 21541c19448cSTom Herbert 21551da177e4SLinus Torvalds switch (optname) { 21561da177e4SLinus Torvalds case UDP_CORK: 21571da177e4SLinus Torvalds if (val != 0) { 21581da177e4SLinus Torvalds up->corkflag = 1; 21591da177e4SLinus Torvalds } else { 21601da177e4SLinus Torvalds up->corkflag = 0; 21611da177e4SLinus Torvalds lock_sock(sk); 21624243cdc2SJoe Perches push_pending_frames(sk); 21631da177e4SLinus Torvalds release_sock(sk); 21641da177e4SLinus Torvalds } 21651da177e4SLinus Torvalds break; 21661da177e4SLinus Torvalds 21671da177e4SLinus Torvalds case UDP_ENCAP: 21681da177e4SLinus Torvalds switch (val) { 21691da177e4SLinus Torvalds case 0: 21701da177e4SLinus Torvalds case UDP_ENCAP_ESPINUDP: 21711da177e4SLinus Torvalds case UDP_ENCAP_ESPINUDP_NON_IKE: 2172067b207bSJames Chapman up->encap_rcv = xfrm4_udp_encap_rcv; 2173067b207bSJames Chapman /* FALLTHROUGH */ 2174342f0234SJames Chapman case UDP_ENCAP_L2TPINUDP: 21751da177e4SLinus Torvalds up->encap_type = val; 2176447167bfSEric Dumazet udp_encap_enable(); 21771da177e4SLinus Torvalds break; 21781da177e4SLinus Torvalds default: 21791da177e4SLinus Torvalds err = -ENOPROTOOPT; 21801da177e4SLinus Torvalds break; 21811da177e4SLinus Torvalds } 21821da177e4SLinus Torvalds break; 21831da177e4SLinus Torvalds 21841c19448cSTom Herbert case 
UDP_NO_CHECK6_TX: 21851c19448cSTom Herbert up->no_check6_tx = valbool; 21861c19448cSTom Herbert break; 21871c19448cSTom Herbert 21881c19448cSTom Herbert case UDP_NO_CHECK6_RX: 21891c19448cSTom Herbert up->no_check6_rx = valbool; 21901c19448cSTom Herbert break; 21911c19448cSTom Herbert 2192ba4e58ecSGerrit Renker /* 2193ba4e58ecSGerrit Renker * UDP-Lite's partial checksum coverage (RFC 3828). 2194ba4e58ecSGerrit Renker */ 2195ba4e58ecSGerrit Renker /* The sender sets actual checksum coverage length via this option. 2196ba4e58ecSGerrit Renker * The case coverage > packet length is handled by send module. */ 2197ba4e58ecSGerrit Renker case UDPLITE_SEND_CSCOV: 2198b2bf1e26SWang Chen if (!is_udplite) /* Disable the option on UDP sockets */ 2199ba4e58ecSGerrit Renker return -ENOPROTOOPT; 2200ba4e58ecSGerrit Renker if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ 2201ba4e58ecSGerrit Renker val = 8; 22024be929beSAlexey Dobriyan else if (val > USHRT_MAX) 22034be929beSAlexey Dobriyan val = USHRT_MAX; 2204ba4e58ecSGerrit Renker up->pcslen = val; 2205ba4e58ecSGerrit Renker up->pcflag |= UDPLITE_SEND_CC; 2206ba4e58ecSGerrit Renker break; 2207ba4e58ecSGerrit Renker 2208ba4e58ecSGerrit Renker /* The receiver specifies a minimum checksum coverage value. To make 2209ba4e58ecSGerrit Renker * sense, this should be set to at least 8 (as done below). If zero is 2210ba4e58ecSGerrit Renker * used, this again means full checksum coverage. */ 2211ba4e58ecSGerrit Renker case UDPLITE_RECV_CSCOV: 2212b2bf1e26SWang Chen if (!is_udplite) /* Disable the option on UDP sockets */ 2213ba4e58ecSGerrit Renker return -ENOPROTOOPT; 2214ba4e58ecSGerrit Renker if (val != 0 && val < 8) /* Avoid silly minimal values. 
*/ 2215ba4e58ecSGerrit Renker val = 8; 22164be929beSAlexey Dobriyan else if (val > USHRT_MAX) 22174be929beSAlexey Dobriyan val = USHRT_MAX; 2218ba4e58ecSGerrit Renker up->pcrlen = val; 2219ba4e58ecSGerrit Renker up->pcflag |= UDPLITE_RECV_CC; 2220ba4e58ecSGerrit Renker break; 2221ba4e58ecSGerrit Renker 22221da177e4SLinus Torvalds default: 22231da177e4SLinus Torvalds err = -ENOPROTOOPT; 22241da177e4SLinus Torvalds break; 22256516c655SStephen Hemminger } 22261da177e4SLinus Torvalds 22271da177e4SLinus Torvalds return err; 22281da177e4SLinus Torvalds } 2229c482c568SEric Dumazet EXPORT_SYMBOL(udp_lib_setsockopt); 22301da177e4SLinus Torvalds 2231db8dac20SDavid S. Miller int udp_setsockopt(struct sock *sk, int level, int optname, 2232b7058842SDavid S. Miller char __user *optval, unsigned int optlen) 2233db8dac20SDavid S. Miller { 2234db8dac20SDavid S. Miller if (level == SOL_UDP || level == SOL_UDPLITE) 2235db8dac20SDavid S. Miller return udp_lib_setsockopt(sk, level, optname, optval, optlen, 2236db8dac20SDavid S. Miller udp_push_pending_frames); 2237db8dac20SDavid S. Miller return ip_setsockopt(sk, level, optname, optval, optlen); 2238db8dac20SDavid S. Miller } 2239db8dac20SDavid S. Miller 2240db8dac20SDavid S. Miller #ifdef CONFIG_COMPAT 2241db8dac20SDavid S. Miller int compat_udp_setsockopt(struct sock *sk, int level, int optname, 2242b7058842SDavid S. Miller char __user *optval, unsigned int optlen) 2243db8dac20SDavid S. Miller { 2244db8dac20SDavid S. Miller if (level == SOL_UDP || level == SOL_UDPLITE) 2245db8dac20SDavid S. Miller return udp_lib_setsockopt(sk, level, optname, optval, optlen, 2246db8dac20SDavid S. Miller udp_push_pending_frames); 2247db8dac20SDavid S. Miller return compat_ip_setsockopt(sk, level, optname, optval, optlen); 2248db8dac20SDavid S. Miller } 2249db8dac20SDavid S. Miller #endif 2250db8dac20SDavid S. 
Miller 22514c0a6cb0SGerrit Renker int udp_lib_getsockopt(struct sock *sk, int level, int optname, 22521da177e4SLinus Torvalds char __user *optval, int __user *optlen) 22531da177e4SLinus Torvalds { 22541da177e4SLinus Torvalds struct udp_sock *up = udp_sk(sk); 22551da177e4SLinus Torvalds int val, len; 22561da177e4SLinus Torvalds 22571da177e4SLinus Torvalds if (get_user(len, optlen)) 22581da177e4SLinus Torvalds return -EFAULT; 22591da177e4SLinus Torvalds 22601da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(int)); 22611da177e4SLinus Torvalds 22621da177e4SLinus Torvalds if (len < 0) 22631da177e4SLinus Torvalds return -EINVAL; 22641da177e4SLinus Torvalds 22651da177e4SLinus Torvalds switch (optname) { 22661da177e4SLinus Torvalds case UDP_CORK: 22671da177e4SLinus Torvalds val = up->corkflag; 22681da177e4SLinus Torvalds break; 22691da177e4SLinus Torvalds 22701da177e4SLinus Torvalds case UDP_ENCAP: 22711da177e4SLinus Torvalds val = up->encap_type; 22721da177e4SLinus Torvalds break; 22731da177e4SLinus Torvalds 22741c19448cSTom Herbert case UDP_NO_CHECK6_TX: 22751c19448cSTom Herbert val = up->no_check6_tx; 22761c19448cSTom Herbert break; 22771c19448cSTom Herbert 22781c19448cSTom Herbert case UDP_NO_CHECK6_RX: 22791c19448cSTom Herbert val = up->no_check6_rx; 22801c19448cSTom Herbert break; 22811c19448cSTom Herbert 2282ba4e58ecSGerrit Renker /* The following two cannot be changed on UDP sockets, the return is 2283ba4e58ecSGerrit Renker * always 0 (which corresponds to the full checksum coverage of UDP). 
*/ 2284ba4e58ecSGerrit Renker case UDPLITE_SEND_CSCOV: 2285ba4e58ecSGerrit Renker val = up->pcslen; 2286ba4e58ecSGerrit Renker break; 2287ba4e58ecSGerrit Renker 2288ba4e58ecSGerrit Renker case UDPLITE_RECV_CSCOV: 2289ba4e58ecSGerrit Renker val = up->pcrlen; 2290ba4e58ecSGerrit Renker break; 2291ba4e58ecSGerrit Renker 22921da177e4SLinus Torvalds default: 22931da177e4SLinus Torvalds return -ENOPROTOOPT; 22946516c655SStephen Hemminger } 22951da177e4SLinus Torvalds 22961da177e4SLinus Torvalds if (put_user(len, optlen)) 22971da177e4SLinus Torvalds return -EFAULT; 22981da177e4SLinus Torvalds if (copy_to_user(optval, &val, len)) 22991da177e4SLinus Torvalds return -EFAULT; 23001da177e4SLinus Torvalds return 0; 23011da177e4SLinus Torvalds } 2302c482c568SEric Dumazet EXPORT_SYMBOL(udp_lib_getsockopt); 23031da177e4SLinus Torvalds 2304db8dac20SDavid S. Miller int udp_getsockopt(struct sock *sk, int level, int optname, 2305db8dac20SDavid S. Miller char __user *optval, int __user *optlen) 2306db8dac20SDavid S. Miller { 2307db8dac20SDavid S. Miller if (level == SOL_UDP || level == SOL_UDPLITE) 2308db8dac20SDavid S. Miller return udp_lib_getsockopt(sk, level, optname, optval, optlen); 2309db8dac20SDavid S. Miller return ip_getsockopt(sk, level, optname, optval, optlen); 2310db8dac20SDavid S. Miller } 2311db8dac20SDavid S. Miller 2312db8dac20SDavid S. Miller #ifdef CONFIG_COMPAT 2313db8dac20SDavid S. Miller int compat_udp_getsockopt(struct sock *sk, int level, int optname, 2314db8dac20SDavid S. Miller char __user *optval, int __user *optlen) 2315db8dac20SDavid S. Miller { 2316db8dac20SDavid S. Miller if (level == SOL_UDP || level == SOL_UDPLITE) 2317db8dac20SDavid S. Miller return udp_lib_getsockopt(sk, level, optname, optval, optlen); 2318db8dac20SDavid S. Miller return compat_ip_getsockopt(sk, level, optname, optval, optlen); 2319db8dac20SDavid S. Miller } 2320db8dac20SDavid S. 
Miller #endif 23211da177e4SLinus Torvalds /** 23221da177e4SLinus Torvalds * udp_poll - wait for a UDP event. 23231da177e4SLinus Torvalds * @file - file struct 23241da177e4SLinus Torvalds * @sock - socket 23251da177e4SLinus Torvalds * @wait - poll table 23261da177e4SLinus Torvalds * 23271da177e4SLinus Torvalds * This is same as datagram poll, except for the special case of 23281da177e4SLinus Torvalds * blocking sockets. If application is using a blocking fd 23291da177e4SLinus Torvalds * and a packet with checksum error is in the queue; 23301da177e4SLinus Torvalds * then it could get return from select indicating data available 23311da177e4SLinus Torvalds * but then block when reading it. Add special case code 23321da177e4SLinus Torvalds * to work around these arguably broken applications. 23331da177e4SLinus Torvalds */ 23341da177e4SLinus Torvalds unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) 23351da177e4SLinus Torvalds { 23361da177e4SLinus Torvalds unsigned int mask = datagram_poll(file, sock, wait); 23371da177e4SLinus Torvalds struct sock *sk = sock->sk; 23381da177e4SLinus Torvalds 2339c3f1dbafSDavid Majnemer sock_rps_record_flow(sk); 2340c3f1dbafSDavid Majnemer 23411da177e4SLinus Torvalds /* Check for false positives due to checksum errors */ 234285584672SEric Dumazet if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && 2343e83c6744SEric Dumazet !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) 23441da177e4SLinus Torvalds mask &= ~(POLLIN | POLLRDNORM); 23451da177e4SLinus Torvalds 23461da177e4SLinus Torvalds return mask; 23471da177e4SLinus Torvalds 23481da177e4SLinus Torvalds } 2349c482c568SEric Dumazet EXPORT_SYMBOL(udp_poll); 23501da177e4SLinus Torvalds 23515d77dca8SDavid Ahern int udp_abort(struct sock *sk, int err) 23525d77dca8SDavid Ahern { 23535d77dca8SDavid Ahern lock_sock(sk); 23545d77dca8SDavid Ahern 23555d77dca8SDavid Ahern sk->sk_err = err; 23565d77dca8SDavid Ahern sk->sk_error_report(sk); 
2357286c72deSEric Dumazet __udp_disconnect(sk, 0); 23585d77dca8SDavid Ahern 23595d77dca8SDavid Ahern release_sock(sk); 23605d77dca8SDavid Ahern 23615d77dca8SDavid Ahern return 0; 23625d77dca8SDavid Ahern } 23635d77dca8SDavid Ahern EXPORT_SYMBOL_GPL(udp_abort); 23645d77dca8SDavid Ahern 2365db8dac20SDavid S. Miller struct proto udp_prot = { 2366db8dac20SDavid S. Miller .name = "UDP", 2367db8dac20SDavid S. Miller .owner = THIS_MODULE, 2368db8dac20SDavid S. Miller .close = udp_lib_close, 2369db8dac20SDavid S. Miller .connect = ip4_datagram_connect, 2370db8dac20SDavid S. Miller .disconnect = udp_disconnect, 2371db8dac20SDavid S. Miller .ioctl = udp_ioctl, 2372850cbaddSPaolo Abeni .init = udp_init_sock, 2373db8dac20SDavid S. Miller .destroy = udp_destroy_sock, 2374db8dac20SDavid S. Miller .setsockopt = udp_setsockopt, 2375db8dac20SDavid S. Miller .getsockopt = udp_getsockopt, 2376db8dac20SDavid S. Miller .sendmsg = udp_sendmsg, 2377db8dac20SDavid S. Miller .recvmsg = udp_recvmsg, 2378db8dac20SDavid S. Miller .sendpage = udp_sendpage, 23798141ed9fSSteffen Klassert .release_cb = ip4_datagram_release_cb, 2380db8dac20SDavid S. Miller .hash = udp_lib_hash, 2381db8dac20SDavid S. Miller .unhash = udp_lib_unhash, 2382719f8358SEric Dumazet .rehash = udp_v4_rehash, 2383db8dac20SDavid S. Miller .get_port = udp_v4_get_port, 2384db8dac20SDavid S. Miller .memory_allocated = &udp_memory_allocated, 2385db8dac20SDavid S. Miller .sysctl_mem = sysctl_udp_mem, 2386db8dac20SDavid S. Miller .sysctl_wmem = &sysctl_udp_wmem_min, 2387db8dac20SDavid S. Miller .sysctl_rmem = &sysctl_udp_rmem_min, 2388db8dac20SDavid S. Miller .obj_size = sizeof(struct udp_sock), 2389645ca708SEric Dumazet .h.udp_table = &udp_table, 2390db8dac20SDavid S. Miller #ifdef CONFIG_COMPAT 2391db8dac20SDavid S. Miller .compat_setsockopt = compat_udp_setsockopt, 2392db8dac20SDavid S. Miller .compat_getsockopt = compat_udp_getsockopt, 2393db8dac20SDavid S. 
Miller #endif 23945d77dca8SDavid Ahern .diag_destroy = udp_abort, 2395db8dac20SDavid S. Miller }; 2396c482c568SEric Dumazet EXPORT_SYMBOL(udp_prot); 23971da177e4SLinus Torvalds 23981da177e4SLinus Torvalds /* ------------------------------------------------------------------------ */ 23991da177e4SLinus Torvalds #ifdef CONFIG_PROC_FS 24001da177e4SLinus Torvalds 2401645ca708SEric Dumazet static struct sock *udp_get_first(struct seq_file *seq, int start) 24021da177e4SLinus Torvalds { 24031da177e4SLinus Torvalds struct sock *sk; 24041da177e4SLinus Torvalds struct udp_iter_state *state = seq->private; 24056f191efeSDenis V. Lunev struct net *net = seq_file_net(seq); 24061da177e4SLinus Torvalds 2407f86dcc5aSEric Dumazet for (state->bucket = start; state->bucket <= state->udp_table->mask; 2408f86dcc5aSEric Dumazet ++state->bucket) { 2409645ca708SEric Dumazet struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; 2410f86dcc5aSEric Dumazet 2411ca065d0cSEric Dumazet if (hlist_empty(&hslot->head)) 2412f86dcc5aSEric Dumazet continue; 2413f86dcc5aSEric Dumazet 2414645ca708SEric Dumazet spin_lock_bh(&hslot->lock); 2415ca065d0cSEric Dumazet sk_for_each(sk, &hslot->head) { 2416878628fbSYOSHIFUJI Hideaki if (!net_eq(sock_net(sk), net)) 2417a91275efSDaniel Lezcano continue; 24181da177e4SLinus Torvalds if (sk->sk_family == state->family) 24191da177e4SLinus Torvalds goto found; 24201da177e4SLinus Torvalds } 2421645ca708SEric Dumazet spin_unlock_bh(&hslot->lock); 24221da177e4SLinus Torvalds } 24231da177e4SLinus Torvalds sk = NULL; 24241da177e4SLinus Torvalds found: 24251da177e4SLinus Torvalds return sk; 24261da177e4SLinus Torvalds } 24271da177e4SLinus Torvalds 24281da177e4SLinus Torvalds static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) 24291da177e4SLinus Torvalds { 24301da177e4SLinus Torvalds struct udp_iter_state *state = seq->private; 24316f191efeSDenis V. 
Lunev struct net *net = seq_file_net(seq); 24321da177e4SLinus Torvalds 24331da177e4SLinus Torvalds do { 2434ca065d0cSEric Dumazet sk = sk_next(sk); 2435878628fbSYOSHIFUJI Hideaki } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); 24361da177e4SLinus Torvalds 2437645ca708SEric Dumazet if (!sk) { 2438f86dcc5aSEric Dumazet if (state->bucket <= state->udp_table->mask) 2439f52b5054SEric Dumazet spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); 2440645ca708SEric Dumazet return udp_get_first(seq, state->bucket + 1); 24411da177e4SLinus Torvalds } 24421da177e4SLinus Torvalds return sk; 24431da177e4SLinus Torvalds } 24441da177e4SLinus Torvalds 24451da177e4SLinus Torvalds static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) 24461da177e4SLinus Torvalds { 2447645ca708SEric Dumazet struct sock *sk = udp_get_first(seq, 0); 24481da177e4SLinus Torvalds 24491da177e4SLinus Torvalds if (sk) 24501da177e4SLinus Torvalds while (pos && (sk = udp_get_next(seq, sk)) != NULL) 24511da177e4SLinus Torvalds --pos; 24521da177e4SLinus Torvalds return pos ? NULL : sk; 24531da177e4SLinus Torvalds } 24541da177e4SLinus Torvalds 24551da177e4SLinus Torvalds static void *udp_seq_start(struct seq_file *seq, loff_t *pos) 24561da177e4SLinus Torvalds { 245730842f29SVitaly Mayatskikh struct udp_iter_state *state = seq->private; 2458f86dcc5aSEric Dumazet state->bucket = MAX_UDP_PORTS; 245930842f29SVitaly Mayatskikh 2460b50660f1SYOSHIFUJI Hideaki return *pos ? 
udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; 24611da177e4SLinus Torvalds } 24621da177e4SLinus Torvalds 24631da177e4SLinus Torvalds static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) 24641da177e4SLinus Torvalds { 24651da177e4SLinus Torvalds struct sock *sk; 24661da177e4SLinus Torvalds 2467b50660f1SYOSHIFUJI Hideaki if (v == SEQ_START_TOKEN) 24681da177e4SLinus Torvalds sk = udp_get_idx(seq, 0); 24691da177e4SLinus Torvalds else 24701da177e4SLinus Torvalds sk = udp_get_next(seq, v); 24711da177e4SLinus Torvalds 24721da177e4SLinus Torvalds ++*pos; 24731da177e4SLinus Torvalds return sk; 24741da177e4SLinus Torvalds } 24751da177e4SLinus Torvalds 24761da177e4SLinus Torvalds static void udp_seq_stop(struct seq_file *seq, void *v) 24771da177e4SLinus Torvalds { 2478645ca708SEric Dumazet struct udp_iter_state *state = seq->private; 2479645ca708SEric Dumazet 2480f86dcc5aSEric Dumazet if (state->bucket <= state->udp_table->mask) 2481645ca708SEric Dumazet spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); 24821da177e4SLinus Torvalds } 24831da177e4SLinus Torvalds 248473cb88ecSArjan van de Ven int udp_seq_open(struct inode *inode, struct file *file) 24851da177e4SLinus Torvalds { 2486d9dda78bSAl Viro struct udp_seq_afinfo *afinfo = PDE_DATA(inode); 2487a2be75c1SDenis V. Lunev struct udp_iter_state *s; 2488a2be75c1SDenis V. Lunev int err; 24891da177e4SLinus Torvalds 2490a2be75c1SDenis V. Lunev err = seq_open_net(inode, file, &afinfo->seq_ops, 2491a2be75c1SDenis V. Lunev sizeof(struct udp_iter_state)); 2492a2be75c1SDenis V. Lunev if (err < 0) 2493a2be75c1SDenis V. Lunev return err; 2494a91275efSDaniel Lezcano 2495a2be75c1SDenis V. Lunev s = ((struct seq_file *)file->private_data)->private; 24961da177e4SLinus Torvalds s->family = afinfo->family; 2497645ca708SEric Dumazet s->udp_table = afinfo->udp_table; 2498a2be75c1SDenis V. 
Lunev return err; 2499a91275efSDaniel Lezcano } 250073cb88ecSArjan van de Ven EXPORT_SYMBOL(udp_seq_open); 2501a91275efSDaniel Lezcano 25021da177e4SLinus Torvalds /* ------------------------------------------------------------------------ */ 25030c96d8c5SDaniel Lezcano int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) 25041da177e4SLinus Torvalds { 25051da177e4SLinus Torvalds struct proc_dir_entry *p; 25061da177e4SLinus Torvalds int rc = 0; 25071da177e4SLinus Torvalds 2508dda61925SDenis V. Lunev afinfo->seq_ops.start = udp_seq_start; 2509dda61925SDenis V. Lunev afinfo->seq_ops.next = udp_seq_next; 2510dda61925SDenis V. Lunev afinfo->seq_ops.stop = udp_seq_stop; 2511dda61925SDenis V. Lunev 251284841c3cSDenis V. Lunev p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, 251373cb88ecSArjan van de Ven afinfo->seq_fops, afinfo); 251484841c3cSDenis V. Lunev if (!p) 25151da177e4SLinus Torvalds rc = -ENOMEM; 25161da177e4SLinus Torvalds return rc; 25171da177e4SLinus Torvalds } 2518c482c568SEric Dumazet EXPORT_SYMBOL(udp_proc_register); 25191da177e4SLinus Torvalds 25200c96d8c5SDaniel Lezcano void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) 25211da177e4SLinus Torvalds { 2522ece31ffdSGao feng remove_proc_entry(afinfo->name, net->proc_net); 25231da177e4SLinus Torvalds } 2524c482c568SEric Dumazet EXPORT_SYMBOL(udp_proc_unregister); 2525db8dac20SDavid S. Miller 2526db8dac20SDavid S. Miller /* ------------------------------------------------------------------------ */ 25275e659e4cSPavel Emelyanov static void udp4_format_sock(struct sock *sp, struct seq_file *f, 2528652586dfSTetsuo Handa int bucket) 2529db8dac20SDavid S. Miller { 2530db8dac20SDavid S. 
Miller struct inet_sock *inet = inet_sk(sp); 2531c720c7e8SEric Dumazet __be32 dest = inet->inet_daddr; 2532c720c7e8SEric Dumazet __be32 src = inet->inet_rcv_saddr; 2533c720c7e8SEric Dumazet __u16 destp = ntohs(inet->inet_dport); 2534c720c7e8SEric Dumazet __u16 srcp = ntohs(inet->inet_sport); 2535db8dac20SDavid S. Miller 2536f86dcc5aSEric Dumazet seq_printf(f, "%5d: %08X:%04X %08X:%04X" 2537652586dfSTetsuo Handa " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d", 2538db8dac20SDavid S. Miller bucket, src, srcp, dest, destp, sp->sk_state, 253931e6d363SEric Dumazet sk_wmem_alloc_get(sp), 254031e6d363SEric Dumazet sk_rmem_alloc_get(sp), 2541a7cb5a49SEric W. Biederman 0, 0L, 0, 2542a7cb5a49SEric W. Biederman from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 2543a7cb5a49SEric W. Biederman 0, sock_i_ino(sp), 2544cb61cb9bSEric Dumazet atomic_read(&sp->sk_refcnt), sp, 2545652586dfSTetsuo Handa atomic_read(&sp->sk_drops)); 2546db8dac20SDavid S. Miller } 2547db8dac20SDavid S. Miller 2548db8dac20SDavid S. Miller int udp4_seq_show(struct seq_file *seq, void *v) 2549db8dac20SDavid S. Miller { 2550652586dfSTetsuo Handa seq_setwidth(seq, 127); 2551db8dac20SDavid S. Miller if (v == SEQ_START_TOKEN) 2552652586dfSTetsuo Handa seq_puts(seq, " sl local_address rem_address st tx_queue " 2553db8dac20SDavid S. Miller "rx_queue tr tm->when retrnsmt uid timeout " 2554cb61cb9bSEric Dumazet "inode ref pointer drops"); 2555db8dac20SDavid S. Miller else { 2556db8dac20SDavid S. Miller struct udp_iter_state *state = seq->private; 2557db8dac20SDavid S. Miller 2558652586dfSTetsuo Handa udp4_format_sock(v, seq, state->bucket); 2559db8dac20SDavid S. Miller } 2560652586dfSTetsuo Handa seq_pad(seq, '\n'); 2561db8dac20SDavid S. Miller return 0; 2562db8dac20SDavid S. Miller } 2563db8dac20SDavid S. 
Miller 256473cb88ecSArjan van de Ven static const struct file_operations udp_afinfo_seq_fops = { 256573cb88ecSArjan van de Ven .owner = THIS_MODULE, 256673cb88ecSArjan van de Ven .open = udp_seq_open, 256773cb88ecSArjan van de Ven .read = seq_read, 256873cb88ecSArjan van de Ven .llseek = seq_lseek, 256973cb88ecSArjan van de Ven .release = seq_release_net 257073cb88ecSArjan van de Ven }; 257173cb88ecSArjan van de Ven 2572db8dac20SDavid S. Miller /* ------------------------------------------------------------------------ */ 2573db8dac20SDavid S. Miller static struct udp_seq_afinfo udp4_seq_afinfo = { 2574db8dac20SDavid S. Miller .name = "udp", 2575db8dac20SDavid S. Miller .family = AF_INET, 2576645ca708SEric Dumazet .udp_table = &udp_table, 257773cb88ecSArjan van de Ven .seq_fops = &udp_afinfo_seq_fops, 2578dda61925SDenis V. Lunev .seq_ops = { 2579dda61925SDenis V. Lunev .show = udp4_seq_show, 2580dda61925SDenis V. Lunev }, 2581db8dac20SDavid S. Miller }; 2582db8dac20SDavid S. Miller 25832c8c1e72SAlexey Dobriyan static int __net_init udp4_proc_init_net(struct net *net) 258415439febSPavel Emelyanov { 258515439febSPavel Emelyanov return udp_proc_register(net, &udp4_seq_afinfo); 258615439febSPavel Emelyanov } 258715439febSPavel Emelyanov 25882c8c1e72SAlexey Dobriyan static void __net_exit udp4_proc_exit_net(struct net *net) 258915439febSPavel Emelyanov { 259015439febSPavel Emelyanov udp_proc_unregister(net, &udp4_seq_afinfo); 259115439febSPavel Emelyanov } 259215439febSPavel Emelyanov 259315439febSPavel Emelyanov static struct pernet_operations udp4_net_ops = { 259415439febSPavel Emelyanov .init = udp4_proc_init_net, 259515439febSPavel Emelyanov .exit = udp4_proc_exit_net, 259615439febSPavel Emelyanov }; 259715439febSPavel Emelyanov 2598db8dac20SDavid S. Miller int __init udp4_proc_init(void) 2599db8dac20SDavid S. Miller { 260015439febSPavel Emelyanov return register_pernet_subsys(&udp4_net_ops); 2601db8dac20SDavid S. Miller } 2602db8dac20SDavid S. 
Miller 2603db8dac20SDavid S. Miller void udp4_proc_exit(void) 2604db8dac20SDavid S. Miller { 260515439febSPavel Emelyanov unregister_pernet_subsys(&udp4_net_ops); 2606db8dac20SDavid S. Miller } 26071da177e4SLinus Torvalds #endif /* CONFIG_PROC_FS */ 26081da177e4SLinus Torvalds 2609f86dcc5aSEric Dumazet static __initdata unsigned long uhash_entries; 2610f86dcc5aSEric Dumazet static int __init set_uhash_entries(char *str) 2611645ca708SEric Dumazet { 2612413c27d8SEldad Zack ssize_t ret; 2613413c27d8SEldad Zack 2614f86dcc5aSEric Dumazet if (!str) 2615f86dcc5aSEric Dumazet return 0; 2616413c27d8SEldad Zack 2617413c27d8SEldad Zack ret = kstrtoul(str, 0, &uhash_entries); 2618413c27d8SEldad Zack if (ret) 2619413c27d8SEldad Zack return 0; 2620413c27d8SEldad Zack 2621f86dcc5aSEric Dumazet if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) 2622f86dcc5aSEric Dumazet uhash_entries = UDP_HTABLE_SIZE_MIN; 2623f86dcc5aSEric Dumazet return 1; 2624f86dcc5aSEric Dumazet } 2625f86dcc5aSEric Dumazet __setup("uhash_entries=", set_uhash_entries); 2626645ca708SEric Dumazet 2627f86dcc5aSEric Dumazet void __init udp_table_init(struct udp_table *table, const char *name) 2628f86dcc5aSEric Dumazet { 2629f86dcc5aSEric Dumazet unsigned int i; 2630f86dcc5aSEric Dumazet 2631f86dcc5aSEric Dumazet table->hash = alloc_large_system_hash(name, 2632512615b6SEric Dumazet 2 * sizeof(struct udp_hslot), 2633f86dcc5aSEric Dumazet uhash_entries, 2634f86dcc5aSEric Dumazet 21, /* one slot per 2 MB */ 2635f86dcc5aSEric Dumazet 0, 2636f86dcc5aSEric Dumazet &table->log, 2637f86dcc5aSEric Dumazet &table->mask, 263831fe62b9STim Bird UDP_HTABLE_SIZE_MIN, 2639f86dcc5aSEric Dumazet 64 * 1024); 264031fe62b9STim Bird 2641512615b6SEric Dumazet table->hash2 = table->hash + (table->mask + 1); 2642f86dcc5aSEric Dumazet for (i = 0; i <= table->mask; i++) { 2643ca065d0cSEric Dumazet INIT_HLIST_HEAD(&table->hash[i].head); 2644fdcc8aa9SEric Dumazet table->hash[i].count = 0; 2645645ca708SEric Dumazet 
spin_lock_init(&table->hash[i].lock); 2646645ca708SEric Dumazet } 2647512615b6SEric Dumazet for (i = 0; i <= table->mask; i++) { 2648ca065d0cSEric Dumazet INIT_HLIST_HEAD(&table->hash2[i].head); 2649512615b6SEric Dumazet table->hash2[i].count = 0; 2650512615b6SEric Dumazet spin_lock_init(&table->hash2[i].lock); 2651512615b6SEric Dumazet } 2652645ca708SEric Dumazet } 2653645ca708SEric Dumazet 2654723b8e46STom Herbert u32 udp_flow_hashrnd(void) 2655723b8e46STom Herbert { 2656723b8e46STom Herbert static u32 hashrnd __read_mostly; 2657723b8e46STom Herbert 2658723b8e46STom Herbert net_get_random_once(&hashrnd, sizeof(hashrnd)); 2659723b8e46STom Herbert 2660723b8e46STom Herbert return hashrnd; 2661723b8e46STom Herbert } 2662723b8e46STom Herbert EXPORT_SYMBOL(udp_flow_hashrnd); 2663723b8e46STom Herbert 266495766fffSHideo Aoki void __init udp_init(void) 266595766fffSHideo Aoki { 2666f03d78dbSEric Dumazet unsigned long limit; 26674b272750SEric Dumazet unsigned int i; 266895766fffSHideo Aoki 2669f86dcc5aSEric Dumazet udp_table_init(&udp_table, "UDP"); 2670f03d78dbSEric Dumazet limit = nr_free_buffer_pages() / 8; 267195766fffSHideo Aoki limit = max(limit, 128UL); 267295766fffSHideo Aoki sysctl_udp_mem[0] = limit / 4 * 3; 267395766fffSHideo Aoki sysctl_udp_mem[1] = limit; 267495766fffSHideo Aoki sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; 267595766fffSHideo Aoki 267695766fffSHideo Aoki sysctl_udp_rmem_min = SK_MEM_QUANTUM; 267795766fffSHideo Aoki sysctl_udp_wmem_min = SK_MEM_QUANTUM; 26784b272750SEric Dumazet 26794b272750SEric Dumazet /* 16 spinlocks per cpu */ 26804b272750SEric Dumazet udp_busylocks_log = ilog2(nr_cpu_ids) + 4; 26814b272750SEric Dumazet udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log, 26824b272750SEric Dumazet GFP_KERNEL); 26834b272750SEric Dumazet if (!udp_busylocks) 26844b272750SEric Dumazet panic("UDP: failed to alloc udp_busylocks\n"); 26854b272750SEric Dumazet for (i = 0; i < (1U << udp_busylocks_log); i++) 26864b272750SEric Dumazet 
spin_lock_init(udp_busylocks + i); 268795766fffSHideo Aoki } 2688