Lines Matching refs:sk

156 static void sock_def_write_space_wfree(struct sock *sk);
157 static void sock_def_write_space(struct sock *sk);
169 bool sk_ns_capable(const struct sock *sk, in sk_ns_capable() argument
172 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && in sk_ns_capable()
186 bool sk_capable(const struct sock *sk, int cap) in sk_capable() argument
188 return sk_ns_capable(sk, &init_user_ns, cap); in sk_capable()
201 bool sk_net_capable(const struct sock *sk, int cap) in sk_net_capable() argument
203 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); in sk_net_capable()
300 void sk_set_memalloc(struct sock *sk) in sk_set_memalloc() argument
302 sock_set_flag(sk, SOCK_MEMALLOC); in sk_set_memalloc()
303 sk->sk_allocation |= __GFP_MEMALLOC; in sk_set_memalloc()
308 void sk_clear_memalloc(struct sock *sk) in sk_clear_memalloc() argument
310 sock_reset_flag(sk, SOCK_MEMALLOC); in sk_clear_memalloc()
311 sk->sk_allocation &= ~__GFP_MEMALLOC; in sk_clear_memalloc()
321 sk_mem_reclaim(sk); in sk_clear_memalloc()
325 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in __sk_backlog_rcv() argument
331 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); in __sk_backlog_rcv()
334 ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv, in __sk_backlog_rcv()
337 sk, skb); in __sk_backlog_rcv()
344 void sk_error_report(struct sock *sk) in sk_error_report() argument
346 sk->sk_error_report(sk); in sk_error_report()
348 switch (sk->sk_family) { in sk_error_report()
352 trace_inet_sk_error_report(sk); in sk_error_report()
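
The sk_error_report() lines above show the generic error-report hook. A minimal, hedged sketch of how a protocol error handler typically uses it; the function name and the error value are illustrative, not taken from this listing:

	#include <net/sock.h>

	/* Illustrative only: record an asynchronous error on the socket and let
	 * sk_error_report() wake pollers (sock_def_error_report by default) and,
	 * for inet-family sockets, fire the inet_sk_error_report tracepoint.
	 */
	static void example_report_async_error(struct sock *sk, int err)
	{
		WRITE_ONCE(sk->sk_err, err);
		sk_error_report(sk);
	}
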
457 static bool sock_needs_netstamp(const struct sock *sk) in sock_needs_netstamp() argument
459 switch (sk->sk_family) { in sock_needs_netstamp()
468 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) in sock_disable_timestamp() argument
470 if (sk->sk_flags & flags) { in sock_disable_timestamp()
471 sk->sk_flags &= ~flags; in sock_disable_timestamp()
472 if (sock_needs_netstamp(sk) && in sock_disable_timestamp()
473 !(sk->sk_flags & SK_FLAGS_TIMESTAMP)) in sock_disable_timestamp()
479 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in __sock_queue_rcv_skb() argument
482 struct sk_buff_head *list = &sk->sk_receive_queue; in __sock_queue_rcv_skb()
484 if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) { in __sock_queue_rcv_skb()
485 atomic_inc(&sk->sk_drops); in __sock_queue_rcv_skb()
486 trace_sock_rcvqueue_full(sk, skb); in __sock_queue_rcv_skb()
490 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { in __sock_queue_rcv_skb()
491 atomic_inc(&sk->sk_drops); in __sock_queue_rcv_skb()
496 skb_set_owner_r(skb, sk); in __sock_queue_rcv_skb()
504 sock_skb_set_dropcount(sk, skb); in __sock_queue_rcv_skb()
508 if (!sock_flag(sk, SOCK_DEAD)) in __sock_queue_rcv_skb()
509 sk->sk_data_ready(sk); in __sock_queue_rcv_skb()
514 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb, in sock_queue_rcv_skb_reason() argument
520 err = sk_filter(sk, skb); in sock_queue_rcv_skb_reason()
525 err = __sock_queue_rcv_skb(sk, skb); in sock_queue_rcv_skb_reason()
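
sock_queue_rcv_skb_reason() above runs the socket filter and then queues via __sock_queue_rcv_skb(). A hedged sketch of the usual caller pattern in a protocol receive path (example_proto_rcv is a hypothetical name); on failure the skb is not consumed, so the caller frees it with the returned drop reason:

	#include <linux/skbuff.h>
	#include <net/sock.h>

	static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
	{
		enum skb_drop_reason reason;
		int err;

		/* Runs sk_filter(), charges sk_rmem_alloc and calls
		 * sk->sk_data_ready() on success. */
		err = sock_queue_rcv_skb_reason(sk, skb, &reason);
		if (err)
			kfree_skb_reason(skb, reason);
		return err;
	}
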
544 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, in __sk_receive_skb() argument
549 if (sk_filter_trim_cap(sk, skb, trim_cap)) in __sk_receive_skb()
554 if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) { in __sk_receive_skb()
555 atomic_inc(&sk->sk_drops); in __sk_receive_skb()
559 bh_lock_sock_nested(sk); in __sk_receive_skb()
561 bh_lock_sock(sk); in __sk_receive_skb()
562 if (!sock_owned_by_user(sk)) { in __sk_receive_skb()
566 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); in __sk_receive_skb()
568 rc = sk_backlog_rcv(sk, skb); in __sk_receive_skb()
570 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in __sk_receive_skb()
571 } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) { in __sk_receive_skb()
572 bh_unlock_sock(sk); in __sk_receive_skb()
573 atomic_inc(&sk->sk_drops); in __sk_receive_skb()
577 bh_unlock_sock(sk); in __sk_receive_skb()
580 sock_put(sk); in __sk_receive_skb()
592 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) in __sk_dst_check() argument
594 struct dst_entry *dst = __sk_dst_get(sk); in __sk_dst_check()
599 sk_tx_queue_clear(sk); in __sk_dst_check()
600 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); in __sk_dst_check()
601 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); in __sk_dst_check()
610 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) in sk_dst_check() argument
612 struct dst_entry *dst = sk_dst_get(sk); in sk_dst_check()
617 sk_dst_reset(sk); in sk_dst_check()
626 static int sock_bindtoindex_locked(struct sock *sk, int ifindex) in sock_bindtoindex_locked() argument
630 struct net *net = sock_net(sk); in sock_bindtoindex_locked()
634 if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW)) in sock_bindtoindex_locked()
642 WRITE_ONCE(sk->sk_bound_dev_if, ifindex); in sock_bindtoindex_locked()
644 if (sk->sk_prot->rehash) in sock_bindtoindex_locked()
645 sk->sk_prot->rehash(sk); in sock_bindtoindex_locked()
646 sk_dst_reset(sk); in sock_bindtoindex_locked()
656 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk) in sock_bindtoindex() argument
661 lock_sock(sk); in sock_bindtoindex()
662 ret = sock_bindtoindex_locked(sk, ifindex); in sock_bindtoindex()
664 release_sock(sk); in sock_bindtoindex()
670 static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen) in sock_setbindtodevice() argument
674 struct net *net = sock_net(sk); in sock_setbindtodevice()
709 sockopt_lock_sock(sk); in sock_setbindtodevice()
710 ret = sock_bindtoindex_locked(sk, index); in sock_setbindtodevice()
711 sockopt_release_sock(sk); in sock_setbindtodevice()
718 static int sock_getbindtodevice(struct sock *sk, sockptr_t optval, in sock_getbindtodevice() argument
723 int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); in sock_getbindtodevice()
724 struct net *net = sock_net(sk); in sock_getbindtodevice()
759 bool sk_mc_loop(const struct sock *sk) in sk_mc_loop() argument
763 if (!sk) in sk_mc_loop()
766 switch (READ_ONCE(sk->sk_family)) { in sk_mc_loop()
768 return inet_test_bit(MC_LOOP, sk); in sk_mc_loop()
771 return inet6_test_bit(MC6_LOOP, sk); in sk_mc_loop()
779 void sock_set_reuseaddr(struct sock *sk) in sock_set_reuseaddr() argument
781 lock_sock(sk); in sock_set_reuseaddr()
782 sk->sk_reuse = SK_CAN_REUSE; in sock_set_reuseaddr()
783 release_sock(sk); in sock_set_reuseaddr()
787 void sock_set_reuseport(struct sock *sk) in sock_set_reuseport() argument
789 lock_sock(sk); in sock_set_reuseport()
790 sk->sk_reuseport = true; in sock_set_reuseport()
791 release_sock(sk); in sock_set_reuseport()
795 void sock_no_linger(struct sock *sk) in sock_no_linger() argument
797 lock_sock(sk); in sock_no_linger()
798 WRITE_ONCE(sk->sk_lingertime, 0); in sock_no_linger()
799 sock_set_flag(sk, SOCK_LINGER); in sock_no_linger()
800 release_sock(sk); in sock_no_linger()
804 void sock_set_priority(struct sock *sk, u32 priority) in sock_set_priority() argument
806 WRITE_ONCE(sk->sk_priority, priority); in sock_set_priority()
810 void sock_set_sndtimeo(struct sock *sk, s64 secs) in sock_set_sndtimeo() argument
812 lock_sock(sk); in sock_set_sndtimeo()
814 WRITE_ONCE(sk->sk_sndtimeo, secs * HZ); in sock_set_sndtimeo()
816 WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT); in sock_set_sndtimeo()
817 release_sock(sk); in sock_set_sndtimeo()
821 static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns) in __sock_set_timestamps() argument
823 sock_valbool_flag(sk, SOCK_RCVTSTAMP, val); in __sock_set_timestamps()
824 sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns); in __sock_set_timestamps()
826 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new); in __sock_set_timestamps()
827 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in __sock_set_timestamps()
831 void sock_enable_timestamps(struct sock *sk) in sock_enable_timestamps() argument
833 lock_sock(sk); in sock_enable_timestamps()
834 __sock_set_timestamps(sk, true, false, true); in sock_enable_timestamps()
835 release_sock(sk); in sock_enable_timestamps()
839 void sock_set_timestamp(struct sock *sk, int optname, bool valbool) in sock_set_timestamp() argument
843 __sock_set_timestamps(sk, valbool, false, false); in sock_set_timestamp()
846 __sock_set_timestamps(sk, valbool, true, false); in sock_set_timestamp()
849 __sock_set_timestamps(sk, valbool, false, true); in sock_set_timestamp()
852 __sock_set_timestamps(sk, valbool, true, true); in sock_set_timestamp()
857 static int sock_timestamping_bind_phc(struct sock *sk, int phc_index) in sock_timestamping_bind_phc() argument
859 struct net *net = sock_net(sk); in sock_timestamping_bind_phc()
865 if (sk->sk_bound_dev_if) in sock_timestamping_bind_phc()
866 dev = dev_get_by_index(net, sk->sk_bound_dev_if); in sock_timestamping_bind_phc()
889 WRITE_ONCE(sk->sk_bind_phc, phc_index); in sock_timestamping_bind_phc()
894 int sock_set_timestamping(struct sock *sk, int optname, in sock_set_timestamping() argument
908 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { in sock_set_timestamping()
909 if (sk_is_tcp(sk)) { in sock_set_timestamping()
910 if ((1 << sk->sk_state) & in sock_set_timestamping()
914 atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq); in sock_set_timestamping()
916 atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una); in sock_set_timestamping()
918 atomic_set(&sk->sk_tskey, 0); in sock_set_timestamping()
927 ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc); in sock_set_timestamping()
932 WRITE_ONCE(sk->sk_tsflags, val); in sock_set_timestamping()
933 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); in sock_set_timestamping()
936 sock_enable_timestamp(sk, in sock_set_timestamping()
939 sock_disable_timestamp(sk, in sock_set_timestamping()
944 void sock_set_keepalive(struct sock *sk) in sock_set_keepalive() argument
946 lock_sock(sk); in sock_set_keepalive()
947 if (sk->sk_prot->keepalive) in sock_set_keepalive()
948 sk->sk_prot->keepalive(sk, true); in sock_set_keepalive()
949 sock_valbool_flag(sk, SOCK_KEEPOPEN, true); in sock_set_keepalive()
950 release_sock(sk); in sock_set_keepalive()
954 static void __sock_set_rcvbuf(struct sock *sk, int val) in __sock_set_rcvbuf() argument
960 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; in __sock_set_rcvbuf()
972 WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); in __sock_set_rcvbuf()
975 void sock_set_rcvbuf(struct sock *sk, int val) in sock_set_rcvbuf() argument
977 lock_sock(sk); in sock_set_rcvbuf()
978 __sock_set_rcvbuf(sk, val); in sock_set_rcvbuf()
979 release_sock(sk); in sock_set_rcvbuf()
983 static void __sock_set_mark(struct sock *sk, u32 val) in __sock_set_mark() argument
985 if (val != sk->sk_mark) { in __sock_set_mark()
986 WRITE_ONCE(sk->sk_mark, val); in __sock_set_mark()
987 sk_dst_reset(sk); in __sock_set_mark()
991 void sock_set_mark(struct sock *sk, u32 val) in sock_set_mark() argument
993 lock_sock(sk); in sock_set_mark()
994 __sock_set_mark(sk, val); in sock_set_mark()
995 release_sock(sk); in sock_set_mark()
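
The sock_set_*() helpers above (sock_set_reuseaddr, sock_set_keepalive, sock_no_linger, sock_set_rcvbuf, sock_set_mark, ...) are the lock-taking setters exported for in-kernel socket users. A minimal sketch, assuming a kernel TCP socket created with sock_create_kern(); the option values and the function name are illustrative only:

	#include <linux/net.h>
	#include <linux/in.h>
	#include <net/sock.h>

	static int example_setup_kernel_socket(struct net *net, struct socket **res)
	{
		struct socket *sock;
		int err;

		err = sock_create_kern(net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
		if (err)
			return err;

		sock_set_reuseaddr(sock->sk);		/* SO_REUSEADDR */
		sock_set_keepalive(sock->sk);		/* SO_KEEPALIVE */
		sock_no_linger(sock->sk);		/* linger-on with zero timeout */
		sock_set_rcvbuf(sock->sk, 64 * 1024);	/* SO_RCVBUF, sets SOCK_RCVBUF_LOCK */
		sock_set_mark(sock->sk, 1);		/* SO_MARK */

		*res = sock;
		return 0;
	}
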
999 static void sock_release_reserved_memory(struct sock *sk, int bytes) in sock_release_reserved_memory() argument
1004 WARN_ON(bytes > sk->sk_reserved_mem); in sock_release_reserved_memory()
1005 WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes); in sock_release_reserved_memory()
1006 sk_mem_reclaim(sk); in sock_release_reserved_memory()
1009 static int sock_reserve_memory(struct sock *sk, int bytes) in sock_reserve_memory() argument
1015 if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk)) in sock_reserve_memory()
1024 charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages, in sock_reserve_memory()
1030 sk_memory_allocated_add(sk, pages); in sock_reserve_memory()
1031 allocated = sk_memory_allocated(sk); in sock_reserve_memory()
1035 if (allocated > sk_prot_mem_limits(sk, 1)) { in sock_reserve_memory()
1036 sk_memory_allocated_sub(sk, pages); in sock_reserve_memory()
1037 mem_cgroup_uncharge_skmem(sk->sk_memcg, pages); in sock_reserve_memory()
1040 sk_forward_alloc_add(sk, pages << PAGE_SHIFT); in sock_reserve_memory()
1042 WRITE_ONCE(sk->sk_reserved_mem, in sock_reserve_memory()
1043 sk->sk_reserved_mem + (pages << PAGE_SHIFT)); in sock_reserve_memory()
1059 sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen) in sock_devmem_dontneed() argument
1066 if (!sk_is_tcp(sk)) in sock_devmem_dontneed()
1083 xa_lock_bh(&sk->sk_user_frags); in sock_devmem_dontneed()
1090 &sk->sk_user_frags, tokens[i].token_start + j); in sock_devmem_dontneed()
1097 xa_unlock_bh(&sk->sk_user_frags); in sock_devmem_dontneed()
1101 xa_lock_bh(&sk->sk_user_frags); in sock_devmem_dontneed()
1108 xa_unlock_bh(&sk->sk_user_frags); in sock_devmem_dontneed()
1117 void sockopt_lock_sock(struct sock *sk) in sockopt_lock_sock() argument
1126 lock_sock(sk); in sockopt_lock_sock()
1130 void sockopt_release_sock(struct sock *sk) in sockopt_release_sock() argument
1135 release_sock(sk); in sockopt_release_sock()
1167 int sk_setsockopt(struct sock *sk, int level, int optname, in sk_setsockopt() argument
1171 struct socket *sock = sk->sk_socket; in sk_setsockopt()
1183 return sock_setbindtodevice(sk, optval, optlen); in sk_setsockopt()
1197 sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) || in sk_setsockopt()
1198 sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { in sk_setsockopt()
1199 sock_set_priority(sk, val); in sk_setsockopt()
1221 WRITE_ONCE(sk->sk_ll_usec, val); in sk_setsockopt()
1226 WRITE_ONCE(sk->sk_prefer_busy_poll, valbool); in sk_setsockopt()
1229 if (val > READ_ONCE(sk->sk_busy_poll_budget) && in sk_setsockopt()
1234 WRITE_ONCE(sk->sk_busy_poll_budget, val); in sk_setsockopt()
1248 cmpxchg(&sk->sk_pacing_status, in sk_setsockopt()
1252 WRITE_ONCE(sk->sk_max_pacing_rate, ulval); in sk_setsockopt()
1253 pacing_rate = READ_ONCE(sk->sk_pacing_rate); in sk_setsockopt()
1255 WRITE_ONCE(sk->sk_pacing_rate, ulval); in sk_setsockopt()
1262 val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash); in sk_setsockopt()
1266 WRITE_ONCE(sk->sk_txrehash, (u8)val); in sk_setsockopt()
1270 int (*set_peek_off)(struct sock *sk, int val); in sk_setsockopt()
1274 ret = set_peek_off(sk, val); in sk_setsockopt()
1281 return sock_devmem_dontneed(sk, optval, optlen); in sk_setsockopt()
1285 sockopt_lock_sock(sk); in sk_setsockopt()
1292 sock_valbool_flag(sk, SOCK_DBG, valbool); in sk_setsockopt()
1295 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); in sk_setsockopt()
1298 if (valbool && !sk_is_inet(sk)) in sk_setsockopt()
1301 sk->sk_reuseport = valbool; in sk_setsockopt()
1304 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); in sk_setsockopt()
1305 sk_dst_reset(sk); in sk_setsockopt()
1308 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); in sk_setsockopt()
1322 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; in sk_setsockopt()
1323 WRITE_ONCE(sk->sk_sndbuf, in sk_setsockopt()
1326 sk->sk_write_space(sk); in sk_setsockopt()
1348 __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max))); in sk_setsockopt()
1360 __sock_set_rcvbuf(sk, max(val, 0)); in sk_setsockopt()
1364 if (sk->sk_prot->keepalive) in sk_setsockopt()
1365 sk->sk_prot->keepalive(sk, valbool); in sk_setsockopt()
1366 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); in sk_setsockopt()
1370 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); in sk_setsockopt()
1374 sk->sk_no_check_tx = valbool; in sk_setsockopt()
1387 sock_reset_flag(sk, SOCK_LINGER); in sk_setsockopt()
1392 WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT); in sk_setsockopt()
1394 WRITE_ONCE(sk->sk_lingertime, t_sec * HZ); in sk_setsockopt()
1395 sock_set_flag(sk, SOCK_LINGER); in sk_setsockopt()
1406 sock_set_timestamp(sk, optname, valbool); in sk_setsockopt()
1421 ret = sock_set_timestamping(sk, optname, timestamping); in sk_setsockopt()
1426 int (*set_rcvlowat)(struct sock *sk, int val) = NULL; in sk_setsockopt()
1433 ret = set_rcvlowat(sk, val); in sk_setsockopt()
1435 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); in sk_setsockopt()
1440 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, in sk_setsockopt()
1446 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, in sk_setsockopt()
1455 ret = sk_attach_filter(&fprog, sk); in sk_setsockopt()
1467 ret = sk_attach_bpf(ufd, sk); in sk_setsockopt()
1476 ret = sk_reuseport_attach_filter(&fprog, sk); in sk_setsockopt()
1488 ret = sk_reuseport_attach_bpf(ufd, sk); in sk_setsockopt()
1493 ret = reuseport_detach_prog(sk); in sk_setsockopt()
1497 ret = sk_detach_filter(sk); in sk_setsockopt()
1501 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) in sk_setsockopt()
1504 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); in sk_setsockopt()
1508 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && in sk_setsockopt()
1509 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { in sk_setsockopt()
1514 __sock_set_mark(sk, val); in sk_setsockopt()
1517 sock_valbool_flag(sk, SOCK_RCVMARK, valbool); in sk_setsockopt()
1521 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); in sk_setsockopt()
1525 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); in sk_setsockopt()
1529 sock_valbool_flag(sk, SOCK_NOFCS, valbool); in sk_setsockopt()
1533 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); in sk_setsockopt()
1538 reuseport_update_incoming_cpu(sk, val); in sk_setsockopt()
1543 dst_negative_advice(sk); in sk_setsockopt()
1547 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { in sk_setsockopt()
1548 if (!(sk_is_tcp(sk) || in sk_setsockopt()
1549 (sk->sk_type == SOCK_DGRAM && in sk_setsockopt()
1550 sk->sk_protocol == IPPROTO_UDP))) in sk_setsockopt()
1552 } else if (sk->sk_family != PF_RDS) { in sk_setsockopt()
1559 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool); in sk_setsockopt()
1579 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { in sk_setsockopt()
1588 sock_valbool_flag(sk, SOCK_TXTIME, true); in sk_setsockopt()
1589 sk->sk_clockid = sk_txtime.clockid; in sk_setsockopt()
1590 sk->sk_txtime_deadline_mode = in sk_setsockopt()
1592 sk->sk_txtime_report_errors = in sk_setsockopt()
1597 ret = sock_bindtoindex_locked(sk, val); in sk_setsockopt()
1605 sk->sk_userlocks = val | (sk->sk_userlocks & in sk_setsockopt()
1618 delta = val - sk->sk_reserved_mem; in sk_setsockopt()
1620 sock_release_reserved_memory(sk, -delta); in sk_setsockopt()
1622 ret = sock_reserve_memory(sk, delta); in sk_setsockopt()
1630 sockopt_release_sock(sk); in sk_setsockopt()
1637 return sk_setsockopt(sock->sk, level, optname, in sock_setsockopt()
1642 static const struct cred *sk_get_peer_cred(struct sock *sk) in sk_get_peer_cred() argument
1646 spin_lock(&sk->sk_peer_lock); in sk_get_peer_cred()
1647 cred = get_cred(sk->sk_peer_cred); in sk_get_peer_cred()
1648 spin_unlock(&sk->sk_peer_lock); in sk_get_peer_cred()
1681 int sk_getsockopt(struct sock *sk, int level, int optname, in sk_getsockopt() argument
1684 struct socket *sock = sk->sk_socket; in sk_getsockopt()
1710 v.val = sock_flag(sk, SOCK_DBG); in sk_getsockopt()
1714 v.val = sock_flag(sk, SOCK_LOCALROUTE); in sk_getsockopt()
1718 v.val = sock_flag(sk, SOCK_BROADCAST); in sk_getsockopt()
1722 v.val = READ_ONCE(sk->sk_sndbuf); in sk_getsockopt()
1726 v.val = READ_ONCE(sk->sk_rcvbuf); in sk_getsockopt()
1730 v.val = sk->sk_reuse; in sk_getsockopt()
1734 v.val = sk->sk_reuseport; in sk_getsockopt()
1738 v.val = sock_flag(sk, SOCK_KEEPOPEN); in sk_getsockopt()
1742 v.val = sk->sk_type; in sk_getsockopt()
1746 v.val = sk->sk_protocol; in sk_getsockopt()
1750 v.val = sk->sk_family; in sk_getsockopt()
1754 v.val = -sock_error(sk); in sk_getsockopt()
1756 v.val = xchg(&sk->sk_err_soft, 0); in sk_getsockopt()
1760 v.val = sock_flag(sk, SOCK_URGINLINE); in sk_getsockopt()
1764 v.val = sk->sk_no_check_tx; in sk_getsockopt()
1768 v.val = READ_ONCE(sk->sk_priority); in sk_getsockopt()
1773 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); in sk_getsockopt()
1774 v.ling.l_linger = READ_ONCE(sk->sk_lingertime) / HZ; in sk_getsockopt()
1781 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && in sk_getsockopt()
1782 !sock_flag(sk, SOCK_TSTAMP_NEW) && in sk_getsockopt()
1783 !sock_flag(sk, SOCK_RCVTSTAMPNS); in sk_getsockopt()
1787 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW); in sk_getsockopt()
1791 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW); in sk_getsockopt()
1795 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW); in sk_getsockopt()
1805 if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) { in sk_getsockopt()
1806 v.timestamping.flags = READ_ONCE(sk->sk_tsflags); in sk_getsockopt()
1807 v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc); in sk_getsockopt()
1813 lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v, in sk_getsockopt()
1819 lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v, in sk_getsockopt()
1824 v.val = READ_ONCE(sk->sk_rcvlowat); in sk_getsockopt()
1845 spin_lock(&sk->sk_peer_lock); in sk_getsockopt()
1846 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); in sk_getsockopt()
1847 spin_unlock(&sk->sk_peer_lock); in sk_getsockopt()
1863 spin_lock(&sk->sk_peer_lock); in sk_getsockopt()
1864 peer_pid = get_pid(sk->sk_peer_pid); in sk_getsockopt()
1865 spin_unlock(&sk->sk_peer_lock); in sk_getsockopt()
1892 cred = sk_get_peer_cred(sk); in sk_getsockopt()
1929 v.val = sk->sk_state == TCP_LISTEN; in sk_getsockopt()
1941 v.val = READ_ONCE(sk->sk_mark); in sk_getsockopt()
1945 v.val = sock_flag(sk, SOCK_RCVMARK); in sk_getsockopt()
1949 v.val = sock_flag(sk, SOCK_RXQ_OVFL); in sk_getsockopt()
1953 v.val = sock_flag(sk, SOCK_WIFI_STATUS); in sk_getsockopt()
1960 v.val = READ_ONCE(sk->sk_peek_off); in sk_getsockopt()
1963 v.val = sock_flag(sk, SOCK_NOFCS); in sk_getsockopt()
1967 return sock_getbindtodevice(sk, optval, optlen, len); in sk_getsockopt()
1970 len = sk_get_filter(sk, optval, len); in sk_getsockopt()
1977 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); in sk_getsockopt()
1985 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); in sk_getsockopt()
1990 v.val = READ_ONCE(sk->sk_ll_usec); in sk_getsockopt()
1993 v.val = READ_ONCE(sk->sk_prefer_busy_poll); in sk_getsockopt()
2001 v.ulval = READ_ONCE(sk->sk_max_pacing_rate); in sk_getsockopt()
2005 READ_ONCE(sk->sk_max_pacing_rate)); in sk_getsockopt()
2010 v.val = READ_ONCE(sk->sk_incoming_cpu); in sk_getsockopt()
2017 sk_get_meminfo(sk, meminfo); in sk_getsockopt()
2028 v.val = READ_ONCE(sk->sk_napi_id); in sk_getsockopt()
2041 v.val64 = sock_gen_cookie(sk); in sk_getsockopt()
2045 v.val = sock_flag(sk, SOCK_ZEROCOPY); in sk_getsockopt()
2050 v.txtime.clockid = sk->sk_clockid; in sk_getsockopt()
2051 v.txtime.flags |= sk->sk_txtime_deadline_mode ? in sk_getsockopt()
2053 v.txtime.flags |= sk->sk_txtime_report_errors ? in sk_getsockopt()
2058 v.val = READ_ONCE(sk->sk_bound_dev_if); in sk_getsockopt()
2065 v.val64 = sock_net(sk)->net_cookie; in sk_getsockopt()
2069 v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK; in sk_getsockopt()
2073 v.val = READ_ONCE(sk->sk_reserved_mem); in sk_getsockopt()
2078 v.val = READ_ONCE(sk->sk_txrehash); in sk_getsockopt()
2103 static inline void sock_lock_init(struct sock *sk) in sock_lock_init() argument
2105 if (sk->sk_kern_sock) in sock_lock_init()
2107 sk, in sock_lock_init()
2108 af_family_kern_slock_key_strings[sk->sk_family], in sock_lock_init()
2109 af_family_kern_slock_keys + sk->sk_family, in sock_lock_init()
2110 af_family_kern_key_strings[sk->sk_family], in sock_lock_init()
2111 af_family_kern_keys + sk->sk_family); in sock_lock_init()
2114 sk, in sock_lock_init()
2115 af_family_slock_key_strings[sk->sk_family], in sock_lock_init()
2116 af_family_slock_keys + sk->sk_family, in sock_lock_init()
2117 af_family_key_strings[sk->sk_family], in sock_lock_init()
2118 af_family_keys + sk->sk_family); in sock_lock_init()
2157 struct sock *sk; in sk_prot_alloc() local
2162 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); in sk_prot_alloc()
2163 if (!sk) in sk_prot_alloc()
2164 return sk; in sk_prot_alloc()
2166 sk_prot_clear_nulls(sk, prot->obj_size); in sk_prot_alloc()
2168 sk = kmalloc(prot->obj_size, priority); in sk_prot_alloc()
2170 if (sk != NULL) { in sk_prot_alloc()
2171 if (security_sk_alloc(sk, family, priority)) in sk_prot_alloc()
2178 return sk; in sk_prot_alloc()
2181 security_sk_free(sk); in sk_prot_alloc()
2184 kmem_cache_free(slab, sk); in sk_prot_alloc()
2186 kfree(sk); in sk_prot_alloc()
2190 static void sk_prot_free(struct proto *prot, struct sock *sk) in sk_prot_free() argument
2198 cgroup_sk_free(&sk->sk_cgrp_data); in sk_prot_free()
2199 mem_cgroup_sk_free(sk); in sk_prot_free()
2200 security_sk_free(sk); in sk_prot_free()
2202 kmem_cache_free(slab, sk); in sk_prot_free()
2204 kfree(sk); in sk_prot_free()
2219 struct sock *sk; in sk_alloc() local
2221 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); in sk_alloc()
2222 if (sk) { in sk_alloc()
2223 sk->sk_family = family; in sk_alloc()
2228 sk->sk_prot = sk->sk_prot_creator = prot; in sk_alloc()
2229 sk->sk_kern_sock = kern; in sk_alloc()
2230 sock_lock_init(sk); in sk_alloc()
2231 sk->sk_net_refcnt = kern ? 0 : 1; in sk_alloc()
2232 if (likely(sk->sk_net_refcnt)) { in sk_alloc()
2233 get_net_track(net, &sk->ns_tracker, priority); in sk_alloc()
2236 __netns_tracker_alloc(net, &sk->ns_tracker, in sk_alloc()
2240 sock_net_set(sk, net); in sk_alloc()
2241 refcount_set(&sk->sk_wmem_alloc, 1); in sk_alloc()
2243 mem_cgroup_sk_alloc(sk); in sk_alloc()
2244 cgroup_sk_alloc(&sk->sk_cgrp_data); in sk_alloc()
2245 sock_update_classid(&sk->sk_cgrp_data); in sk_alloc()
2246 sock_update_netprioidx(&sk->sk_cgrp_data); in sk_alloc()
2247 sk_tx_queue_clear(sk); in sk_alloc()
2250 return sk; in sk_alloc()
2259 struct sock *sk = container_of(head, struct sock, sk_rcu); in __sk_destruct() local
2262 if (sk->sk_destruct) in __sk_destruct()
2263 sk->sk_destruct(sk); in __sk_destruct()
2265 filter = rcu_dereference_check(sk->sk_filter, in __sk_destruct()
2266 refcount_read(&sk->sk_wmem_alloc) == 0); in __sk_destruct()
2268 sk_filter_uncharge(sk, filter); in __sk_destruct()
2269 RCU_INIT_POINTER(sk->sk_filter, NULL); in __sk_destruct()
2272 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); in __sk_destruct()
2275 bpf_sk_storage_free(sk); in __sk_destruct()
2278 if (atomic_read(&sk->sk_omem_alloc)) in __sk_destruct()
2280 __func__, atomic_read(&sk->sk_omem_alloc)); in __sk_destruct()
2282 if (sk->sk_frag.page) { in __sk_destruct()
2283 put_page(sk->sk_frag.page); in __sk_destruct()
2284 sk->sk_frag.page = NULL; in __sk_destruct()
2288 put_cred(sk->sk_peer_cred); in __sk_destruct()
2289 put_pid(sk->sk_peer_pid); in __sk_destruct()
2291 if (likely(sk->sk_net_refcnt)) in __sk_destruct()
2292 put_net_track(sock_net(sk), &sk->ns_tracker); in __sk_destruct()
2294 __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); in __sk_destruct()
2296 sk_prot_free(sk->sk_prot_creator, sk); in __sk_destruct()
2299 void sk_destruct(struct sock *sk) in sk_destruct() argument
2301 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); in sk_destruct()
2303 if (rcu_access_pointer(sk->sk_reuseport_cb)) { in sk_destruct()
2304 reuseport_detach_sock(sk); in sk_destruct()
2309 call_rcu(&sk->sk_rcu, __sk_destruct); in sk_destruct()
2311 __sk_destruct(&sk->sk_rcu); in sk_destruct()
2314 static void __sk_free(struct sock *sk) in __sk_free() argument
2316 if (likely(sk->sk_net_refcnt)) in __sk_free()
2317 sock_inuse_add(sock_net(sk), -1); in __sk_free()
2319 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) in __sk_free()
2320 sock_diag_broadcast_destroy(sk); in __sk_free()
2322 sk_destruct(sk); in __sk_free()
2325 void sk_free(struct sock *sk) in sk_free() argument
2332 if (refcount_dec_and_test(&sk->sk_wmem_alloc)) in sk_free()
2333 __sk_free(sk); in sk_free()
2337 static void sk_init_common(struct sock *sk) in sk_init_common() argument
2339 skb_queue_head_init(&sk->sk_receive_queue); in sk_init_common()
2340 skb_queue_head_init(&sk->sk_write_queue); in sk_init_common()
2341 skb_queue_head_init(&sk->sk_error_queue); in sk_init_common()
2343 rwlock_init(&sk->sk_callback_lock); in sk_init_common()
2344 lockdep_set_class_and_name(&sk->sk_receive_queue.lock, in sk_init_common()
2345 af_rlock_keys + sk->sk_family, in sk_init_common()
2346 af_family_rlock_key_strings[sk->sk_family]); in sk_init_common()
2347 lockdep_set_class_and_name(&sk->sk_write_queue.lock, in sk_init_common()
2348 af_wlock_keys + sk->sk_family, in sk_init_common()
2349 af_family_wlock_key_strings[sk->sk_family]); in sk_init_common()
2350 lockdep_set_class_and_name(&sk->sk_error_queue.lock, in sk_init_common()
2351 af_elock_keys + sk->sk_family, in sk_init_common()
2352 af_family_elock_key_strings[sk->sk_family]); in sk_init_common()
2353 if (sk->sk_kern_sock) in sk_init_common()
2354 lockdep_set_class_and_name(&sk->sk_callback_lock, in sk_init_common()
2355 af_kern_callback_keys + sk->sk_family, in sk_init_common()
2356 af_family_kern_clock_key_strings[sk->sk_family]); in sk_init_common()
2358 lockdep_set_class_and_name(&sk->sk_callback_lock, in sk_init_common()
2359 af_callback_keys + sk->sk_family, in sk_init_common()
2360 af_family_clock_key_strings[sk->sk_family]); in sk_init_common()
2370 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) in sk_clone_lock() argument
2372 struct proto *prot = READ_ONCE(sk->sk_prot); in sk_clone_lock()
2377 newsk = sk_prot_alloc(prot, priority, sk->sk_family); in sk_clone_lock()
2381 sock_copy(newsk, sk); in sk_clone_lock()
2419 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; in sk_clone_lock()
2430 filter = rcu_dereference(sk->sk_filter); in sk_clone_lock()
2440 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { in sk_clone_lock()
2453 if (bpf_sk_storage_clone(sk, newsk)) { in sk_clone_lock()
2483 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP) in sk_clone_lock()
2490 void sk_free_unlock_clone(struct sock *sk) in sk_free_unlock_clone() argument
2494 sk->sk_destruct = NULL; in sk_free_unlock_clone()
2495 bh_unlock_sock(sk); in sk_free_unlock_clone()
2496 sk_free(sk); in sk_free_unlock_clone()
2500 static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst) in sk_dst_gso_max_size() argument
2506 is_ipv6 = (sk->sk_family == AF_INET6 && in sk_dst_gso_max_size()
2507 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)); in sk_dst_gso_max_size()
2512 if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk)) in sk_dst_gso_max_size()
2518 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) in sk_setup_caps() argument
2522 sk->sk_route_caps = dst->dev->features; in sk_setup_caps()
2523 if (sk_is_tcp(sk)) in sk_setup_caps()
2524 sk->sk_route_caps |= NETIF_F_GSO; in sk_setup_caps()
2525 if (sk->sk_route_caps & NETIF_F_GSO) in sk_setup_caps()
2526 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; in sk_setup_caps()
2527 if (unlikely(sk->sk_gso_disabled)) in sk_setup_caps()
2528 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; in sk_setup_caps()
2529 if (sk_can_gso(sk)) { in sk_setup_caps()
2531 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; in sk_setup_caps()
2533 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; in sk_setup_caps()
2534 sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst); in sk_setup_caps()
2539 sk->sk_gso_max_segs = max_segs; in sk_setup_caps()
2540 sk_dst_set(sk, dst); in sk_setup_caps()
2554 struct sock *sk = skb->sk; in sock_wfree() local
2558 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { in sock_wfree()
2559 if (sock_flag(sk, SOCK_RCU_FREE) && in sock_wfree()
2560 sk->sk_write_space == sock_def_write_space) { in sock_wfree()
2562 free = refcount_sub_and_test(len, &sk->sk_wmem_alloc); in sock_wfree()
2563 sock_def_write_space_wfree(sk); in sock_wfree()
2566 __sk_free(sk); in sock_wfree()
2574 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc)); in sock_wfree()
2575 sk->sk_write_space(sk); in sock_wfree()
2582 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc)) in sock_wfree()
2583 __sk_free(sk); in sock_wfree()
2592 struct sock *sk = skb->sk; in __sock_wfree() local
2594 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) in __sock_wfree()
2595 __sk_free(sk); in __sock_wfree()
2598 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) in skb_set_owner_w() argument
2602 if (unlikely(!sk_fullsock(sk))) in skb_set_owner_w()
2603 return skb_set_owner_edemux(skb, sk); in skb_set_owner_w()
2605 skb->sk = sk; in skb_set_owner_w()
2607 skb_set_hash_from_sk(skb, sk); in skb_set_owner_w()
2613 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in skb_set_owner_w()
2640 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk)) in skb_orphan_partial()
2652 struct sock *sk = skb->sk; in sock_rfree() local
2655 atomic_sub(len, &sk->sk_rmem_alloc); in sock_rfree()
2656 sk_mem_uncharge(sk, len); in sock_rfree()
2666 sock_put(skb->sk); in sock_efree()
2676 struct sock *sk = skb->sk; in sock_pfree() local
2678 if (!sk_is_refcounted(sk)) in sock_pfree()
2681 if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) { in sock_pfree()
2682 inet_reqsk(sk)->rsk_listener = NULL; in sock_pfree()
2683 reqsk_free(inet_reqsk(sk)); in sock_pfree()
2687 sock_gen_put(sk); in sock_pfree()
2692 kuid_t sock_i_uid(struct sock *sk) in sock_i_uid() argument
2696 read_lock_bh(&sk->sk_callback_lock); in sock_i_uid()
2697 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; in sock_i_uid()
2698 read_unlock_bh(&sk->sk_callback_lock); in sock_i_uid()
2703 unsigned long __sock_i_ino(struct sock *sk) in __sock_i_ino() argument
2707 read_lock(&sk->sk_callback_lock); in __sock_i_ino()
2708 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; in __sock_i_ino()
2709 read_unlock(&sk->sk_callback_lock); in __sock_i_ino()
2714 unsigned long sock_i_ino(struct sock *sk) in sock_i_ino() argument
2719 ino = __sock_i_ino(sk); in sock_i_ino()
2728 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, in sock_wmalloc() argument
2732 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) { in sock_wmalloc()
2736 skb_set_owner_w(skb, sk); in sock_wmalloc()
2746 struct sock *sk = skb->sk; in sock_ofree() local
2748 atomic_sub(skb->truesize, &sk->sk_omem_alloc); in sock_ofree()
2751 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, in sock_omalloc() argument
2757 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > in sock_omalloc()
2758 READ_ONCE(sock_net(sk)->core.sysctl_optmem_max)) in sock_omalloc()
2765 atomic_add(skb->truesize, &sk->sk_omem_alloc); in sock_omalloc()
2766 skb->sk = sk; in sock_omalloc()
2774 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) in sock_kmalloc() argument
2776 int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max); in sock_kmalloc()
2779 atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { in sock_kmalloc()
2784 atomic_add(size, &sk->sk_omem_alloc); in sock_kmalloc()
2788 atomic_sub(size, &sk->sk_omem_alloc); in sock_kmalloc()
2798 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, in __sock_kfree_s() argument
2807 atomic_sub(size, &sk->sk_omem_alloc); in __sock_kfree_s()
2810 void sock_kfree_s(struct sock *sk, void *mem, int size) in sock_kfree_s() argument
2812 __sock_kfree_s(sk, mem, size, false); in sock_kfree_s()
2816 void sock_kzfree_s(struct sock *sk, void *mem, int size) in sock_kzfree_s() argument
2818 __sock_kfree_s(sk, mem, size, true); in sock_kzfree_s()
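
sock_kmalloc(), sock_kfree_s() and sock_kzfree_s() above charge and release per-socket option memory against sk->sk_omem_alloc, capped by net.core.optmem_max. A hedged sketch of the matched charge/release pattern a caller is expected to follow; the function name and the -ENOBUFS choice are assumptions:

	#include <linux/string.h>
	#include <net/sock.h>

	static int example_copy_option(struct sock *sk, const void *data, int len)
	{
		void *buf;

		buf = sock_kmalloc(sk, len, GFP_KERNEL);	/* charged to sk_omem_alloc */
		if (!buf)
			return -ENOBUFS;

		memcpy(buf, data, len);
		/* ... use buf while it is attached to this socket ... */

		sock_kfree_s(sk, buf, len);	/* sock_kzfree_s() if buf held key material */
		return 0;
	}
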
2825 static long sock_wait_for_wmem(struct sock *sk, long timeo) in sock_wait_for_wmem() argument
2829 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in sock_wait_for_wmem()
2835 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in sock_wait_for_wmem()
2836 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in sock_wait_for_wmem()
2837 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) in sock_wait_for_wmem()
2839 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) in sock_wait_for_wmem()
2841 if (READ_ONCE(sk->sk_err)) in sock_wait_for_wmem()
2845 finish_wait(sk_sleep(sk), &wait); in sock_wait_for_wmem()
2854 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, in sock_alloc_send_pskb() argument
2862 timeo = sock_sndtimeo(sk, noblock); in sock_alloc_send_pskb()
2864 err = sock_error(sk); in sock_alloc_send_pskb()
2869 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) in sock_alloc_send_pskb()
2872 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf)) in sock_alloc_send_pskb()
2875 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in sock_alloc_send_pskb()
2876 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in sock_alloc_send_pskb()
2882 timeo = sock_wait_for_wmem(sk, timeo); in sock_alloc_send_pskb()
2885 errcode, sk->sk_allocation); in sock_alloc_send_pskb()
2887 skb_set_owner_w(skb, sk); in sock_alloc_send_pskb()
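
sock_alloc_send_pskb() above waits (subject to sk_sndtimeo) until sk_wmem_alloc drops below sk_sndbuf, then returns an skb owned by the socket via skb_set_owner_w(). A minimal sketch of a datagram send path using sock_alloc_send_skb(), the zero-frag wrapper around it; the headroom size, error handling and function name are assumptions:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <net/sock.h>

	static struct sk_buff *example_build_dgram(struct sock *sk, struct msghdr *msg,
						   size_t len, int *err)
	{
		struct sk_buff *skb;

		skb = sock_alloc_send_skb(sk, len + MAX_HEADER,
					  msg->msg_flags & MSG_DONTWAIT, err);
		if (!skb)
			return NULL;

		skb_reserve(skb, MAX_HEADER);		/* room for lower-layer headers */
		if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
			kfree_skb(skb);
			*err = -EFAULT;
			return NULL;
		}
		return skb;	/* truesize charged to sk->sk_wmem_alloc */
	}
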
2898 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, in __sock_cmsg_send() argument
2907 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && in __sock_cmsg_send()
2908 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in __sock_cmsg_send()
2927 if (!sock_flag(sk, SOCK_TXTIME)) in __sock_cmsg_send()
2934 if (sk_is_tcp(sk)) in __sock_cmsg_send()
2936 tsflags = READ_ONCE(sk->sk_tsflags); in __sock_cmsg_send()
2955 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, in sock_cmsg_send() argument
2966 ret = __sock_cmsg_send(sk, cmsg, sockc); in sock_cmsg_send()
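
sock_cmsg_send() above walks the SOL_SOCKET control messages (SO_MARK, SO_TIMESTAMPING, SCM_TXTIME, ...) through __sock_cmsg_send() and fills a struct sockcm_cookie. A hedged sketch of how a sendmsg implementation might consume it; the function name is hypothetical and the flow is simplified:

	#include <linux/socket.h>
	#include <net/sock.h>

	static int example_parse_sendmsg_cmsgs(struct sock *sk, struct msghdr *msg,
					       struct sockcm_cookie *sockc)
	{
		int err;

		/* Start from the socket's defaults, then let control messages
		 * override them. */
		sockcm_init(sockc, sk);
		if (msg->msg_controllen) {
			err = sock_cmsg_send(sk, msg, sockc);
			if (err)
				return err;
		}
		/* sockc->mark, sockc->tsflags, sockc->transmit_time are now usable. */
		return 0;
	}
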
2974 static void sk_enter_memory_pressure(struct sock *sk) in sk_enter_memory_pressure() argument
2976 if (!sk->sk_prot->enter_memory_pressure) in sk_enter_memory_pressure()
2979 sk->sk_prot->enter_memory_pressure(sk); in sk_enter_memory_pressure()
2982 static void sk_leave_memory_pressure(struct sock *sk) in sk_leave_memory_pressure() argument
2984 if (sk->sk_prot->leave_memory_pressure) { in sk_leave_memory_pressure()
2985 INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure, in sk_leave_memory_pressure()
2986 tcp_leave_memory_pressure, sk); in sk_leave_memory_pressure()
2988 unsigned long *memory_pressure = sk->sk_prot->memory_pressure; in sk_leave_memory_pressure()
3041 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) in sk_page_frag_refill() argument
3043 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) in sk_page_frag_refill()
3046 sk_enter_memory_pressure(sk); in sk_page_frag_refill()
3047 sk_stream_moderate_sndbuf(sk); in sk_page_frag_refill()
3052 void __lock_sock(struct sock *sk) in __lock_sock() argument
3053 __releases(&sk->sk_lock.slock) in __lock_sock()
3054 __acquires(&sk->sk_lock.slock) in __lock_sock()
3059 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, in __lock_sock()
3061 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock()
3063 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock()
3064 if (!sock_owned_by_user(sk)) in __lock_sock()
3067 finish_wait(&sk->sk_lock.wq, &wait); in __lock_sock()
3070 void __release_sock(struct sock *sk) in __release_sock() argument
3071 __releases(&sk->sk_lock.slock) in __release_sock()
3072 __acquires(&sk->sk_lock.slock) in __release_sock()
3076 while ((skb = sk->sk_backlog.head) != NULL) { in __release_sock()
3077 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; in __release_sock()
3079 spin_unlock_bh(&sk->sk_lock.slock); in __release_sock()
3086 sk_backlog_rcv(sk, skb); in __release_sock()
3093 spin_lock_bh(&sk->sk_lock.slock); in __release_sock()
3100 sk->sk_backlog.len = 0; in __release_sock()
3103 void __sk_flush_backlog(struct sock *sk) in __sk_flush_backlog() argument
3105 spin_lock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
3106 __release_sock(sk); in __sk_flush_backlog()
3108 if (sk->sk_prot->release_cb) in __sk_flush_backlog()
3109 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, in __sk_flush_backlog()
3110 tcp_release_cb, sk); in __sk_flush_backlog()
3112 spin_unlock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
3127 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) in sk_wait_data() argument
3132 add_wait_queue(sk_sleep(sk), &wait); in sk_wait_data()
3133 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); in sk_wait_data()
3134 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait); in sk_wait_data()
3135 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); in sk_wait_data()
3136 remove_wait_queue(sk_sleep(sk), &wait); in sk_wait_data()
3156 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) in __sk_mem_raise_allocated() argument
3158 struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL; in __sk_mem_raise_allocated()
3159 struct proto *prot = sk->sk_prot; in __sk_mem_raise_allocated()
3163 sk_memory_allocated_add(sk, amt); in __sk_mem_raise_allocated()
3164 allocated = sk_memory_allocated(sk); in __sk_mem_raise_allocated()
3173 if (allocated <= sk_prot_mem_limits(sk, 0)) { in __sk_mem_raise_allocated()
3174 sk_leave_memory_pressure(sk); in __sk_mem_raise_allocated()
3179 if (allocated > sk_prot_mem_limits(sk, 1)) in __sk_mem_raise_allocated()
3180 sk_enter_memory_pressure(sk); in __sk_mem_raise_allocated()
3183 if (allocated > sk_prot_mem_limits(sk, 2)) in __sk_mem_raise_allocated()
3195 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot)) in __sk_mem_raise_allocated()
3199 int wmem0 = sk_get_wmem0(sk, prot); in __sk_mem_raise_allocated()
3201 if (sk->sk_type == SOCK_STREAM) { in __sk_mem_raise_allocated()
3202 if (sk->sk_wmem_queued < wmem0) in __sk_mem_raise_allocated()
3204 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) { in __sk_mem_raise_allocated()
3209 if (sk_has_memory_pressure(sk)) { in __sk_mem_raise_allocated()
3216 if (!sk_under_global_memory_pressure(sk)) in __sk_mem_raise_allocated()
3223 alloc = sk_sockets_allocated_read_positive(sk); in __sk_mem_raise_allocated()
3224 if (sk_prot_mem_limits(sk, 2) > alloc * in __sk_mem_raise_allocated()
3225 sk_mem_pages(sk->sk_wmem_queued + in __sk_mem_raise_allocated()
3226 atomic_read(&sk->sk_rmem_alloc) + in __sk_mem_raise_allocated()
3227 sk->sk_forward_alloc)) in __sk_mem_raise_allocated()
3233 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { in __sk_mem_raise_allocated()
3234 sk_stream_moderate_sndbuf(sk); in __sk_mem_raise_allocated()
3239 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) { in __sk_mem_raise_allocated()
3250 trace_sock_exceed_buf_limit(sk, prot, allocated, kind); in __sk_mem_raise_allocated()
3252 sk_memory_allocated_sub(sk, amt); in __sk_mem_raise_allocated()
3270 int __sk_mem_schedule(struct sock *sk, int size, int kind) in __sk_mem_schedule() argument
3274 sk_forward_alloc_add(sk, amt << PAGE_SHIFT); in __sk_mem_schedule()
3275 ret = __sk_mem_raise_allocated(sk, size, amt, kind); in __sk_mem_schedule()
3277 sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT)); in __sk_mem_schedule()
3289 void __sk_mem_reduce_allocated(struct sock *sk, int amount) in __sk_mem_reduce_allocated() argument
3291 sk_memory_allocated_sub(sk, amount); in __sk_mem_reduce_allocated()
3293 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in __sk_mem_reduce_allocated()
3294 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); in __sk_mem_reduce_allocated()
3296 if (sk_under_global_memory_pressure(sk) && in __sk_mem_reduce_allocated()
3297 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) in __sk_mem_reduce_allocated()
3298 sk_leave_memory_pressure(sk); in __sk_mem_reduce_allocated()
3306 void __sk_mem_reclaim(struct sock *sk, int amount) in __sk_mem_reclaim() argument
3309 sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT)); in __sk_mem_reclaim()
3310 __sk_mem_reduce_allocated(sk, amount); in __sk_mem_reclaim()
3314 int sk_set_peek_off(struct sock *sk, int val) in sk_set_peek_off() argument
3316 WRITE_ONCE(sk->sk_peek_off, val); in sk_set_peek_off()
3385 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) in sock_no_sendmsg_locked() argument
3415 sock_update_netprioidx(&sock->sk->sk_cgrp_data); in __receive_sock()
3416 sock_update_classid(&sock->sk->sk_cgrp_data); in __receive_sock()
3424 static void sock_def_wakeup(struct sock *sk) in sock_def_wakeup() argument
3429 wq = rcu_dereference(sk->sk_wq); in sock_def_wakeup()
3435 static void sock_def_error_report(struct sock *sk) in sock_def_error_report() argument
3440 wq = rcu_dereference(sk->sk_wq); in sock_def_error_report()
3443 sk_wake_async_rcu(sk, SOCK_WAKE_IO, POLL_ERR); in sock_def_error_report()
3447 void sock_def_readable(struct sock *sk) in sock_def_readable() argument
3451 trace_sk_data_ready(sk); in sock_def_readable()
3454 wq = rcu_dereference(sk->sk_wq); in sock_def_readable()
3458 sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN); in sock_def_readable()
3462 static void sock_def_write_space(struct sock *sk) in sock_def_write_space() argument
3471 if (sock_writeable(sk)) { in sock_def_write_space()
3472 wq = rcu_dereference(sk->sk_wq); in sock_def_write_space()
3478 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); in sock_def_write_space()
3488 static void sock_def_write_space_wfree(struct sock *sk) in sock_def_write_space_wfree() argument
3493 if (sock_writeable(sk)) { in sock_def_write_space_wfree()
3494 struct socket_wq *wq = rcu_dereference(sk->sk_wq); in sock_def_write_space_wfree()
3503 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); in sock_def_write_space_wfree()
3507 static void sock_def_destruct(struct sock *sk) in sock_def_destruct() argument
3511 void sk_send_sigurg(struct sock *sk) in sk_send_sigurg() argument
3513 if (sk->sk_socket && sk->sk_socket->file) in sk_send_sigurg()
3514 if (send_sigurg(sk->sk_socket->file)) in sk_send_sigurg()
3515 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); in sk_send_sigurg()
3519 void sk_reset_timer(struct sock *sk, struct timer_list* timer, in sk_reset_timer() argument
3523 sock_hold(sk); in sk_reset_timer()
3527 void sk_stop_timer(struct sock *sk, struct timer_list* timer) in sk_stop_timer() argument
3530 __sock_put(sk); in sk_stop_timer()
3534 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) in sk_stop_timer_sync() argument
3537 __sock_put(sk); in sk_stop_timer_sync()
3541 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid) in sock_init_data_uid() argument
3543 sk_init_common(sk); in sock_init_data_uid()
3544 sk->sk_send_head = NULL; in sock_init_data_uid()
3546 timer_setup(&sk->sk_timer, NULL, 0); in sock_init_data_uid()
3548 sk->sk_allocation = GFP_KERNEL; in sock_init_data_uid()
3549 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); in sock_init_data_uid()
3550 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); in sock_init_data_uid()
3551 sk->sk_state = TCP_CLOSE; in sock_init_data_uid()
3552 sk->sk_use_task_frag = true; in sock_init_data_uid()
3553 sk_set_socket(sk, sock); in sock_init_data_uid()
3555 sock_set_flag(sk, SOCK_ZAPPED); in sock_init_data_uid()
3558 sk->sk_type = sock->type; in sock_init_data_uid()
3559 RCU_INIT_POINTER(sk->sk_wq, &sock->wq); in sock_init_data_uid()
3560 sock->sk = sk; in sock_init_data_uid()
3562 RCU_INIT_POINTER(sk->sk_wq, NULL); in sock_init_data_uid()
3564 sk->sk_uid = uid; in sock_init_data_uid()
3566 sk->sk_state_change = sock_def_wakeup; in sock_init_data_uid()
3567 sk->sk_data_ready = sock_def_readable; in sock_init_data_uid()
3568 sk->sk_write_space = sock_def_write_space; in sock_init_data_uid()
3569 sk->sk_error_report = sock_def_error_report; in sock_init_data_uid()
3570 sk->sk_destruct = sock_def_destruct; in sock_init_data_uid()
3572 sk->sk_frag.page = NULL; in sock_init_data_uid()
3573 sk->sk_frag.offset = 0; in sock_init_data_uid()
3574 sk->sk_peek_off = -1; in sock_init_data_uid()
3576 sk->sk_peer_pid = NULL; in sock_init_data_uid()
3577 sk->sk_peer_cred = NULL; in sock_init_data_uid()
3578 spin_lock_init(&sk->sk_peer_lock); in sock_init_data_uid()
3580 sk->sk_write_pending = 0; in sock_init_data_uid()
3581 sk->sk_rcvlowat = 1; in sock_init_data_uid()
3582 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; in sock_init_data_uid()
3583 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; in sock_init_data_uid()
3585 sk->sk_stamp = SK_DEFAULT_STAMP; in sock_init_data_uid()
3587 seqlock_init(&sk->sk_stamp_seq); in sock_init_data_uid()
3589 atomic_set(&sk->sk_zckey, 0); in sock_init_data_uid()
3592 sk->sk_napi_id = 0; in sock_init_data_uid()
3593 sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); in sock_init_data_uid()
3596 sk->sk_max_pacing_rate = ~0UL; in sock_init_data_uid()
3597 sk->sk_pacing_rate = ~0UL; in sock_init_data_uid()
3598 WRITE_ONCE(sk->sk_pacing_shift, 10); in sock_init_data_uid()
3599 sk->sk_incoming_cpu = -1; in sock_init_data_uid()
3601 sk_rx_queue_clear(sk); in sock_init_data_uid()
3607 refcount_set(&sk->sk_refcnt, 1); in sock_init_data_uid()
3608 atomic_set(&sk->sk_drops, 0); in sock_init_data_uid()
3612 void sock_init_data(struct socket *sock, struct sock *sk) in sock_init_data() argument
3616 make_kuid(sock_net(sk)->user_ns, 0); in sock_init_data()
3618 sock_init_data_uid(sock, sk, uid); in sock_init_data()
3622 void lock_sock_nested(struct sock *sk, int subclass) in lock_sock_nested() argument
3625 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); in lock_sock_nested()
3628 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_nested()
3629 if (sock_owned_by_user_nocheck(sk)) in lock_sock_nested()
3630 __lock_sock(sk); in lock_sock_nested()
3631 sk->sk_lock.owned = 1; in lock_sock_nested()
3632 spin_unlock_bh(&sk->sk_lock.slock); in lock_sock_nested()
3636 void release_sock(struct sock *sk) in release_sock() argument
3638 spin_lock_bh(&sk->sk_lock.slock); in release_sock()
3639 if (sk->sk_backlog.tail) in release_sock()
3640 __release_sock(sk); in release_sock()
3642 if (sk->sk_prot->release_cb) in release_sock()
3643 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, in release_sock()
3644 tcp_release_cb, sk); in release_sock()
3646 sock_release_ownership(sk); in release_sock()
3647 if (waitqueue_active(&sk->sk_lock.wq)) in release_sock()
3648 wake_up(&sk->sk_lock.wq); in release_sock()
3649 spin_unlock_bh(&sk->sk_lock.slock); in release_sock()
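
lock_sock_nested(), __lock_sock(), __release_sock() and release_sock() above implement the process-context socket ownership lock; release_sock() replays any backlog queued by softirq while the socket was owned and runs sk_prot->release_cb. A minimal sketch of the usual caller pattern, mirroring what sock_set_reuseaddr() does earlier in this listing (the wrapper name is illustrative):

	#include <net/sock.h>

	static void example_locked_update(struct sock *sk)
	{
		lock_sock(sk);		/* wrapper for lock_sock_nested(sk, 0) */
		/* Fields like sk->sk_reuse are only written with the socket owned. */
		sk->sk_reuse = SK_CAN_REUSE;
		release_sock(sk);	/* drains sk->sk_backlog, wakes lock waiters */
	}
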
3653 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) in __lock_sock_fast() argument
3656 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock_fast()
3658 if (!sock_owned_by_user_nocheck(sk)) { in __lock_sock_fast()
3677 __lock_sock(sk); in __lock_sock_fast()
3678 sk->sk_lock.owned = 1; in __lock_sock_fast()
3679 __acquire(&sk->sk_lock.slock); in __lock_sock_fast()
3680 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock_fast()
3688 struct sock *sk = sock->sk; in sock_gettstamp() local
3691 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in sock_gettstamp()
3692 ts = ktime_to_timespec64(sock_read_timestamp(sk)); in sock_gettstamp()
3697 sock_write_timestamp(sk, kt); in sock_gettstamp()
3724 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag) in sock_enable_timestamp() argument
3726 if (!sock_flag(sk, flag)) { in sock_enable_timestamp()
3727 unsigned long previous_flags = sk->sk_flags; in sock_enable_timestamp()
3729 sock_set_flag(sk, flag); in sock_enable_timestamp()
3735 if (sock_needs_netstamp(sk) && in sock_enable_timestamp()
3741 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, in sock_recv_errqueue() argument
3749 skb = sock_dequeue_err_skb(sk); in sock_recv_errqueue()
3762 sock_recv_timestamp(msg, sk, skb); in sock_recv_errqueue()
3787 struct sock *sk = sock->sk; in sock_common_getsockopt() local
3790 return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen); in sock_common_getsockopt()
3797 struct sock *sk = sock->sk; in sock_common_recvmsg() local
3801 err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len); in sock_common_recvmsg()
3814 struct sock *sk = sock->sk; in sock_common_setsockopt() local
3817 return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen); in sock_common_setsockopt()
3821 void sk_common_release(struct sock *sk) in sk_common_release() argument
3823 if (sk->sk_prot->destroy) in sk_common_release()
3824 sk->sk_prot->destroy(sk); in sk_common_release()
3834 sk->sk_prot->unhash(sk); in sk_common_release()
3848 sock_orphan(sk); in sk_common_release()
3850 xfrm_sk_free_policy(sk); in sk_common_release()
3852 sock_put(sk); in sk_common_release()
3856 void sk_get_meminfo(const struct sock *sk, u32 *mem) in sk_get_meminfo() argument
3860 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); in sk_get_meminfo()
3861 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf); in sk_get_meminfo()
3862 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); in sk_get_meminfo()
3863 mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf); in sk_get_meminfo()
3864 mem[SK_MEMINFO_FWD_ALLOC] = sk_forward_alloc_get(sk); in sk_get_meminfo()
3865 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued); in sk_get_meminfo()
3866 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); in sk_get_meminfo()
3867 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); in sk_get_meminfo()
3868 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); in sk_get_meminfo()
4247 struct sock *sk = p; in sk_busy_loop_end() local
4249 if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) in sk_busy_loop_end()
4252 if (sk_is_udp(sk) && in sk_busy_loop_end()
4253 !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) in sk_busy_loop_end()
4256 return sk_busy_loop_timeout(sk, start_time); in sk_busy_loop_end()
4261 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len) in sock_bind_add() argument
4263 if (!sk->sk_prot->bind_add) in sock_bind_add()
4265 return sk->sk_prot->bind_add(sk, addr, addr_len); in sock_bind_add()
4270 int sock_ioctl_inout(struct sock *sk, unsigned int cmd, in sock_ioctl_inout() argument
4278 ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg); in sock_ioctl_inout()
4293 static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg) in sock_ioctl_out() argument
4297 ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg); in sock_ioctl_out()
4309 int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) in sk_ioctl() argument
4313 if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET) in sk_ioctl()
4314 rc = ipmr_sk_ioctl(sk, cmd, arg); in sk_ioctl()
4315 else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6) in sk_ioctl()
4316 rc = ip6mr_sk_ioctl(sk, cmd, arg); in sk_ioctl()
4317 else if (sk_is_phonet(sk)) in sk_ioctl()
4318 rc = phonet_sk_ioctl(sk, cmd, arg); in sk_ioctl()
4325 return sock_ioctl_out(sk, cmd, arg); in sk_ioctl()