// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/compat.h>
#include <linux/mroute.h>
#include <linux/mroute6.h>
#include <linux/icmpv6.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/proto_memory.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <net/bpf_sk_storage.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>
#include <net/phonet/phonet.h>

#include <linux/ethtool.h>

#include "dev.h"

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_def_write_space_wfree(struct sock *sk);
static void sock_def_write_space(struct sock *sk);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and if the current process has it in the user
 * namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and if the current process has it in all user
 * namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and if the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
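
/* Example (illustrative sketch, not part of the upstream file): a protocol
 * handler gating a privileged operation on these helpers would write
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * which requires both that the socket's opener had CAP_NET_ADMIN when the
 * socket was created and that the current task holds it over the socket's
 * network namespace.
 */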

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF"	,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"	,	x "AF_XDP"	, \
  x "AF_MCTP"  , \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
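
/* Example (sketch, assuming a swap-over-network setup): such a user flips
 * the flag on its transport socket for the lifetime of the swapfile, e.g.
 *
 *	sk_set_memalloc(sock->sk);	// while a swapfile is active
 *	...
 *	sk_clear_memalloc(sock->sk);	// after the last one is deactivated
 *
 * so that transmissions needed for reclaim progress may dip into the
 * emergency reserves via __GFP_MEMALLOC.
 */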

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
				 tcp_v6_do_rcv,
				 tcp_v4_do_rcv,
				 sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

void sk_error_report(struct sock *sk)
{
	sk->sk_error_report(sk);

	switch (sk->sk_family) {
	case AF_INET:
		fallthrough;
	case AF_INET6:
		trace_inet_sk_error_report(sk);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(sk_error_report);

int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	if (old_timeval) {
		struct __kernel_old_timeval old_tv;
		old_tv.tv_sec = tv.tv_sec;
		old_tv.tv_usec = tv.tv_usec;
		*(struct __kernel_old_timeval *)optval = old_tv;
		return sizeof(old_tv);
	}

	*(struct __kernel_sock_timeval *)optval = tv;
	return sizeof(tv);
}
EXPORT_SYMBOL(sock_get_timeout);
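
/* Worked example (illustrative): with HZ == 1000, a timeout of 2500 jiffies
 * is reported by sock_get_timeout() as
 *
 *	tv_sec  = 2500 / HZ                          = 2
 *	tv_usec = ((2500 % HZ) * USEC_PER_SEC) / HZ  = 500000
 *
 * i.e. 2.5 seconds, while MAX_SCHEDULE_TIMEOUT maps to {0, 0}, meaning
 * "no timeout".
 */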

int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
			   sockptr_t optval, int optlen, bool old_timeval)
{
	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv->tv_sec = tv32.tv_sec;
		tv->tv_usec = tv32.tv_usec;
	} else if (old_timeval) {
		struct __kernel_old_timeval old_tv;

		if (optlen < sizeof(old_tv))
			return -EINVAL;
		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
			return -EFAULT;
		tv->tv_sec = old_tv.tv_sec;
		tv->tv_usec = old_tv.tv_usec;
	} else {
		if (optlen < sizeof(*tv))
			return -EINVAL;
		if (copy_from_sockptr(tv, optval, sizeof(*tv)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL(sock_copy_user_timeval);

static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
			    bool old_timeval)
{
	struct __kernel_sock_timeval tv;
	int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
	long val;

	if (err)
		return err;

	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		WRITE_ONCE(*timeo_p, 0);
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	val = MAX_SCHEDULE_TIMEOUT;
	if ((tv.tv_sec || tv.tv_usec) &&
	    (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)))
		val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec,
						    USEC_PER_SEC / HZ);
	WRITE_ONCE(*timeo_p, val);
	return 0;
}

static bool sk_set_prio_allowed(const struct sock *sk, int val)
{
	return ((val >= TC_PRIO_BESTEFFORT && val <= TC_PRIO_INTERACTIVE) ||
		sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
		sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN));
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
			      enum skb_drop_reason *reason)
{
	enum skb_drop_reason drop_reason;
	int err;

	err = sk_filter(sk, skb);
	if (err) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto out;
	}
	err = __sock_queue_rcv_skb(sk, skb);
	switch (err) {
	case -ENOMEM:
		drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		break;
	case -ENOBUFS:
		drop_reason = SKB_DROP_REASON_PROTO_MEM;
		break;
	default:
		drop_reason = SKB_NOT_DROPPED_YET;
		break;
	}
out:
	if (reason)
		*reason = drop_reason;
	return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb_reason);
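
/* Example (sketch, not from the original file): a protocol's receive path
 * can forward the drop reason to kfree_skb_reason() so drop tracing shows
 * why the packet died:
 *
 *	enum skb_drop_reason reason;
 *
 *	if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
 *		kfree_skb_reason(skb, reason);
 *
 * which mirrors the pattern the udp/raw receive paths use with their own
 * reasons.
 */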

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
							   u32));
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							   u32));
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete &&
	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
			       dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete &&
	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
			       dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
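
/* Example (illustrative sketch): a transmit path that caches a route
 * revalidates it against a per-destination cookie before use:
 *
 *	dst = sk_dst_check(sk, cookie);
 *	if (!dst) {
 *		// (re)route here and install a fresh entry with sk_dst_set()
 *	}
 *
 * where "cookie" is whatever the caller tracks (IPv6 callers commonly use
 * their pinfo dst cookie; IPv4 callers commonly pass 0).
 */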

static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	/* Paired with all READ_ONCE() done locklessly. */
	WRITE_ONCE(sk->sk_bound_dev_if, ifindex);

	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}

int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
{
	int ret;

	if (lock_sk)
		lock_sock(sk);
	ret = sock_bindtoindex_locked(sk, ifindex);
	if (lock_sk)
		release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(sock_bindtoindex);
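
/* Example (sketch): in-kernel users such as tunnel drivers bind their
 * internal socket to the underlay device by index rather than by name:
 *
 *	err = sock_bindtoindex(sock->sk, dev->ifindex, true);
 *
 * passing lock_sk == true because they do not already hold the socket lock.
 */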

static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_sockptr(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	sockopt_lock_sock(sk);
	ret = sock_bindtoindex_locked(sk, index);
	sockopt_release_sock(sk);
out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, sockptr_t optval,
				sockptr_t optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_sockptr(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (copy_to_sockptr(optlen, &len, sizeof(int)))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

bool sk_mc_loop(const struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	/* IPV6_ADDRFORM can change sk->sk_family under us. */
	switch (READ_ONCE(sk->sk_family)) {
	case AF_INET:
		return inet_test_bit(MC_LOOP, sk);
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_test_bit(MC6_LOOP, sk);
#endif
	}
	WARN_ON_ONCE(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

void sock_set_reuseaddr(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuse = SK_CAN_REUSE;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseaddr);

void sock_set_reuseport(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuseport = true;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseport);

void sock_no_linger(struct sock *sk)
{
	lock_sock(sk);
	WRITE_ONCE(sk->sk_lingertime, 0);
	sock_set_flag(sk, SOCK_LINGER);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_no_linger);

void sock_set_priority(struct sock *sk, u32 priority)
{
	WRITE_ONCE(sk->sk_priority, priority);
}
EXPORT_SYMBOL(sock_set_priority);

void sock_set_sndtimeo(struct sock *sk, s64 secs)
{
	lock_sock(sk);
	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
		WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
	else
		WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_sndtimeo);

static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
{
	sock_valbool_flag(sk, SOCK_RCVTSTAMP, val);
	sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns);
	if (val) {
		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	}
}

void sock_enable_timestamps(struct sock *sk)
{
	lock_sock(sk);
	__sock_set_timestamps(sk, true, false, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_enable_timestamps);
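
/* Example (sketch): an in-kernel consumer that wants software receive
 * timestamps on its socket can simply call
 *
 *	sock_enable_timestamps(sock->sk);
 *
 * which is the kernel-side equivalent of the SO_TIMESTAMPNS socket option.
 */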

void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
{
	switch (optname) {
	case SO_TIMESTAMP_OLD:
		__sock_set_timestamps(sk, valbool, false, false);
		break;
	case SO_TIMESTAMP_NEW:
		__sock_set_timestamps(sk, valbool, true, false);
		break;
	case SO_TIMESTAMPNS_OLD:
		__sock_set_timestamps(sk, valbool, false, true);
		break;
	case SO_TIMESTAMPNS_NEW:
		__sock_set_timestamps(sk, valbool, true, true);
		break;
	}
}

static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
{
	struct net *net = sock_net(sk);
	struct net_device *dev = NULL;
	bool match = false;
	int *vclock_index;
	int i, num;

	if (sk->sk_bound_dev_if)
		dev = dev_get_by_index(net, sk->sk_bound_dev_if);

	if (!dev) {
		pr_err("%s: sock not bind to device\n", __func__);
		return -EOPNOTSUPP;
	}

	num = ethtool_get_phc_vclocks(dev, &vclock_index);
	dev_put(dev);

	for (i = 0; i < num; i++) {
		if (*(vclock_index + i) == phc_index) {
			match = true;
			break;
		}
	}

	if (num > 0)
		kfree(vclock_index);

	if (!match)
		return -EINVAL;

	WRITE_ONCE(sk->sk_bind_phc, phc_index);

	return 0;
}

int sock_set_timestamping(struct sock *sk, int optname,
			  struct so_timestamping timestamping)
{
	int val = timestamping.flags;
	int ret;

	if (val & ~SOF_TIMESTAMPING_MASK)
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_OPT_ID_TCP &&
	    !(val & SOF_TIMESTAMPING_OPT_ID))
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_OPT_ID &&
	    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
		if (sk_is_tcp(sk)) {
			if ((1 << sk->sk_state) &
			    (TCPF_CLOSE | TCPF_LISTEN))
				return -EINVAL;
			if (val & SOF_TIMESTAMPING_OPT_ID_TCP)
				atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq);
			else
				atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
		} else {
			atomic_set(&sk->sk_tskey, 0);
		}
	}

	if (val & SOF_TIMESTAMPING_OPT_STATS &&
	    !(val & SOF_TIMESTAMPING_OPT_TSONLY))
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_BIND_PHC) {
		ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
		if (ret)
			return ret;
	}

	WRITE_ONCE(sk->sk_tsflags, val);
	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
	sock_valbool_flag(sk, SOCK_TIMESTAMPING_ANY, !!(val & TSFLAGS_ANY));

	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
		sock_enable_timestamp(sk,
				      SOCK_TIMESTAMPING_RX_SOFTWARE);
	else
		sock_disable_timestamp(sk,
				       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
	return 0;
}
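
/* Example (illustrative userspace sketch): requesting TX software
 * timestamps with per-packet IDs through this option looks like
 *
 *	struct so_timestamping ts = {
 *		.flags = SOF_TIMESTAMPING_TX_SOFTWARE |
 *			 SOF_TIMESTAMPING_SOFTWARE |
 *			 SOF_TIMESTAMPING_OPT_ID,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
 *
 * the completions then arrive on the socket error queue via
 * recvmsg(fd, ..., MSG_ERRQUEUE).
 */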

#if defined(CONFIG_CGROUP_BPF)
void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op)
{
	struct bpf_sock_ops_kern sock_ops;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	sock_ops.op = op;
	sock_ops.is_fullsock = 1;
	sock_ops.sk = sk;
	bpf_skops_init_skb(&sock_ops, skb, 0);
	__cgroup_bpf_run_filter_sock_ops(sk, &sock_ops, CGROUP_SOCK_OPS);
}
#endif

void sock_set_keepalive(struct sock *sk)
{
	lock_sock(sk);
	if (sk->sk_prot->keepalive)
		sk->sk_prot->keepalive(sk, true);
	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_keepalive);

static void __sock_set_rcvbuf(struct sock *sk, int val)
{
	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
	 * as a negative value.
	 */
	val = min_t(int, val, INT_MAX / 2);
	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;

	/* We double it on the way in to account for "struct sk_buff" etc.
	 * overhead. Applications assume that the SO_RCVBUF setting they make
	 * will allow that much actual data to be received on that socket.
	 *
	 * Applications are unaware that "struct sk_buff" and other overheads
	 * allocate from the receive buffer during socket buffer allocation.
	 *
	 * And after considering the possible alternatives, returning the value
	 * we actually used in getsockopt is the most desirable behavior.
	 */
	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
}

void sock_set_rcvbuf(struct sock *sk, int val)
{
	lock_sock(sk);
	__sock_set_rcvbuf(sk, val);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_rcvbuf);
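
/* Worked example (illustrative): a caller asking for a 64 KiB receive
 * buffer ends up with sk_rcvbuf == 128 KiB because of the doubling above;
 * getsockopt(SO_RCVBUF) then reports the doubled value back, which is the
 * long-standing Linux behaviour applications are expected to handle.
 */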

static void __sock_set_mark(struct sock *sk, u32 val)
{
	if (val != sk->sk_mark) {
		WRITE_ONCE(sk->sk_mark, val);
		sk_dst_reset(sk);
	}
}

void sock_set_mark(struct sock *sk, u32 val)
{
	lock_sock(sk);
	__sock_set_mark(sk, val);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_mark);

static void sock_release_reserved_memory(struct sock *sk, int bytes)
{
	/* Round down bytes to multiple of pages */
	bytes = round_down(bytes, PAGE_SIZE);

	WARN_ON(bytes > sk->sk_reserved_mem);
	WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes);
	sk_mem_reclaim(sk);
}

static int sock_reserve_memory(struct sock *sk, int bytes)
{
	long allocated;
	bool charged;
	int pages;

	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
		return -EOPNOTSUPP;

	if (!bytes)
		return 0;

	pages = sk_mem_pages(bytes);

	/* pre-charge to memcg */
	charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
					  GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!charged)
		return -ENOMEM;

	/* pre-charge to forward_alloc */
	sk_memory_allocated_add(sk, pages);
	allocated = sk_memory_allocated(sk);
	/* If the system goes into memory pressure with this
	 * precharge, give up and return error.
	 */
	if (allocated > sk_prot_mem_limits(sk, 1)) {
		sk_memory_allocated_sub(sk, pages);
		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
		return -ENOMEM;
	}
	sk_forward_alloc_add(sk, pages << PAGE_SHIFT);

	WRITE_ONCE(sk->sk_reserved_mem,
		   sk->sk_reserved_mem + (pages << PAGE_SHIFT));

	return 0;
}
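
/* Worked example (illustrative): reserving 100000 bytes with 4 KiB pages
 * rounds up via sk_mem_pages() to 25 pages, so forward_alloc and
 * sk_reserved_mem both grow by 25 << PAGE_SHIFT == 102400 bytes, and the
 * memcg is pre-charged for the same 25 pages.
 */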

#ifdef CONFIG_PAGE_POOL

/* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED
 * in 1 syscall. The limit exists to limit the amount of memory the kernel
 * allocates to copy these tokens, and to prevent looping over the frags for
 * too long.
 */
#define MAX_DONTNEED_TOKENS 128
#define MAX_DONTNEED_FRAGS 1024

static noinline_for_stack int
sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
{
	unsigned int num_tokens, i, j, k, netmem_num = 0;
	struct dmabuf_token *tokens;
	int ret = 0, num_frags = 0;
	netmem_ref netmems[16];

	if (!sk_is_tcp(sk))
		return -EBADF;

	if (optlen % sizeof(*tokens) ||
	    optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
		return -EINVAL;

	num_tokens = optlen / sizeof(*tokens);
	tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
	if (!tokens)
		return -ENOMEM;

	if (copy_from_sockptr(tokens, optval, optlen)) {
		kvfree(tokens);
		return -EFAULT;
	}

	xa_lock_bh(&sk->sk_user_frags);
	for (i = 0; i < num_tokens; i++) {
		for (j = 0; j < tokens[i].token_count; j++) {
			if (++num_frags > MAX_DONTNEED_FRAGS)
				goto frag_limit_reached;

			netmem_ref netmem = (__force netmem_ref)__xa_erase(
				&sk->sk_user_frags, tokens[i].token_start + j);

			if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
				continue;

			netmems[netmem_num++] = netmem;
			if (netmem_num == ARRAY_SIZE(netmems)) {
				xa_unlock_bh(&sk->sk_user_frags);
				for (k = 0; k < netmem_num; k++)
					WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
				netmem_num = 0;
				xa_lock_bh(&sk->sk_user_frags);
			}
			ret++;
		}
	}

frag_limit_reached:
	xa_unlock_bh(&sk->sk_user_frags);
	for (k = 0; k < netmem_num; k++)
		WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));

	kvfree(tokens);
	return ret;
}
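
/* Example (illustrative userspace sketch, assuming a devmem TCP setup):
 * after consuming fragments whose tokens were delivered via the devmem
 * control messages, the receiver hands them back in batches:
 *
 *	struct dmabuf_token tok = { .token_start = id, .token_count = 1 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DEVMEM_DONTNEED, &tok, sizeof(tok));
 *
 * the kernel-side return value above counts how many fragments were
 * actually released.
 */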
#endif

void sockopt_lock_sock(struct sock *sk)
{
	/* When current->bpf_ctx is set, the setsockopt is called from
	 * a bpf prog. bpf has ensured the sk lock has been
	 * acquired before calling setsockopt().
	 */
	if (has_current_bpf_ctx())
		return;

	lock_sock(sk);
}
EXPORT_SYMBOL(sockopt_lock_sock);

void sockopt_release_sock(struct sock *sk)
{
	if (has_current_bpf_ctx())
		return;

	release_sock(sk);
}
EXPORT_SYMBOL(sockopt_release_sock);

bool sockopt_ns_capable(struct user_namespace *ns, int cap)
{
	return has_current_bpf_ctx() || ns_capable(ns, cap);
}
EXPORT_SYMBOL(sockopt_ns_capable);

bool sockopt_capable(int cap)
{
	return has_current_bpf_ctx() || capable(cap);
}
EXPORT_SYMBOL(sockopt_capable);

static int sockopt_validate_clockid(__kernel_clockid_t value)
{
	switch (value) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_TAI:
		return 0;
	}
	return -EINVAL;
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sk_setsockopt(struct sock *sk, int level, int optname,
		  sockptr_t optval, unsigned int optlen)
{
	struct so_timestamping timestamping;
	struct socket *sock = sk->sk_socket;
	struct sock_txtime sk_txtime;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	/* handle options which do not require locking the socket. */
	switch (optname) {
	case SO_PRIORITY:
		if (sk_set_prio_allowed(sk, val)) {
			sock_set_priority(sk, val);
			return 0;
		}
		return -EPERM;
	case SO_PASSSEC:
		assign_bit(SOCK_PASSSEC, &sock->flags, valbool);
		return 0;
	case SO_PASSCRED:
		assign_bit(SOCK_PASSCRED, &sock->flags, valbool);
		return 0;
	case SO_PASSPIDFD:
		assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool);
		return 0;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		return -ENOPROTOOPT;
#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		if (val < 0)
			return -EINVAL;
		WRITE_ONCE(sk->sk_ll_usec, val);
		return 0;
	case SO_PREFER_BUSY_POLL:
		if (valbool && !sockopt_capable(CAP_NET_ADMIN))
			return -EPERM;
		WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
		return 0;
	case SO_BUSY_POLL_BUDGET:
		if (val > READ_ONCE(sk->sk_busy_poll_budget) &&
		    !sockopt_capable(CAP_NET_ADMIN))
			return -EPERM;
		if (val < 0 || val > U16_MAX)
			return -EINVAL;
		WRITE_ONCE(sk->sk_busy_poll_budget, val);
		return 0;
#endif
	case SO_MAX_PACING_RATE:
	{
		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
		unsigned long pacing_rate;

		if (sizeof(ulval) != sizeof(val) &&
		    optlen >= sizeof(ulval) &&
		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
			return -EFAULT;
		}
		if (ulval != ~0UL)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		/* Pairs with READ_ONCE() from sk_getsockopt() */
		WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
		pacing_rate = READ_ONCE(sk->sk_pacing_rate);
		if (ulval < pacing_rate)
			WRITE_ONCE(sk->sk_pacing_rate, ulval);
		return 0;
	}
	case SO_TXREHASH:
		if (val < -1 || val > 1)
			return -EINVAL;
		if ((u8)val == SOCK_TXREHASH_DEFAULT)
			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
		/* Paired with READ_ONCE() in tcp_rtx_synack()
		 * and sk_getsockopt().
		 */
		WRITE_ONCE(sk->sk_txrehash, (u8)val);
		return 0;
	case SO_PEEK_OFF:
	{
		int (*set_peek_off)(struct sock *sk, int val);

		set_peek_off = READ_ONCE(sock->ops)->set_peek_off;
		if (set_peek_off)
			ret = set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		return ret;
	}
#ifdef CONFIG_PAGE_POOL
	case SO_DEVMEM_DONTNEED:
		return sock_devmem_dontneed(sk, optval, optlen);
#endif
	}

	sockopt_lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !sockopt_capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		if (valbool && !sk_is_inet(sk))
			ret = -EOPNOTSUPP;
		else
			sk->sk_reuseport = valbool;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't return an error here: BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
set_sndbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		WRITE_ONCE(sk->sk_sndbuf,
			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!sockopt_capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't return an error here: BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		__sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
		break;

	case SO_RCVBUFFORCE:
		if (!sockopt_capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		__sock_set_rcvbuf(sk, max(val, 0));
		break;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff) {
			sock_reset_flag(sk, SOCK_LINGER);
		} else {
			unsigned long t_sec = ling.l_linger;

			if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)
				WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
			else
				WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMP_NEW:
	case SO_TIMESTAMPNS_OLD:
	case SO_TIMESTAMPNS_NEW:
		sock_set_timestamp(sk, optname, valbool);
		break;

	case SO_TIMESTAMPING_NEW:
	case SO_TIMESTAMPING_OLD:
		if (optlen == sizeof(timestamping)) {
			if (copy_from_sockptr(&timestamping, optval,
					      sizeof(timestamping))) {
				ret = -EFAULT;
				break;
			}
		} else {
			memset(&timestamping, 0, sizeof(timestamping));
			timestamping.flags = val;
		}
		ret = sock_set_timestamping(sk, optname, timestamping);
		break;

	case SO_RCVLOWAT:
	{
		int (*set_rcvlowat)(struct sock *sk, int val) = NULL;

		if (val < 0)
			val = INT_MAX;
		if (sock)
			set_rcvlowat = READ_ONCE(sock->ops)->set_rcvlowat;
		if (set_rcvlowat)
			ret = set_rcvlowat(sk, val);
		else
			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
		break;
	}
	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
				       optlen, optname == SO_RCVTIMEO_OLD);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
				       optlen, optname == SO_SNDTIMEO_OLD);
		break;

	case SO_ATTACH_FILTER: {
		struct sock_fprog fprog;

		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
		if (!ret)
			ret = sk_attach_filter(&fprog, sk);
		break;
	}
	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF: {
		struct sock_fprog fprog;

		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
		if (!ret)
			ret = sk_reuseport_attach_filter(&fprog, sk);
		break;
	}
	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_REUSEPORT_BPF:
		ret = reuseport_detach_prog(sk);
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_MARK:
		if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		__sock_set_mark(sk, val);
		break;
	case SO_RCVMARK:
		sock_valbool_flag(sk, SOCK_RCVMARK, valbool);
		break;

	case SO_RCVPRIORITY:
		sock_valbool_flag(sk, SOCK_RCVPRIORITY, valbool);
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

	case SO_INCOMING_CPU:
		reuseport_update_incoming_cpu(sk, val);
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (!(sk_is_tcp(sk) ||
			      (sk->sk_type == SOCK_DGRAM &&
			       sk->sk_protocol == IPPROTO_UDP)))
				ret = -EOPNOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -EOPNOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;

	case SO_TXTIME:
		if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
			break;
		} else if (copy_from_sockptr(&sk_txtime, optval,
					     sizeof(struct sock_txtime))) {
			ret = -EFAULT;
			break;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
			break;
		}
		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
		 * scheduler has enough safeguards.
		 */
		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		ret = sockopt_validate_clockid(sk_txtime.clockid);
		if (ret)
			break;

		sock_valbool_flag(sk, SOCK_TXTIME, true);
		sk->sk_clockid = sk_txtime.clockid;
		sk->sk_txtime_deadline_mode =
			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
		sk->sk_txtime_report_errors =
			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
		break;

	case SO_BINDTOIFINDEX:
		ret = sock_bindtoindex_locked(sk, val);
		break;

	case SO_BUF_LOCK:
		if (val & ~SOCK_BUF_LOCK_MASK) {
			ret = -EINVAL;
			break;
		}
		sk->sk_userlocks = val | (sk->sk_userlocks &
					  ~SOCK_BUF_LOCK_MASK);
		break;

	case SO_RESERVE_MEM:
	{
		int delta;

		if (val < 0) {
			ret = -EINVAL;
			break;
		}

		delta = val - sk->sk_reserved_mem;
		if (delta < 0)
			sock_release_reserved_memory(sk, -delta);
		else
			ret = sock_reserve_memory(sk, delta);
		break;
	}

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	sockopt_release_sock(sk);
	return ret;
}

int sock_setsockopt(struct socket *sock, int level, int optname,
		    sockptr_t optval, unsigned int optlen)
{
	return sk_setsockopt(sock->sk, level, optname,
			     optval, optlen);
}
EXPORT_SYMBOL(sock_setsockopt);
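
/* Example (sketch): in-kernel callers wrap their buffer in a kernel
 * sockptr_t instead of a user pointer, e.g. to force a 1 MiB send buffer:
 *
 *	int val = 1 << 20;
 *
 *	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUFFORCE,
 *			KERNEL_SOCKPTR(&val), sizeof(val));
 *
 * sockptr_t is what lets the same code serve both user and kernel buffers.
 */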

static const struct cred *sk_get_peer_cred(struct sock *sk)
{
	const struct cred *cred;

	spin_lock(&sk->sk_peer_lock);
	cred = get_cred(sk->sk_peer_cred);
	spin_unlock(&sk->sk_peer_lock);

	return cred;
}

static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(sockptr_t dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++) {
		gid_t gid = from_kgid_munged(user_ns, src->gid[i]);

		if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid)))
			return -EFAULT;
	}

	return 0;
}

int sk_getsockopt(struct sock *sk, int level, int optname,
		  sockptr_t optval, sockptr_t optlen)
{
	struct socket *sock = sk->sk_socket;

	union {
		int val;
		u64 val64;
		unsigned long ulval;
		struct linger ling;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct __kernel_sock_timeval stm;
		struct sock_txtime txtime;
		struct so_timestamping timestamping;
	} v;

	int lv = sizeof(int);
	int len;

	if (copy_from_sockptr(&len, optlen, sizeof(int)))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = READ_ONCE(sk->sk_sndbuf);
		break;

	case SO_RCVBUF:
		v.val = READ_ONCE(sk->sk_rcvbuf);
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = READ_ONCE(sk->sk_priority);
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = READ_ONCE(sk->sk_lingertime) / HZ;
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_TIMESTAMP_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_TSTAMP_NEW) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMP_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPNS_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPING_OLD:
	case SO_TIMESTAMPING_NEW:
		lv = sizeof(v.timestamping);
		/* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
		 * returning the flags when they were set through the same option.
		 * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
		 */
		if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) {
			v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
			v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
		}
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v,
				      SO_RCVTIMEO_OLD == optname);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v,
				      SO_SNDTIMEO_OLD == optname);
		break;

	case SO_RCVLOWAT:
		v.val = READ_ONCE(sk->sk_rcvlowat);
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PASSPIDFD:
		v.val = !!test_bit(SOCK_PASSPIDFD, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;

		if (len > sizeof(peercred))
			len = sizeof(peercred);

		spin_lock(&sk->sk_peer_lock);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		spin_unlock(&sk->sk_peer_lock);

		if (copy_to_sockptr(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERPIDFD:
	{
		struct pid *peer_pid;
		struct file *pidfd_file = NULL;
		int pidfd;

		if (len > sizeof(pidfd))
			len = sizeof(pidfd);

		spin_lock(&sk->sk_peer_lock);
		peer_pid = get_pid(sk->sk_peer_pid);
		spin_unlock(&sk->sk_peer_lock);

		if (!peer_pid)
			return -ENODATA;

		pidfd = pidfd_prepare(peer_pid, 0, &pidfd_file);
		put_pid(peer_pid);
		if (pidfd < 0)
			return pidfd;

		if (copy_to_sockptr(optval, &pidfd, len) ||
		    copy_to_sockptr(optlen, &len, sizeof(int))) {
			put_unused_fd(pidfd);
			fput(pidfd_file);

			return -EFAULT;
		}

		fd_install(pidfd, pidfd_file);
		return 0;
	}

	case SO_PEERGROUPS:
	{
		const struct cred *cred;
		int ret, n;

		cred = sk_get_peer_cred(sk);
		if (!cred)
			return -ENODATA;

		n = cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			put_cred(cred);
			return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user(optval, cred->group_info);
		put_cred(cred);
		if (ret)
			return ret;
		goto lenout;
	}
1934
1935 case SO_PEERNAME:
1936 {
1937 struct sockaddr_storage address;
1938
1939 lv = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 2);
1940 if (lv < 0)
1941 return -ENOTCONN;
1942 if (lv < len)
1943 return -EINVAL;
1944 if (copy_to_sockptr(optval, &address, len))
1945 return -EFAULT;
1946 goto lenout;
1947 }
1948
1949 /* Dubious BSD thing... Probably nobody even uses it, but
1950 * the UNIX standard wants it for whatever reason... -DaveM
1951 */
1952 case SO_ACCEPTCONN:
1953 v.val = sk->sk_state == TCP_LISTEN;
1954 break;
1955
1956 case SO_PASSSEC:
1957 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1958 break;
1959
1960 case SO_PEERSEC:
1961 return security_socket_getpeersec_stream(sock,
1962 optval, optlen, len);
1963
1964 case SO_MARK:
1965 v.val = READ_ONCE(sk->sk_mark);
1966 break;
1967
1968 case SO_RCVMARK:
1969 v.val = sock_flag(sk, SOCK_RCVMARK);
1970 break;
1971
1972 case SO_RCVPRIORITY:
1973 v.val = sock_flag(sk, SOCK_RCVPRIORITY);
1974 break;
1975
1976 case SO_RXQ_OVFL:
1977 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1978 break;
1979
1980 case SO_WIFI_STATUS:
1981 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1982 break;
1983
1984 case SO_PEEK_OFF:
1985 if (!READ_ONCE(sock->ops)->set_peek_off)
1986 return -EOPNOTSUPP;
1987
1988 v.val = READ_ONCE(sk->sk_peek_off);
1989 break;
1990 case SO_NOFCS:
1991 v.val = sock_flag(sk, SOCK_NOFCS);
1992 break;
1993
1994 case SO_BINDTODEVICE:
1995 return sock_getbindtodevice(sk, optval, optlen, len);
1996
1997 case SO_GET_FILTER:
1998 len = sk_get_filter(sk, optval, len);
1999 if (len < 0)
2000 return len;
2001
2002 goto lenout;
2003
2004 case SO_LOCK_FILTER:
2005 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
2006 break;
2007
2008 case SO_BPF_EXTENSIONS:
2009 v.val = bpf_tell_extensions();
2010 break;
2011
2012 case SO_SELECT_ERR_QUEUE:
2013 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
2014 break;
2015
2016 #ifdef CONFIG_NET_RX_BUSY_POLL
2017 case SO_BUSY_POLL:
2018 v.val = READ_ONCE(sk->sk_ll_usec);
2019 break;
2020 case SO_PREFER_BUSY_POLL:
2021 v.val = READ_ONCE(sk->sk_prefer_busy_poll);
2022 break;
2023 #endif
2024
2025 case SO_MAX_PACING_RATE:
2026 /* The READ_ONCE() pairs with the WRITE_ONCE() in sk_setsockopt() */
2027 if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
2028 lv = sizeof(v.ulval);
2029 v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
2030 } else {
2031 /* 32bit version */
2032 v.val = min_t(unsigned long, ~0U,
2033 READ_ONCE(sk->sk_max_pacing_rate));
2034 }
2035 break;
2036
2037 case SO_INCOMING_CPU:
2038 v.val = READ_ONCE(sk->sk_incoming_cpu);
2039 break;
2040
2041 case SO_MEMINFO:
2042 {
2043 u32 meminfo[SK_MEMINFO_VARS];
2044
2045 sk_get_meminfo(sk, meminfo);
2046
2047 len = min_t(unsigned int, len, sizeof(meminfo));
2048 if (copy_to_sockptr(optval, &meminfo, len))
2049 return -EFAULT;
2050
2051 goto lenout;
2052 }
2053
2054 #ifdef CONFIG_NET_RX_BUSY_POLL
2055 case SO_INCOMING_NAPI_ID:
2056 v.val = READ_ONCE(sk->sk_napi_id);
2057
2058 /* aggregate non-NAPI IDs down to 0 */
2059 if (!napi_id_valid(v.val))
2060 v.val = 0;
2061
2062 break;
2063 #endif
2064
2065 case SO_COOKIE:
2066 lv = sizeof(u64);
2067 if (len < lv)
2068 return -EINVAL;
2069 v.val64 = sock_gen_cookie(sk);
2070 break;
2071
2072 case SO_ZEROCOPY:
2073 v.val = sock_flag(sk, SOCK_ZEROCOPY);
2074 break;
2075
2076 case SO_TXTIME:
2077 lv = sizeof(v.txtime);
2078 v.txtime.clockid = sk->sk_clockid;
2079 v.txtime.flags |= sk->sk_txtime_deadline_mode ?
2080 SOF_TXTIME_DEADLINE_MODE : 0;
2081 v.txtime.flags |= sk->sk_txtime_report_errors ?
2082 SOF_TXTIME_REPORT_ERRORS : 0;
2083 break;
2084
2085 case SO_BINDTOIFINDEX:
2086 v.val = READ_ONCE(sk->sk_bound_dev_if);
2087 break;
2088
2089 case SO_NETNS_COOKIE:
2090 lv = sizeof(u64);
2091 if (len != lv)
2092 return -EINVAL;
2093 v.val64 = sock_net(sk)->net_cookie;
2094 break;
2095
2096 case SO_BUF_LOCK:
2097 v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
2098 break;
2099
2100 case SO_RESERVE_MEM:
2101 v.val = READ_ONCE(sk->sk_reserved_mem);
2102 break;
2103
2104 case SO_TXREHASH:
2105 /* Paired with WRITE_ONCE() in sk_setsockopt() */
2106 v.val = READ_ONCE(sk->sk_txrehash);
2107 break;
2108
2109 default:
2110 /* We implement SO_SNDLOWAT etc. as not settable
2111 * (1003.1g 7).
2112 */
2113 return -ENOPROTOOPT;
2114 }
2115
2116 if (len > lv)
2117 len = lv;
2118 if (copy_to_sockptr(optval, &v, len))
2119 return -EFAULT;
2120 lenout:
2121 if (copy_to_sockptr(optlen, &len, sizeof(int)))
2122 return -EFAULT;
2123 return 0;
2124 }
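/*
 * Example (illustrative sketch, not part of this file): the userspace view
 * of the copy-out contract above -- the kernel truncates the reply to
 * min(len, lv) and writes the clamped length back through optlen. Shown
 * for SO_MEMINFO, assuming the libc headers expose SO_MEMINFO and the
 * SK_MEMINFO_* indices from the linux/sock_diag.h UAPI.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <linux/sock_diag.h>

static void example_dump_meminfo(int fd)
{
	unsigned int mem[SK_MEMINFO_VARS];
	socklen_t len = sizeof(mem);

	if (getsockopt(fd, SOL_SOCKET, SO_MEMINFO, mem, &len) == 0)
		printf("rmem_alloc=%u rcvbuf=%u (len=%u)\n",
		       mem[SK_MEMINFO_RMEM_ALLOC], mem[SK_MEMINFO_RCVBUF],
		       (unsigned int)len);
}
#endif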
2125
2126 /*
2127 * Initialize an sk_lock.
2128 *
2129 * (We also register the sk_lock with the lock validator.)
2130 */
2131 static inline void sock_lock_init(struct sock *sk)
2132 {
2133 sk_owner_clear(sk);
2134
2135 if (sk->sk_kern_sock)
2136 sock_lock_init_class_and_name(
2137 sk,
2138 af_family_kern_slock_key_strings[sk->sk_family],
2139 af_family_kern_slock_keys + sk->sk_family,
2140 af_family_kern_key_strings[sk->sk_family],
2141 af_family_kern_keys + sk->sk_family);
2142 else
2143 sock_lock_init_class_and_name(
2144 sk,
2145 af_family_slock_key_strings[sk->sk_family],
2146 af_family_slock_keys + sk->sk_family,
2147 af_family_key_strings[sk->sk_family],
2148 af_family_keys + sk->sk_family);
2149 }
2150
2151 /*
2152 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
2153 * even temporarily, because of RCU lookups. sk_node should also be left as is.
2154 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
2155 */
2156 static void sock_copy(struct sock *nsk, const struct sock *osk)
2157 {
2158 const struct proto *prot = READ_ONCE(osk->sk_prot);
2159 #ifdef CONFIG_SECURITY_NETWORK
2160 void *sptr = nsk->sk_security;
2161 #endif
2162
2163 /* If we move sk_tx_queue_mapping out of the private section,
2164 * we must check if sk_tx_queue_clear() is called after
2165 * sock_copy() in sk_clone_lock().
2166 */
2167 BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) <
2168 offsetof(struct sock, sk_dontcopy_begin) ||
2169 offsetof(struct sock, sk_tx_queue_mapping) >=
2170 offsetof(struct sock, sk_dontcopy_end));
2171
2172 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
2173
2174 unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
2175 prot->obj_size - offsetof(struct sock, sk_dontcopy_end),
2176 /* alloc is larger than struct, see sk_prot_alloc() */);
2177
2178 #ifdef CONFIG_SECURITY_NETWORK
2179 nsk->sk_security = sptr;
2180 security_sk_clone(osk, nsk);
2181 #endif
2182 }
2183
2184 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
2185 int family)
2186 {
2187 struct sock *sk;
2188 struct kmem_cache *slab;
2189
2190 slab = prot->slab;
2191 if (slab != NULL) {
2192 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
2193 if (!sk)
2194 return sk;
2195 if (want_init_on_alloc(priority))
2196 sk_prot_clear_nulls(sk, prot->obj_size);
2197 } else
2198 sk = kmalloc(prot->obj_size, priority);
2199
2200 if (sk != NULL) {
2201 if (security_sk_alloc(sk, family, priority))
2202 goto out_free;
2203
2204 if (!try_module_get(prot->owner))
2205 goto out_free_sec;
2206 }
2207
2208 return sk;
2209
2210 out_free_sec:
2211 security_sk_free(sk);
2212 out_free:
2213 if (slab != NULL)
2214 kmem_cache_free(slab, sk);
2215 else
2216 kfree(sk);
2217 return NULL;
2218 }
2219
2220 static void sk_prot_free(struct proto *prot, struct sock *sk)
2221 {
2222 struct kmem_cache *slab;
2223 struct module *owner;
2224
2225 owner = prot->owner;
2226 slab = prot->slab;
2227
2228 cgroup_sk_free(&sk->sk_cgrp_data);
2229 mem_cgroup_sk_free(sk);
2230 security_sk_free(sk);
2231
2232 sk_owner_put(sk);
2233
2234 if (slab != NULL)
2235 kmem_cache_free(slab, sk);
2236 else
2237 kfree(sk);
2238 module_put(owner);
2239 }
2240
2241 /**
2242 * sk_alloc - All socket objects are allocated here
2243 * @net: the applicable net namespace
2244 * @family: protocol family
2245 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2246 * @prot: struct proto associated with this new sock instance
2247 * @kern: is this to be a kernel socket?
2248 */
2249 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
2250 struct proto *prot, int kern)
2251 {
2252 struct sock *sk;
2253
2254 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
2255 if (sk) {
2256 sk->sk_family = family;
2257 /*
2258 * See comment in struct sock definition to understand
2259 * why we need sk_prot_creator -acme
2260 */
2261 sk->sk_prot = sk->sk_prot_creator = prot;
2262 sk->sk_kern_sock = kern;
2263 sock_lock_init(sk);
2264 sk->sk_net_refcnt = kern ? 0 : 1;
2265 if (likely(sk->sk_net_refcnt)) {
2266 get_net_track(net, &sk->ns_tracker, priority);
2267 sock_inuse_add(net, 1);
2268 } else {
2269 net_passive_inc(net);
2270 __netns_tracker_alloc(net, &sk->ns_tracker,
2271 false, priority);
2272 }
2273
2274 sock_net_set(sk, net);
2275 refcount_set(&sk->sk_wmem_alloc, 1);
2276
2277 mem_cgroup_sk_alloc(sk);
2278 cgroup_sk_alloc(&sk->sk_cgrp_data);
2279 sock_update_classid(&sk->sk_cgrp_data);
2280 sock_update_netprioidx(&sk->sk_cgrp_data);
2281 sk_tx_queue_clear(sk);
2282 }
2283
2284 return sk;
2285 }
2286 EXPORT_SYMBOL(sk_alloc);
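/*
 * Example (illustrative sketch, not compiled): how a protocol's create
 * routine might pair sk_alloc() with sock_init_data(). "example_proto"
 * and the helper name are hypothetical; cf. real callers such as
 * inet_create().
 */
#if 0
static struct proto example_proto;	/* hypothetical */

static struct sock *example_create(struct net *net, int kern)
{
	struct sock *sk = sk_alloc(net, PF_INET, GFP_KERNEL,
				   &example_proto, kern);

	if (!sk)
		return NULL;
	sock_init_data(NULL, sk);	/* queues, default callbacks, refcnt = 1 */
	return sk;			/* released later via sock_put()/sk_free() */
}
#endif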
2287
2288 /* Sockets having SOCK_RCU_FREE will call this function after one RCU
2289 * grace period. This is the case for UDP sockets and TCP listeners.
2290 */
2291 static void __sk_destruct(struct rcu_head *head)
2292 {
2293 struct sock *sk = container_of(head, struct sock, sk_rcu);
2294 struct net *net = sock_net(sk);
2295 struct sk_filter *filter;
2296
2297 if (sk->sk_destruct)
2298 sk->sk_destruct(sk);
2299
2300 filter = rcu_dereference_check(sk->sk_filter,
2301 refcount_read(&sk->sk_wmem_alloc) == 0);
2302 if (filter) {
2303 sk_filter_uncharge(sk, filter);
2304 RCU_INIT_POINTER(sk->sk_filter, NULL);
2305 }
2306
2307 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
2308
2309 #ifdef CONFIG_BPF_SYSCALL
2310 bpf_sk_storage_free(sk);
2311 #endif
2312
2313 if (atomic_read(&sk->sk_omem_alloc))
2314 pr_debug("%s: optmem leakage (%d bytes) detected\n",
2315 __func__, atomic_read(&sk->sk_omem_alloc));
2316
2317 if (sk->sk_frag.page) {
2318 put_page(sk->sk_frag.page);
2319 sk->sk_frag.page = NULL;
2320 }
2321
2322 /* We do not need to acquire sk->sk_peer_lock; we are the last user. */
2323 put_cred(sk->sk_peer_cred);
2324 put_pid(sk->sk_peer_pid);
2325
2326 if (likely(sk->sk_net_refcnt)) {
2327 put_net_track(net, &sk->ns_tracker);
2328 } else {
2329 __netns_tracker_free(net, &sk->ns_tracker, false);
2330 net_passive_dec(net);
2331 }
2332 sk_prot_free(sk->sk_prot_creator, sk);
2333 }
2334
2335 void sk_net_refcnt_upgrade(struct sock *sk)
2336 {
2337 struct net *net = sock_net(sk);
2338
2339 WARN_ON_ONCE(sk->sk_net_refcnt);
2340 __netns_tracker_free(net, &sk->ns_tracker, false);
2341 net_passive_dec(net);
2342 sk->sk_net_refcnt = 1;
2343 get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
2344 sock_inuse_add(net, 1);
2345 }
2346 EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade);
2347
2348 void sk_destruct(struct sock *sk)
2349 {
2350 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
2351
2352 if (rcu_access_pointer(sk->sk_reuseport_cb)) {
2353 reuseport_detach_sock(sk);
2354 use_call_rcu = true;
2355 }
2356
2357 if (use_call_rcu)
2358 call_rcu(&sk->sk_rcu, __sk_destruct);
2359 else
2360 __sk_destruct(&sk->sk_rcu);
2361 }
2362
2363 static void __sk_free(struct sock *sk)
2364 {
2365 if (likely(sk->sk_net_refcnt))
2366 sock_inuse_add(sock_net(sk), -1);
2367
2368 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
2369 sock_diag_broadcast_destroy(sk);
2370 else
2371 sk_destruct(sk);
2372 }
2373
2374 void sk_free(struct sock *sk)
2375 {
2376 /*
2377 * We subtract one from sk_wmem_alloc so we can tell whether
2378 * some packets are still in some tx queue.
2379 * If not zero, sock_wfree() will call __sk_free(sk) later.
2380 */
2381 if (refcount_dec_and_test(&sk->sk_wmem_alloc))
2382 __sk_free(sk);
2383 }
2384 EXPORT_SYMBOL(sk_free);
2385
2386 static void sk_init_common(struct sock *sk)
2387 {
2388 skb_queue_head_init(&sk->sk_receive_queue);
2389 skb_queue_head_init(&sk->sk_write_queue);
2390 skb_queue_head_init(&sk->sk_error_queue);
2391
2392 rwlock_init(&sk->sk_callback_lock);
2393 lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
2394 af_rlock_keys + sk->sk_family,
2395 af_family_rlock_key_strings[sk->sk_family]);
2396 lockdep_set_class_and_name(&sk->sk_write_queue.lock,
2397 af_wlock_keys + sk->sk_family,
2398 af_family_wlock_key_strings[sk->sk_family]);
2399 lockdep_set_class_and_name(&sk->sk_error_queue.lock,
2400 af_elock_keys + sk->sk_family,
2401 af_family_elock_key_strings[sk->sk_family]);
2402 if (sk->sk_kern_sock)
2403 lockdep_set_class_and_name(&sk->sk_callback_lock,
2404 af_kern_callback_keys + sk->sk_family,
2405 af_family_kern_clock_key_strings[sk->sk_family]);
2406 else
2407 lockdep_set_class_and_name(&sk->sk_callback_lock,
2408 af_callback_keys + sk->sk_family,
2409 af_family_clock_key_strings[sk->sk_family]);
2410 }
2411
2412 /**
2413 * sk_clone_lock - clone a socket, and lock its clone
2414 * @sk: the socket to clone
2415 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2416 *
2417 * Caller must unlock the socket even in the error path (bh_unlock_sock(newsk))
2418 */
2419 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2420 {
2421 struct proto *prot = READ_ONCE(sk->sk_prot);
2422 struct sk_filter *filter;
2423 bool is_charged = true;
2424 struct sock *newsk;
2425
2426 newsk = sk_prot_alloc(prot, priority, sk->sk_family);
2427 if (!newsk)
2428 goto out;
2429
2430 sock_copy(newsk, sk);
2431
2432 newsk->sk_prot_creator = prot;
2433
2434 /* SANITY */
2435 if (likely(newsk->sk_net_refcnt)) {
2436 get_net_track(sock_net(newsk), &newsk->ns_tracker, priority);
2437 sock_inuse_add(sock_net(newsk), 1);
2438 } else {
2439 /* Kernel sockets do not elevate the struct net refcount.
2440 * Instead, use a tracker to more easily detect if a layer
2441 * is not properly dismantling its kernel sockets at netns
2442 * destroy time.
2443 */
2444 net_passive_inc(sock_net(newsk));
2445 __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
2446 false, priority);
2447 }
2448 sk_node_init(&newsk->sk_node);
2449 sock_lock_init(newsk);
2450 bh_lock_sock(newsk);
2451 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
2452 newsk->sk_backlog.len = 0;
2453
2454 atomic_set(&newsk->sk_rmem_alloc, 0);
2455
2456 /* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
2457 refcount_set(&newsk->sk_wmem_alloc, 1);
2458
2459 atomic_set(&newsk->sk_omem_alloc, 0);
2460 sk_init_common(newsk);
2461
2462 newsk->sk_dst_cache = NULL;
2463 newsk->sk_dst_pending_confirm = 0;
2464 newsk->sk_wmem_queued = 0;
2465 newsk->sk_forward_alloc = 0;
2466 newsk->sk_reserved_mem = 0;
2467 atomic_set(&newsk->sk_drops, 0);
2468 newsk->sk_send_head = NULL;
2469 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
2470 atomic_set(&newsk->sk_zckey, 0);
2471
2472 sock_reset_flag(newsk, SOCK_DONE);
2473
2474 /* sk->sk_memcg will be populated at accept() time */
2475 newsk->sk_memcg = NULL;
2476
2477 cgroup_sk_clone(&newsk->sk_cgrp_data);
2478
2479 rcu_read_lock();
2480 filter = rcu_dereference(sk->sk_filter);
2481 if (filter != NULL)
2482 /* though it's an empty new sock, the charging may fail
2483 * if sysctl_optmem_max was changed between the creation of
2484 * the original socket and this clone
2485 */
2486 is_charged = sk_filter_charge(newsk, filter);
2487 RCU_INIT_POINTER(newsk->sk_filter, filter);
2488 rcu_read_unlock();
2489
2490 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
2491 /* We need to make sure that we don't uncharge the new
2492 * socket if we couldn't charge it in the first place
2493 * as otherwise we uncharge the parent's filter.
2494 */
2495 if (!is_charged)
2496 RCU_INIT_POINTER(newsk->sk_filter, NULL);
2497 sk_free_unlock_clone(newsk);
2498 newsk = NULL;
2499 goto out;
2500 }
2501 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
2502
2503 if (bpf_sk_storage_clone(sk, newsk)) {
2504 sk_free_unlock_clone(newsk);
2505 newsk = NULL;
2506 goto out;
2507 }
2508
2509 /* Clear sk_user_data if parent had the pointer tagged
2510 * as not suitable for copying when cloning.
2511 */
2512 if (sk_user_data_is_nocopy(newsk))
2513 newsk->sk_user_data = NULL;
2514
2515 newsk->sk_err = 0;
2516 newsk->sk_err_soft = 0;
2517 newsk->sk_priority = 0;
2518 newsk->sk_incoming_cpu = raw_smp_processor_id();
2519
2520 /* Before updating sk_refcnt, we must commit prior changes to memory
2521 * (Documentation/RCU/rculist_nulls.rst for details)
2522 */
2523 smp_wmb();
2524 refcount_set(&newsk->sk_refcnt, 2);
2525
2526 sk_set_socket(newsk, NULL);
2527 sk_tx_queue_clear(newsk);
2528 RCU_INIT_POINTER(newsk->sk_wq, NULL);
2529
2530 if (newsk->sk_prot->sockets_allocated)
2531 sk_sockets_allocated_inc(newsk);
2532
2533 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
2534 net_enable_timestamp();
2535 out:
2536 return newsk;
2537 }
2538 EXPORT_SYMBOL_GPL(sk_clone_lock);
2539
2540 void sk_free_unlock_clone(struct sock *sk)
2541 {
2542 /* It is still a raw copy of the parent, so invalidate
2543 * the destructor and do a plain sk_free() */
2544 sk->sk_destruct = NULL;
2545 bh_unlock_sock(sk);
2546 sk_free(sk);
2547 }
2548 EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
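/*
 * Example (illustrative sketch, not compiled): the locking contract of
 * sk_clone_lock() -- the clone comes back bh-locked, so the caller must
 * bh_unlock_sock() it on every path; cf. inet_csk_clone_lock(). The
 * helper name is hypothetical.
 */
#if 0
static struct sock *example_clone(const struct sock *sk)
{
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (!newsk)
		return NULL;
	/* ... protocol-specific fixups on the locked clone ... */
	bh_unlock_sock(newsk);
	return newsk;
}
#endif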
2549
2550 static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
2551 {
2552 bool is_ipv6 = false;
2553 u32 max_size;
2554
2555 #if IS_ENABLED(CONFIG_IPV6)
2556 is_ipv6 = (sk->sk_family == AF_INET6 &&
2557 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
2558 #endif
2559 /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
2560 max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) :
2561 READ_ONCE(dst->dev->gso_ipv4_max_size);
2562 if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
2563 max_size = GSO_LEGACY_MAX_SIZE;
2564
2565 return max_size - (MAX_TCP_HEADER + 1);
2566 }
2567
2568 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2569 {
2570 u32 max_segs = 1;
2571
2572 sk->sk_route_caps = dst->dev->features;
2573 if (sk_is_tcp(sk)) {
2574 struct inet_connection_sock *icsk = inet_csk(sk);
2575
2576 sk->sk_route_caps |= NETIF_F_GSO;
2577 icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK);
2578 }
2579 if (sk->sk_route_caps & NETIF_F_GSO)
2580 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
2581 if (unlikely(sk->sk_gso_disabled))
2582 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2583 if (sk_can_gso(sk)) {
2584 if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
2585 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2586 } else {
2587 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
2588 sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
2589 /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
2590 max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
2591 }
2592 }
2593 sk->sk_gso_max_segs = max_segs;
2594 sk_dst_set(sk, dst);
2595 }
2596 EXPORT_SYMBOL_GPL(sk_setup_caps);
2597
2598 /*
2599 * Simple resource managers for sockets.
2600 */
2601
2602
2603 /*
2604 * Write buffer destructor automatically called from kfree_skb.
2605 */
2606 void sock_wfree(struct sk_buff *skb)
2607 {
2608 struct sock *sk = skb->sk;
2609 unsigned int len = skb->truesize;
2610 bool free;
2611
2612 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2613 if (sock_flag(sk, SOCK_RCU_FREE) &&
2614 sk->sk_write_space == sock_def_write_space) {
2615 rcu_read_lock();
2616 free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
2617 sock_def_write_space_wfree(sk);
2618 rcu_read_unlock();
2619 if (unlikely(free))
2620 __sk_free(sk);
2621 return;
2622 }
2623
2624 /*
2625 * Keep a reference on sk_wmem_alloc; it will be released
2626 * after the sk_write_space() call
2627 */
2628 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
2629 sk->sk_write_space(sk);
2630 len = 1;
2631 }
2632 /*
2633 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
2634 * could not do because of in-flight packets
2635 */
2636 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2637 __sk_free(sk);
2638 }
2639 EXPORT_SYMBOL(sock_wfree);
2640
2641 /* This variant of sock_wfree() is used by TCP,
2642 * since it sets SOCK_USE_WRITE_QUEUE.
2643 */
2644 void __sock_wfree(struct sk_buff *skb)
2645 {
2646 struct sock *sk = skb->sk;
2647
2648 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2649 __sk_free(sk);
2650 }
2651
2652 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2653 {
2654 skb_orphan(skb);
2655 #ifdef CONFIG_INET
2656 if (unlikely(!sk_fullsock(sk)))
2657 return skb_set_owner_edemux(skb, sk);
2658 #endif
2659 skb->sk = sk;
2660 skb->destructor = sock_wfree;
2661 skb_set_hash_from_sk(skb, sk);
2662 /*
2663 * We used to take a refcount on sk, but the following operation
2664 * is enough to guarantee sk_free() won't free this sock until
2665 * all in-flight packets have completed
2666 */
2667 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2668 }
2669 EXPORT_SYMBOL(skb_set_owner_w);
2670
2671 static bool can_skb_orphan_partial(const struct sk_buff *skb)
2672 {
2673 /* Drivers depend on in-order delivery for crypto offload,
2674 * partial orphan breaks out-of-order-OK logic.
2675 */
2676 if (skb_is_decrypted(skb))
2677 return false;
2678
2679 return (skb->destructor == sock_wfree ||
2680 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2681 }
2682
2683 /* This helper is used by netem, as it can hold packets in its
2684 * delay queue. We want to allow the owner socket to send more
2685 * packets, as if they were already TX completed by a typical driver.
2686 * But we also want to keep skb->sk set because some packet schedulers
2687 * rely on it (sch_fq for example).
2688 */
2689 void skb_orphan_partial(struct sk_buff *skb)
2690 {
2691 if (skb_is_tcp_pure_ack(skb))
2692 return;
2693
2694 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2695 return;
2696
2697 skb_orphan(skb);
2698 }
2699 EXPORT_SYMBOL(skb_orphan_partial);
2700
2701 /*
2702 * Read buffer destructor automatically called from kfree_skb.
2703 */
2704 void sock_rfree(struct sk_buff *skb)
2705 {
2706 struct sock *sk = skb->sk;
2707 unsigned int len = skb->truesize;
2708
2709 atomic_sub(len, &sk->sk_rmem_alloc);
2710 sk_mem_uncharge(sk, len);
2711 }
2712 EXPORT_SYMBOL(sock_rfree);
2713
2714 /*
2715 * Buffer destructor for skbs that are not used directly in read or write
2716 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2717 */
2718 void sock_efree(struct sk_buff *skb)
2719 {
2720 sock_put(skb->sk);
2721 }
2722 EXPORT_SYMBOL(sock_efree);
2723
2724 /* Buffer destructor for prefetch/receive path where reference count may
2725 * not be held, e.g. for listen sockets.
2726 */
2727 #ifdef CONFIG_INET
2728 void sock_pfree(struct sk_buff *skb)
2729 {
2730 struct sock *sk = skb->sk;
2731
2732 if (!sk_is_refcounted(sk))
2733 return;
2734
2735 if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
2736 inet_reqsk(sk)->rsk_listener = NULL;
2737 reqsk_free(inet_reqsk(sk));
2738 return;
2739 }
2740
2741 sock_gen_put(sk);
2742 }
2743 EXPORT_SYMBOL(sock_pfree);
2744 #endif /* CONFIG_INET */
2745
2746 kuid_t sock_i_uid(struct sock *sk)
2747 {
2748 kuid_t uid;
2749
2750 read_lock_bh(&sk->sk_callback_lock);
2751 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2752 read_unlock_bh(&sk->sk_callback_lock);
2753 return uid;
2754 }
2755 EXPORT_SYMBOL(sock_i_uid);
2756
2757 unsigned long __sock_i_ino(struct sock *sk)
2758 {
2759 unsigned long ino;
2760
2761 read_lock(&sk->sk_callback_lock);
2762 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2763 read_unlock(&sk->sk_callback_lock);
2764 return ino;
2765 }
2766 EXPORT_SYMBOL(__sock_i_ino);
2767
2768 unsigned long sock_i_ino(struct sock *sk)
2769 {
2770 unsigned long ino;
2771
2772 local_bh_disable();
2773 ino = __sock_i_ino(sk);
2774 local_bh_enable();
2775 return ino;
2776 }
2777 EXPORT_SYMBOL(sock_i_ino);
2778
2779 /*
2780 * Allocate a skb from the socket's send buffer.
2781 */
2782 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2783 gfp_t priority)
2784 {
2785 if (force ||
2786 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2787 struct sk_buff *skb = alloc_skb(size, priority);
2788
2789 if (skb) {
2790 skb_set_owner_w(skb, sk);
2791 return skb;
2792 }
2793 }
2794 return NULL;
2795 }
2796 EXPORT_SYMBOL(sock_wmalloc);
2797
2798 static void sock_ofree(struct sk_buff *skb)
2799 {
2800 struct sock *sk = skb->sk;
2801
2802 atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2803 }
2804
2805 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2806 gfp_t priority)
2807 {
2808 struct sk_buff *skb;
2809
2810 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2811 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2812 READ_ONCE(sock_net(sk)->core.sysctl_optmem_max))
2813 return NULL;
2814
2815 skb = alloc_skb(size, priority);
2816 if (!skb)
2817 return NULL;
2818
2819 atomic_add(skb->truesize, &sk->sk_omem_alloc);
2820 skb->sk = sk;
2821 skb->destructor = sock_ofree;
2822 return skb;
2823 }
2824
2825 /*
2826 * Allocate a memory block from the socket's option memory buffer.
2827 */
2828 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2829 {
2830 int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max);
2831
2832 if ((unsigned int)size <= optmem_max &&
2833 atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
2834 void *mem;
2835 /* First do the add, to avoid the race if kmalloc
2836 * might sleep.
2837 */
2838 atomic_add(size, &sk->sk_omem_alloc);
2839 mem = kmalloc(size, priority);
2840 if (mem)
2841 return mem;
2842 atomic_sub(size, &sk->sk_omem_alloc);
2843 }
2844 return NULL;
2845 }
2846 EXPORT_SYMBOL(sock_kmalloc);
2847
2848 /*
2849 * Duplicate the input "src" memory block using the socket's
2850 * option memory buffer.
2851 */
2852 void *sock_kmemdup(struct sock *sk, const void *src,
2853 int size, gfp_t priority)
2854 {
2855 void *mem;
2856
2857 mem = sock_kmalloc(sk, size, priority);
2858 if (mem)
2859 memcpy(mem, src, size);
2860 return mem;
2861 }
2862 EXPORT_SYMBOL(sock_kmemdup);
2863
2864 /* Free an option memory block. Note that we actually want the inline
2865 * here, as it allows gcc to detect the nullify and fold away the
2866 * condition entirely.
2867 */
2868 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2869 const bool nullify)
2870 {
2871 if (WARN_ON_ONCE(!mem))
2872 return;
2873 if (nullify)
2874 kfree_sensitive(mem);
2875 else
2876 kfree(mem);
2877 atomic_sub(size, &sk->sk_omem_alloc);
2878 }
2879
2880 void sock_kfree_s(struct sock *sk, void *mem, int size)
2881 {
2882 __sock_kfree_s(sk, mem, size, false);
2883 }
2884 EXPORT_SYMBOL(sock_kfree_s);
2885
2886 void sock_kzfree_s(struct sock *sk, void *mem, int size)
2887 {
2888 __sock_kfree_s(sk, mem, size, true);
2889 }
2890 EXPORT_SYMBOL(sock_kzfree_s);
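/*
 * Example (illustrative sketch, not compiled): pairing the option-memory
 * helpers above. The same size given to sock_kmalloc()/sock_kmemdup()
 * must be passed back to sock_kfree_s()/sock_kzfree_s() so sk_omem_alloc
 * balances. The helper name is hypothetical.
 */
#if 0
static int example_with_key(struct sock *sk, const u8 *key, int klen)
{
	u8 *copy = sock_kmemdup(sk, key, klen, GFP_KERNEL);

	if (!copy)
		return -ENOBUFS;
	/* ... use the copy ... */
	sock_kzfree_s(sk, copy, klen);	/* zeroes the buffer before freeing */
	return 0;
}
#endif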
2891
2892 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2893 I think these locks should be removed for datagram sockets.
2894 */
2895 static long sock_wait_for_wmem(struct sock *sk, long timeo)
2896 {
2897 DEFINE_WAIT(wait);
2898
2899 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2900 for (;;) {
2901 if (!timeo)
2902 break;
2903 if (signal_pending(current))
2904 break;
2905 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2906 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2907 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2908 break;
2909 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2910 break;
2911 if (READ_ONCE(sk->sk_err))
2912 break;
2913 timeo = schedule_timeout(timeo);
2914 }
2915 finish_wait(sk_sleep(sk), &wait);
2916 return timeo;
2917 }
2918
2919
2920 /*
2921 * Generic send/receive buffer handlers
2922 */
2923
2924 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2925 unsigned long data_len, int noblock,
2926 int *errcode, int max_page_order)
2927 {
2928 struct sk_buff *skb;
2929 long timeo;
2930 int err;
2931
2932 timeo = sock_sndtimeo(sk, noblock);
2933 for (;;) {
2934 err = sock_error(sk);
2935 if (err != 0)
2936 goto failure;
2937
2938 err = -EPIPE;
2939 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2940 goto failure;
2941
2942 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2943 break;
2944
2945 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2946 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2947 err = -EAGAIN;
2948 if (!timeo)
2949 goto failure;
2950 if (signal_pending(current))
2951 goto interrupted;
2952 timeo = sock_wait_for_wmem(sk, timeo);
2953 }
2954 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2955 errcode, sk->sk_allocation);
2956 if (skb)
2957 skb_set_owner_w(skb, sk);
2958 return skb;
2959
2960 interrupted:
2961 err = sock_intr_errno(timeo);
2962 failure:
2963 *errcode = err;
2964 return NULL;
2965 }
2966 EXPORT_SYMBOL(sock_alloc_send_pskb);
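/*
 * Example (illustrative sketch, not compiled): typical sendmsg-side use of
 * sock_alloc_send_pskb(), blocking up to the socket's send timeout for
 * wmem and honouring MSG_DONTWAIT. The wrapper name is hypothetical.
 */
#if 0
static struct sk_buff *example_alloc_for_send(struct sock *sk,
					      struct msghdr *msg,
					      unsigned long hlen,
					      unsigned long dlen, int *err)
{
	/* on NULL return, *err is -EAGAIN, -EPIPE or a signal errno */
	return sock_alloc_send_pskb(sk, hlen, dlen,
				    msg->msg_flags & MSG_DONTWAIT, err, 0);
}
#endif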
2967
2968 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
2969 struct sockcm_cookie *sockc)
2970 {
2971 u32 tsflags;
2972
2973 BUILD_BUG_ON(SOF_TIMESTAMPING_LAST == (1 << 31));
2974
2975 switch (cmsg->cmsg_type) {
2976 case SO_MARK:
2977 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
2978 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2979 return -EPERM;
2980 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2981 return -EINVAL;
2982 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2983 break;
2984 case SO_TIMESTAMPING_OLD:
2985 case SO_TIMESTAMPING_NEW:
2986 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2987 return -EINVAL;
2988
2989 tsflags = *(u32 *)CMSG_DATA(cmsg);
2990 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2991 return -EINVAL;
2992
2993 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2994 sockc->tsflags |= tsflags;
2995 break;
2996 case SCM_TXTIME:
2997 if (!sock_flag(sk, SOCK_TXTIME))
2998 return -EINVAL;
2999 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
3000 return -EINVAL;
3001 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
3002 break;
3003 case SCM_TS_OPT_ID:
3004 if (sk_is_tcp(sk))
3005 return -EINVAL;
3006 tsflags = READ_ONCE(sk->sk_tsflags);
3007 if (!(tsflags & SOF_TIMESTAMPING_OPT_ID))
3008 return -EINVAL;
3009 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
3010 return -EINVAL;
3011 sockc->ts_opt_id = *(u32 *)CMSG_DATA(cmsg);
3012 sockc->tsflags |= SOCKCM_FLAG_TS_OPT_ID;
3013 break;
3014 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
3015 case SCM_RIGHTS:
3016 case SCM_CREDENTIALS:
3017 break;
3018 case SO_PRIORITY:
3019 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
3020 return -EINVAL;
3021 if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg)))
3022 return -EPERM;
3023 sockc->priority = *(u32 *)CMSG_DATA(cmsg);
3024 break;
3025 default:
3026 return -EINVAL;
3027 }
3028 return 0;
3029 }
3030 EXPORT_SYMBOL(__sock_cmsg_send);
3031
3032 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
3033 struct sockcm_cookie *sockc)
3034 {
3035 struct cmsghdr *cmsg;
3036 int ret;
3037
3038 for_each_cmsghdr(cmsg, msg) {
3039 if (!CMSG_OK(msg, cmsg))
3040 return -EINVAL;
3041 if (cmsg->cmsg_level != SOL_SOCKET)
3042 continue;
3043 ret = __sock_cmsg_send(sk, cmsg, sockc);
3044 if (ret)
3045 return ret;
3046 }
3047 return 0;
3048 }
3049 EXPORT_SYMBOL(sock_cmsg_send);
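/*
 * Example (illustrative sketch, not compiled): how a protocol's sendmsg()
 * consumes SOL_SOCKET control messages -- seed the cookie from socket
 * defaults with sockcm_init(), then let the cmsgs override it; cf.
 * tcp_sendmsg_locked(). The helper name is hypothetical.
 */
#if 0
static int example_sendmsg_prep(struct sock *sk, struct msghdr *msg,
				struct sockcm_cookie *sockc)
{
	sockcm_init(sockc, sk);
	if (msg->msg_controllen)
		return sock_cmsg_send(sk, msg, sockc);
	return 0;	/* sockc->mark/tsflags/transmit_time now apply */
}
#endif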
3050
3051 static void sk_enter_memory_pressure(struct sock *sk)
3052 {
3053 if (!sk->sk_prot->enter_memory_pressure)
3054 return;
3055
3056 sk->sk_prot->enter_memory_pressure(sk);
3057 }
3058
3059 static void sk_leave_memory_pressure(struct sock *sk)
3060 {
3061 if (sk->sk_prot->leave_memory_pressure) {
3062 INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
3063 tcp_leave_memory_pressure, sk);
3064 } else {
3065 unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
3066
3067 if (memory_pressure && READ_ONCE(*memory_pressure))
3068 WRITE_ONCE(*memory_pressure, 0);
3069 }
3070 }
3071
3072 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
3073
3074 /**
3075 * skb_page_frag_refill - check that a page_frag contains enough room
3076 * @sz: minimum size of the fragment we want to get
3077 * @pfrag: pointer to page_frag
3078 * @gfp: priority for memory allocation
3079 *
3080 * Note: While this allocator tries to use high order pages, there is
3081 * no guarantee that allocations succeed. Therefore, @sz MUST be
3082 * less than or equal to PAGE_SIZE.
3083 */
3084 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
3085 {
3086 if (pfrag->page) {
3087 if (page_ref_count(pfrag->page) == 1) {
3088 pfrag->offset = 0;
3089 return true;
3090 }
3091 if (pfrag->offset + sz <= pfrag->size)
3092 return true;
3093 put_page(pfrag->page);
3094 }
3095
3096 pfrag->offset = 0;
3097 if (SKB_FRAG_PAGE_ORDER &&
3098 !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
3099 /* Avoid direct reclaim but allow kswapd to wake */
3100 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
3101 __GFP_COMP | __GFP_NOWARN |
3102 __GFP_NORETRY,
3103 SKB_FRAG_PAGE_ORDER);
3104 if (likely(pfrag->page)) {
3105 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
3106 return true;
3107 }
3108 }
3109 pfrag->page = alloc_page(gfp);
3110 if (likely(pfrag->page)) {
3111 pfrag->size = PAGE_SIZE;
3112 return true;
3113 }
3114 return false;
3115 }
3116 EXPORT_SYMBOL(skb_page_frag_refill);
3117
3118 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
3119 {
3120 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
3121 return true;
3122
3123 sk_enter_memory_pressure(sk);
3124 sk_stream_moderate_sndbuf(sk);
3125 return false;
3126 }
3127 EXPORT_SYMBOL(sk_page_frag_refill);
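/*
 * Example (illustrative sketch, not compiled, simplified from
 * tcp_sendmsg_locked()): consuming the per-socket page_frag -- refill,
 * copy at the current offset, attach the page to the skb, then advance
 * the offset. Error paths and frag merging are omitted.
 */
#if 0
static int example_append_frag(struct sock *sk, struct msghdr *msg,
			       struct sk_buff *skb, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int err;

	if (!sk_page_frag_refill(sk, pfrag))
		return -EAGAIN;		/* real callers wait for memory */

	err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
				       pfrag->page, pfrag->offset, copy);
	if (err)
		return err;

	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			   pfrag->page, pfrag->offset, copy);
	get_page(pfrag->page);
	pfrag->offset += copy;
	return copy;
}
#endif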
3128
3129 void __lock_sock(struct sock *sk)
3130 __releases(&sk->sk_lock.slock)
3131 __acquires(&sk->sk_lock.slock)
3132 {
3133 DEFINE_WAIT(wait);
3134
3135 for (;;) {
3136 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
3137 TASK_UNINTERRUPTIBLE);
3138 spin_unlock_bh(&sk->sk_lock.slock);
3139 schedule();
3140 spin_lock_bh(&sk->sk_lock.slock);
3141 if (!sock_owned_by_user(sk))
3142 break;
3143 }
3144 finish_wait(&sk->sk_lock.wq, &wait);
3145 }
3146
3147 void __release_sock(struct sock *sk)
3148 __releases(&sk->sk_lock.slock)
3149 __acquires(&sk->sk_lock.slock)
3150 {
3151 struct sk_buff *skb, *next;
3152
3153 while ((skb = sk->sk_backlog.head) != NULL) {
3154 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
3155
3156 spin_unlock_bh(&sk->sk_lock.slock);
3157
3158 do {
3159 next = skb->next;
3160 prefetch(next);
3161 DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb));
3162 skb_mark_not_on_list(skb);
3163 sk_backlog_rcv(sk, skb);
3164
3165 cond_resched();
3166
3167 skb = next;
3168 } while (skb != NULL);
3169
3170 spin_lock_bh(&sk->sk_lock.slock);
3171 }
3172
3173 /*
3174 * Doing the zeroing here guarantees we cannot loop forever
3175 * while a wild producer attempts to flood us.
3176 */
3177 sk->sk_backlog.len = 0;
3178 }
3179
3180 void __sk_flush_backlog(struct sock *sk)
3181 {
3182 spin_lock_bh(&sk->sk_lock.slock);
3183 __release_sock(sk);
3184
3185 if (sk->sk_prot->release_cb)
3186 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb,
3187 tcp_release_cb, sk);
3188
3189 spin_unlock_bh(&sk->sk_lock.slock);
3190 }
3191 EXPORT_SYMBOL_GPL(__sk_flush_backlog);
3192
3193 /**
3194 * sk_wait_data - wait for data to arrive at sk_receive_queue
3195 * @sk: sock to wait on
3196 * @timeo: for how long
3197 * @skb: last skb seen on sk_receive_queue
3198 *
3199 * Socket state, including sk->sk_err, is now changed only under the
3200 * socket lock, hence we may omit checks after joining the wait queue.
3201 * We check the receive queue before schedule() only as an optimization;
3202 * it is very likely that release_sock() added new data.
3203 */
3204 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
3205 {
3206 DEFINE_WAIT_FUNC(wait, woken_wake_function);
3207 int rc;
3208
3209 add_wait_queue(sk_sleep(sk), &wait);
3210 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3211 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
3212 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3213 remove_wait_queue(sk_sleep(sk), &wait);
3214 return rc;
3215 }
3216 EXPORT_SYMBOL(sk_wait_data);
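/*
 * Example (illustrative sketch, not compiled): a blocking receive loop
 * around sk_wait_data(). The caller holds the socket lock; sk_wait_data()
 * drops and re-takes it around the sleep, cf. tcp_recvmsg_locked(). The
 * helper name is hypothetical.
 */
#if 0
static struct sk_buff *example_wait_for_data(struct sock *sk, int flags)
{
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (!timeo || signal_pending(current))
			return NULL;	/* -EAGAIN / sock_intr_errno() in real code */
		sk_wait_data(sk, &timeo, NULL);
	}
	return skb;
}
#endif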
3217
3218 /**
3219 * __sk_mem_raise_allocated - increase memory_allocated
3220 * @sk: socket
3221 * @size: memory size to allocate
3222 * @amt: pages to allocate
3223 * @kind: allocation type
3224 *
3225 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc.
3226 *
3227 * Unlike the globally shared limits among the sockets under same protocol,
3228 * consuming the budget of a memcg won't have direct effect on other ones.
3229 * So be optimistic about the memcg's tolerance, and leave it to the
3230 * callers to decide whether or not to raise allocations, using
3231 * sk_under_memory_pressure() or its variants.
3232 */
3233 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
3234 {
3235 struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
3236 struct proto *prot = sk->sk_prot;
3237 bool charged = false;
3238 long allocated;
3239
3240 sk_memory_allocated_add(sk, amt);
3241 allocated = sk_memory_allocated(sk);
3242
3243 if (memcg) {
3244 if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()))
3245 goto suppress_allocation;
3246 charged = true;
3247 }
3248
3249 /* Under limit. */
3250 if (allocated <= sk_prot_mem_limits(sk, 0)) {
3251 sk_leave_memory_pressure(sk);
3252 return 1;
3253 }
3254
3255 /* Under pressure. */
3256 if (allocated > sk_prot_mem_limits(sk, 1))
3257 sk_enter_memory_pressure(sk);
3258
3259 /* Over hard limit. */
3260 if (allocated > sk_prot_mem_limits(sk, 2))
3261 goto suppress_allocation;
3262
3263 /* Guarantee minimum buffer size under pressure (either global
3264 * or memcg) to make sure features described in RFC 7323 (TCP
3265 * Extensions for High Performance) work properly.
3266 *
3267 * This rule does NOT apply once usage exceeds the global or the
3268 * memcg hard limit, or else a DoS attack could be mounted by spawning
3269 * lots of sockets whose usage is under the minimum buffer size.
3270 */
3271 if (kind == SK_MEM_RECV) {
3272 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
3273 return 1;
3274
3275 } else { /* SK_MEM_SEND */
3276 int wmem0 = sk_get_wmem0(sk, prot);
3277
3278 if (sk->sk_type == SOCK_STREAM) {
3279 if (sk->sk_wmem_queued < wmem0)
3280 return 1;
3281 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
3282 return 1;
3283 }
3284 }
3285
3286 if (sk_has_memory_pressure(sk)) {
3287 u64 alloc;
3288
3289 /* The following 'average' heuristic is within the
3290 * scope of global accounting, so it only makes
3291 * sense for global memory pressure.
3292 */
3293 if (!sk_under_global_memory_pressure(sk))
3294 return 1;
3295
3296 /* Try to be fair among all the sockets under global
3297 * pressure by allowing the ones whose usage is below
3298 * average to raise allocations.
3299 */
3300 alloc = sk_sockets_allocated_read_positive(sk);
3301 if (sk_prot_mem_limits(sk, 2) > alloc *
3302 sk_mem_pages(sk->sk_wmem_queued +
3303 atomic_read(&sk->sk_rmem_alloc) +
3304 sk->sk_forward_alloc))
3305 return 1;
3306 }
3307
3308 suppress_allocation:
3309
3310 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
3311 sk_stream_moderate_sndbuf(sk);
3312
3313 /* Fail only if the socket is _under_ its sndbuf.
3314 * In this case we cannot block, so we have to fail.
3315 */
3316 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
3317 /* Force charge with __GFP_NOFAIL */
3318 if (memcg && !charged) {
3319 mem_cgroup_charge_skmem(memcg, amt,
3320 gfp_memcg_charge() | __GFP_NOFAIL);
3321 }
3322 return 1;
3323 }
3324 }
3325
3326 if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
3327 trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
3328
3329 sk_memory_allocated_sub(sk, amt);
3330
3331 if (charged)
3332 mem_cgroup_uncharge_skmem(memcg, amt);
3333
3334 return 0;
3335 }
3336
3337 /**
3338 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
3339 * @sk: socket
3340 * @size: memory size to allocate
3341 * @kind: allocation type
3342 *
3343 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
3344 * rmem allocation. This function assumes that protocols which have
3345 * memory_pressure use sk_wmem_queued as write buffer accounting.
3346 */
3347 int __sk_mem_schedule(struct sock *sk, int size, int kind)
3348 {
3349 int ret, amt = sk_mem_pages(size);
3350
3351 sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
3352 ret = __sk_mem_raise_allocated(sk, size, amt, kind);
3353 if (!ret)
3354 sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
3355 return ret;
3356 }
3357 EXPORT_SYMBOL(__sk_mem_schedule);
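/*
 * Example (illustrative sketch, not compiled, simplified from
 * sock_queue_rcv_skb()): receive-side accounting on top of the scheduling
 * helpers above -- sk_rmem_schedule() only falls through to
 * __sk_mem_schedule() when sk_forward_alloc cannot cover the skb. Filters
 * and drop reasons are omitted; the helper name is hypothetical.
 */
#if 0
static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb_set_owner_r(skb, sk);	/* charges rmem and forward_alloc */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return 0;
}
#endif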
3358
3359 /**
3360 * __sk_mem_reduce_allocated - reclaim memory_allocated
3361 * @sk: socket
3362 * @amount: number of quanta
3363 *
3364 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
3365 */
3366 void __sk_mem_reduce_allocated(struct sock *sk, int amount)
3367 {
3368 sk_memory_allocated_sub(sk, amount);
3369
3370 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3371 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
3372
3373 if (sk_under_global_memory_pressure(sk) &&
3374 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
3375 sk_leave_memory_pressure(sk);
3376 }
3377
3378 /**
3379 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
3380 * @sk: socket
3381 * @amount: number of bytes (rounded down to a PAGE_SIZE multiple)
3382 */
3383 void __sk_mem_reclaim(struct sock *sk, int amount)
3384 {
3385 amount >>= PAGE_SHIFT;
3386 sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
3387 __sk_mem_reduce_allocated(sk, amount);
3388 }
3389 EXPORT_SYMBOL(__sk_mem_reclaim);
3390
3391 int sk_set_peek_off(struct sock *sk, int val)
3392 {
3393 WRITE_ONCE(sk->sk_peek_off, val);
3394 return 0;
3395 }
3396 EXPORT_SYMBOL_GPL(sk_set_peek_off);
3397
3398 /*
3399 * Set of default routines for initialising struct proto_ops when
3400 * the protocol does not support a particular function. In certain
3401 * cases where it makes no sense for a protocol to have a "do nothing"
3402 * function, some default processing is provided.
3403 */
3404
3405 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
3406 {
3407 return -EOPNOTSUPP;
3408 }
3409 EXPORT_SYMBOL(sock_no_bind);
3410
3411 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
3412 int len, int flags)
3413 {
3414 return -EOPNOTSUPP;
3415 }
3416 EXPORT_SYMBOL(sock_no_connect);
3417
3418 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
3419 {
3420 return -EOPNOTSUPP;
3421 }
3422 EXPORT_SYMBOL(sock_no_socketpair);
3423
3424 int sock_no_accept(struct socket *sock, struct socket *newsock,
3425 struct proto_accept_arg *arg)
3426 {
3427 return -EOPNOTSUPP;
3428 }
3429 EXPORT_SYMBOL(sock_no_accept);
3430
3431 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
3432 int peer)
3433 {
3434 return -EOPNOTSUPP;
3435 }
3436 EXPORT_SYMBOL(sock_no_getname);
3437
3438 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3439 {
3440 return -EOPNOTSUPP;
3441 }
3442 EXPORT_SYMBOL(sock_no_ioctl);
3443
3444 int sock_no_listen(struct socket *sock, int backlog)
3445 {
3446 return -EOPNOTSUPP;
3447 }
3448 EXPORT_SYMBOL(sock_no_listen);
3449
3450 int sock_no_shutdown(struct socket *sock, int how)
3451 {
3452 return -EOPNOTSUPP;
3453 }
3454 EXPORT_SYMBOL(sock_no_shutdown);
3455
3456 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
3457 {
3458 return -EOPNOTSUPP;
3459 }
3460 EXPORT_SYMBOL(sock_no_sendmsg);
3461
3462 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
3463 {
3464 return -EOPNOTSUPP;
3465 }
3466 EXPORT_SYMBOL(sock_no_sendmsg_locked);
3467
3468 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
3469 int flags)
3470 {
3471 return -EOPNOTSUPP;
3472 }
3473 EXPORT_SYMBOL(sock_no_recvmsg);
3474
3475 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
3476 {
3477 /* Mirror missing mmap method error code */
3478 return -ENODEV;
3479 }
3480 EXPORT_SYMBOL(sock_no_mmap);
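/*
 * Example (illustrative sketch, not compiled): wiring the sock_no_*()
 * stubs above into a proto_ops table for a protocol that lacks
 * connection-oriented operations; cf. inet_dgram_ops. The table below is
 * hypothetical and deliberately incomplete.
 */
#if 0
static const struct proto_ops example_dgram_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.listen		= sock_no_listen,
	.accept		= sock_no_accept,
	.socketpair	= sock_no_socketpair,
	.mmap		= sock_no_mmap,
	/* ... real handlers for the operations the protocol supports ... */
};
#endif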
3481
3482 /*
3483 * When a file is received (via SCM_RIGHTS, etc), we must bump the
3484 * various sock-based usage counts.
3485 */
3486 void __receive_sock(struct file *file)
3487 {
3488 struct socket *sock;
3489
3490 sock = sock_from_file(file);
3491 if (sock) {
3492 sock_update_netprioidx(&sock->sk->sk_cgrp_data);
3493 sock_update_classid(&sock->sk->sk_cgrp_data);
3494 }
3495 }
3496
3497 /*
3498 * Default Socket Callbacks
3499 */
3500
3501 static void sock_def_wakeup(struct sock *sk)
3502 {
3503 struct socket_wq *wq;
3504
3505 rcu_read_lock();
3506 wq = rcu_dereference(sk->sk_wq);
3507 if (skwq_has_sleeper(wq))
3508 wake_up_interruptible_all(&wq->wait);
3509 rcu_read_unlock();
3510 }
3511
3512 static void sock_def_error_report(struct sock *sk)
3513 {
3514 struct socket_wq *wq;
3515
3516 rcu_read_lock();
3517 wq = rcu_dereference(sk->sk_wq);
3518 if (skwq_has_sleeper(wq))
3519 wake_up_interruptible_poll(&wq->wait, EPOLLERR);
3520 sk_wake_async_rcu(sk, SOCK_WAKE_IO, POLL_ERR);
3521 rcu_read_unlock();
3522 }
3523
3524 void sock_def_readable(struct sock *sk)
3525 {
3526 struct socket_wq *wq;
3527
3528 trace_sk_data_ready(sk);
3529
3530 rcu_read_lock();
3531 wq = rcu_dereference(sk->sk_wq);
3532 if (skwq_has_sleeper(wq))
3533 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
3534 EPOLLRDNORM | EPOLLRDBAND);
3535 sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
3536 rcu_read_unlock();
3537 }
3538
3539 static void sock_def_write_space(struct sock *sk)
3540 {
3541 struct socket_wq *wq;
3542
3543 rcu_read_lock();
3544
3545 /* Do not wake up a writer until he can make "significant"
3546 * progress. --DaveM
3547 */
3548 if (sock_writeable(sk)) {
3549 wq = rcu_dereference(sk->sk_wq);
3550 if (skwq_has_sleeper(wq))
3551 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3552 EPOLLWRNORM | EPOLLWRBAND);
3553
3554 /* Should agree with poll, otherwise some programs break */
3555 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
3556 }
3557
3558 rcu_read_unlock();
3559 }
3560
3561 /* An optimised version of sock_def_write_space(); it should only be
3562 * called for SOCK_RCU_FREE sockets, under an RCU read-side section,
3563 * and after putting ->sk_wmem_alloc.
3564 */
3565 static void sock_def_write_space_wfree(struct sock *sk)
3566 {
3567 /* Do not wake up a writer until he can make "significant"
3568 * progress. --DaveM
3569 */
3570 if (sock_writeable(sk)) {
3571 struct socket_wq *wq = rcu_dereference(sk->sk_wq);
3572
3573 /* rely on refcount_sub from sock_wfree() */
3574 smp_mb__after_atomic();
3575 if (wq && waitqueue_active(&wq->wait))
3576 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3577 EPOLLWRNORM | EPOLLWRBAND);
3578
3579 /* Should agree with poll, otherwise some programs break */
3580 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
3581 }
3582 }
3583
3584 static void sock_def_destruct(struct sock *sk)
3585 {
3586 }
3587
3588 void sk_send_sigurg(struct sock *sk)
3589 {
3590 if (sk->sk_socket && sk->sk_socket->file)
3591 if (send_sigurg(sk->sk_socket->file))
3592 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
3593 }
3594 EXPORT_SYMBOL(sk_send_sigurg);
3595
3596 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
3597 unsigned long expires)
3598 {
3599 if (!mod_timer(timer, expires))
3600 sock_hold(sk);
3601 }
3602 EXPORT_SYMBOL(sk_reset_timer);
3603
3604 void sk_stop_timer(struct sock *sk, struct timer_list *timer)
3605 {
3606 if (timer_delete(timer))
3607 __sock_put(sk);
3608 }
3609 EXPORT_SYMBOL(sk_stop_timer);
3610
3611 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
3612 {
3613 if (timer_delete_sync(timer))
3614 __sock_put(sk);
3615 }
3616 EXPORT_SYMBOL(sk_stop_timer_sync);
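/*
 * Example (illustrative sketch, not compiled): the reference rule behind
 * the timer helpers above -- sk_reset_timer() takes a sock_hold() only
 * when it arms a timer that was not already pending, and sk_stop_timer()
 * (or the expiring handler) drops that reference. Helper names are
 * hypothetical.
 */
#if 0
static void example_arm(struct sock *sk, unsigned long timeout)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}

static void example_disarm(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);	/* __sock_put() iff pending */
}
#endif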
3617
3618 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
3619 {
3620 sk_init_common(sk);
3621 sk->sk_send_head = NULL;
3622
3623 timer_setup(&sk->sk_timer, NULL, 0);
3624
3625 sk->sk_allocation = GFP_KERNEL;
3626 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);
3627 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
3628 sk->sk_state = TCP_CLOSE;
3629 sk->sk_use_task_frag = true;
3630 sk_set_socket(sk, sock);
3631
3632 sock_set_flag(sk, SOCK_ZAPPED);
3633
3634 if (sock) {
3635 sk->sk_type = sock->type;
3636 RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
3637 sock->sk = sk;
3638 } else {
3639 RCU_INIT_POINTER(sk->sk_wq, NULL);
3640 }
3641 sk->sk_uid = uid;
3642
3643 sk->sk_state_change = sock_def_wakeup;
3644 sk->sk_data_ready = sock_def_readable;
3645 sk->sk_write_space = sock_def_write_space;
3646 sk->sk_error_report = sock_def_error_report;
3647 sk->sk_destruct = sock_def_destruct;
3648
3649 sk->sk_frag.page = NULL;
3650 sk->sk_frag.offset = 0;
3651 sk->sk_peek_off = -1;
3652
3653 sk->sk_peer_pid = NULL;
3654 sk->sk_peer_cred = NULL;
3655 spin_lock_init(&sk->sk_peer_lock);
3656
3657 sk->sk_write_pending = 0;
3658 sk->sk_rcvlowat = 1;
3659 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
3660 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
3661
3662 sk->sk_stamp = SK_DEFAULT_STAMP;
3663 #if BITS_PER_LONG==32
3664 seqlock_init(&sk->sk_stamp_seq);
3665 #endif
3666 atomic_set(&sk->sk_zckey, 0);
3667
3668 #ifdef CONFIG_NET_RX_BUSY_POLL
3669 sk->sk_napi_id = 0;
3670 sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
3671 #endif
3672
3673 sk->sk_max_pacing_rate = ~0UL;
3674 sk->sk_pacing_rate = ~0UL;
3675 WRITE_ONCE(sk->sk_pacing_shift, 10);
3676 sk->sk_incoming_cpu = -1;
3677
3678 sk_rx_queue_clear(sk);
3679 /*
3680 * Before updating sk_refcnt, we must commit prior changes to memory
3681 * (Documentation/RCU/rculist_nulls.rst for details)
3682 */
3683 smp_wmb();
3684 refcount_set(&sk->sk_refcnt, 1);
3685 atomic_set(&sk->sk_drops, 0);
3686 }
3687 EXPORT_SYMBOL(sock_init_data_uid);
3688
3689 void sock_init_data(struct socket *sock, struct sock *sk)
3690 {
3691 kuid_t uid = sock ?
3692 SOCK_INODE(sock)->i_uid :
3693 make_kuid(sock_net(sk)->user_ns, 0);
3694
3695 sock_init_data_uid(sock, sk, uid);
3696 }
3697 EXPORT_SYMBOL(sock_init_data);
3698
3699 void lock_sock_nested(struct sock *sk, int subclass)
3700 {
3701 /* The sk_lock has mutex_lock() semantics here. */
3702 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3703
3704 might_sleep();
3705 spin_lock_bh(&sk->sk_lock.slock);
3706 if (sock_owned_by_user_nocheck(sk))
3707 __lock_sock(sk);
3708 sk->sk_lock.owned = 1;
3709 spin_unlock_bh(&sk->sk_lock.slock);
3710 }
3711 EXPORT_SYMBOL(lock_sock_nested);
3712
3713 void release_sock(struct sock *sk)
3714 {
3715 spin_lock_bh(&sk->sk_lock.slock);
3716 if (sk->sk_backlog.tail)
3717 __release_sock(sk);
3718
3719 if (sk->sk_prot->release_cb)
3720 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb,
3721 tcp_release_cb, sk);
3722
3723 sock_release_ownership(sk);
3724 if (waitqueue_active(&sk->sk_lock.wq))
3725 wake_up(&sk->sk_lock.wq);
3726 spin_unlock_bh(&sk->sk_lock.slock);
3727 }
3728 EXPORT_SYMBOL(release_sock);
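/*
 * Example (illustrative sketch, not compiled): the canonical
 * process-context pattern served by lock_sock()/release_sock(); packets
 * arriving from softirq while the lock is owned queue on sk->sk_backlog
 * and are replayed by __release_sock() on the way out. The helper name
 * is hypothetical.
 */
#if 0
static void example_update(struct sock *sk)
{
	lock_sock(sk);
	/* ... mutate socket state; input lands in the backlog ... */
	release_sock(sk);
}
#endif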
3729
3730 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
3731 {
3732 might_sleep();
3733 spin_lock_bh(&sk->sk_lock.slock);
3734
3735 if (!sock_owned_by_user_nocheck(sk)) {
3736 /*
3737 * Fast path return with bottom halves disabled and
3738 * sock::sk_lock.slock held.
3739 *
3740 * The 'mutex' is not contended and holding
3741 * sock::sk_lock.slock prevents all other lockers from
3742 * proceeding, so the corresponding unlock_sock_fast() can
3743 * avoid the slow path of release_sock() completely and
3744 * just release slock.
3745 *
3746 * From a semantical POV this is equivalent to 'acquiring'
3747 * the 'mutex', hence the corresponding lockdep
3748 * mutex_release() has to happen in the fast path of
3749 * unlock_sock_fast().
3750 */
3751 return false;
3752 }
3753
3754 __lock_sock(sk);
3755 sk->sk_lock.owned = 1;
3756 __acquire(&sk->sk_lock.slock);
3757 spin_unlock_bh(&sk->sk_lock.slock);
3758 return true;
3759 }
3760 EXPORT_SYMBOL(__lock_sock_fast);
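/*
 * Example (illustrative sketch, not compiled): the fast-lock pair built on
 * __lock_sock_fast(). The returned "slow" flag tells unlock_sock_fast()
 * whether a full release_sock() is needed; on the fast path BHs stay
 * disabled, so the critical section must not sleep. The helper name is
 * hypothetical.
 */
#if 0
static void example_short_section(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... brief, non-sleeping critical section ... */
	unlock_sock_fast(sk, slow);
}
#endif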
3761
3762 int sock_gettstamp(struct socket *sock, void __user *userstamp,
3763 bool timeval, bool time32)
3764 {
3765 struct sock *sk = sock->sk;
3766 struct timespec64 ts;
3767
3768 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3769 ts = ktime_to_timespec64(sock_read_timestamp(sk));
3770 if (ts.tv_sec == -1)
3771 return -ENOENT;
3772 if (ts.tv_sec == 0) {
3773 ktime_t kt = ktime_get_real();
3774 sock_write_timestamp(sk, kt);
3775 ts = ktime_to_timespec64(kt);
3776 }
3777
3778 if (timeval)
3779 ts.tv_nsec /= 1000;
3780
3781 #ifdef CONFIG_COMPAT_32BIT_TIME
3782 if (time32)
3783 return put_old_timespec32(&ts, userstamp);
3784 #endif
3785 #ifdef CONFIG_SPARC64
3786 /* beware of padding in sparc64 timeval */
3787 if (timeval && !in_compat_syscall()) {
3788 struct __kernel_old_timeval __user tv = {
3789 .tv_sec = ts.tv_sec,
3790 .tv_usec = ts.tv_nsec,
3791 };
3792 if (copy_to_user(userstamp, &tv, sizeof(tv)))
3793 return -EFAULT;
3794 return 0;
3795 }
3796 #endif
3797 return put_timespec64(&ts, userstamp);
3798 }
3799 EXPORT_SYMBOL(sock_gettstamp);
3800
3801 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3802 {
3803 if (!sock_flag(sk, flag)) {
3804 unsigned long previous_flags = sk->sk_flags;
3805
3806 sock_set_flag(sk, flag);
3807 /*
3808 * We just set one of the two flags which require net
3809 * time stamping, but time stamping might have been on
3810 * already because of the other one.
3811 */
3812 if (sock_needs_netstamp(sk) &&
3813 !(previous_flags & SK_FLAGS_TIMESTAMP))
3814 net_enable_timestamp();
3815 }
3816 }
3817
3818 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3819 int level, int type)
3820 {
3821 struct sock_exterr_skb *serr;
3822 struct sk_buff *skb;
3823 int copied, err;
3824
3825 err = -EAGAIN;
3826 skb = sock_dequeue_err_skb(sk);
3827 if (skb == NULL)
3828 goto out;
3829
3830 copied = skb->len;
3831 if (copied > len) {
3832 msg->msg_flags |= MSG_TRUNC;
3833 copied = len;
3834 }
3835 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3836 if (err)
3837 goto out_free_skb;
3838
3839 sock_recv_timestamp(msg, sk, skb);
3840
3841 serr = SKB_EXT_ERR(skb);
3842 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3843
3844 msg->msg_flags |= MSG_ERRQUEUE;
3845 err = copied;
3846
3847 out_free_skb:
3848 kfree_skb(skb);
3849 out:
3850 return err;
3851 }
3852 EXPORT_SYMBOL(sock_recv_errqueue);
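
/*
 * Sketch of a typical call site (compare packet_recvmsg(); the level
 * and type shown follow the AF_PACKET usage and are illustrative): a
 * protocol's ->recvmsg() serves MSG_ERRQUEUE before touching its
 * normal receive queue.
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return sock_recv_errqueue(sk, msg, len, SOL_PACKET,
 *					  PACKET_TX_TIMESTAMP);
 */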

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what is the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);
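
/*
 * Sketch of how the sock_common_*() helpers are typically wired into a
 * protocol's ops table (hypothetical table, fields abridged):
 *
 *	static const struct proto_ops example_ops = {
 *		...
 *		.setsockopt = sock_common_setsockopt,
 *		.getsockopt = sock_common_getsockopt,
 *		.recvmsg    = sock_common_recvmsg,
 *		.gettstamp  = sock_gettstamp,
 *		...
 *	};
 */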

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network stack still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because some CPU
	 * is running the receiver and did the hash table lookup before we
	 * unhashed the socket. They will reach the receive queue and be
	 * purged by the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
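
/*
 * Illustrative sketch (hypothetical protocol): simple datagram
 * protocols can implement their ->close() by deferring straight to
 * sk_common_release(), much like the ping and raw sockets do.
 */
static void __maybe_unused example_proto_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}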

void sk_get_meminfo(const struct sock *sk, u32 *mem)
{
	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);

	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
	mem[SK_MEMINFO_FWD_ALLOC] = READ_ONCE(sk->sk_forward_alloc);
	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}

#ifdef CONFIG_PROC_FS
static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

int sock_inuse_get(struct net *net)
{
	int cpu, res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.prot_inuse, cpu)->all;

	return res;
}
EXPORT_SYMBOL_GPL(sock_inuse_get);
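
/*
 * For reference: the counters summed above are maintained by protocols
 * from their hash/unhash paths via sock_prot_inuse_add(), e.g.:
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);   (on hash)
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);  (on unhash)
 */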

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
	if (net->core.prot_inuse == NULL)
		return -ENOMEM;
	return 0;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.prot_inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);

static int assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

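	/*
	 * Note: the last bit of the bitmap is deliberately never used;
	 * find_first_zero_bit() selects it only when all other slots are
	 * taken, so it doubles as the "table exhausted" sentinel checked
	 * below.
	 */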
	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return -ENOSPC;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
	return 0;
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline int assign_proto_idx(struct proto *prot)
{
	return 0;
}

static inline void release_proto_idx(struct proto *prot)
{
}

#endif

static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
{
	if (!twsk_prot)
		return;
	kfree(twsk_prot->twsk_slab_name);
	twsk_prot->twsk_slab_name = NULL;
	kmem_cache_destroy(twsk_prot->twsk_slab);
	twsk_prot->twsk_slab = NULL;
}

static int tw_prot_init(const struct proto *prot)
{
	struct timewait_sock_ops *twsk_prot = prot->twsk_prot;

	if (!twsk_prot)
		return 0;

	twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
					      prot->name);
	if (!twsk_prot->twsk_slab_name)
		return -ENOMEM;

	twsk_prot->twsk_slab =
		kmem_cache_create(twsk_prot->twsk_slab_name,
				  twsk_prot->twsk_obj_size, 0,
				  SLAB_ACCOUNT | prot->slab_flags,
				  NULL);
	if (!twsk_prot->twsk_slab) {
		pr_crit("%s: Can't create timewait sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}

	return 0;
}

static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
	if (!rsk_prot)
		return;
	kfree(rsk_prot->slab_name);
	rsk_prot->slab_name = NULL;
	kmem_cache_destroy(rsk_prot->slab);
	rsk_prot->slab = NULL;
}

static int req_prot_init(const struct proto *prot)
{
	struct request_sock_ops *rsk_prot = prot->rsk_prot;

	if (!rsk_prot)
		return 0;

	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
					prot->name);
	if (!rsk_prot->slab_name)
		return -ENOMEM;

	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
					   rsk_prot->obj_size, 0,
					   SLAB_ACCOUNT | prot->slab_flags,
					   NULL);

	if (!rsk_prot->slab) {
		pr_crit("%s: Can't create request sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}
	return 0;
}

int proto_register(struct proto *prot, int alloc_slab)
{
	int ret = -ENOBUFS;

	if (prot->memory_allocated && !prot->sysctl_mem) {
		pr_err("%s: missing sysctl_mem\n", prot->name);
		return -EINVAL;
	}
	if (prot->memory_allocated && !prot->per_cpu_fw_alloc) {
		pr_err("%s: missing per_cpu_fw_alloc\n", prot->name);
		return -EINVAL;
	}
	if (alloc_slab) {
		prot->slab = kmem_cache_create_usercopy(prot->name,
					prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
					prot->slab_flags,
					prot->useroffset, prot->usersize,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (req_prot_init(prot))
			goto out_free_request_sock_slab;

		if (tw_prot_init(prot))
			goto out_free_timewait_sock_slab;
	}

	mutex_lock(&proto_list_mutex);
	ret = assign_proto_idx(prot);
	if (ret) {
		mutex_unlock(&proto_list_mutex);
		goto out_free_timewait_sock_slab;
	}
	list_add(&prot->node, &proto_list);
	mutex_unlock(&proto_list_mutex);
	return ret;

out_free_timewait_sock_slab:
	if (alloc_slab)
		tw_prot_cleanup(prot->twsk_prot);
out_free_request_sock_slab:
	if (alloc_slab) {
		req_prot_cleanup(prot->rsk_prot);

		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}
out:
	return ret;
}
EXPORT_SYMBOL(proto_register);
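
/*
 * Sketch of a registration call (hypothetical protocol; compare real
 * users such as tcp_prot). A non-zero alloc_slab requests a dedicated
 * kmem_cache for the sockets, plus request_sock/timewait_sock caches
 * when ->rsk_prot/->twsk_prot are set:
 *
 *	static struct proto example_prot = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	err = proto_register(&example_prot, 1);
 *	...
 *	proto_unregister(&example_prot);
 */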

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;

	req_prot_cleanup(prot->rsk_prot);
	tw_prot_cleanup(prot->twsk_prot);
}
EXPORT_SYMBOL(proto_unregister);

int sock_load_diag_module(int family, int protocol)
{
	if (!protocol) {
		if (!sock_is_registered(family))
			return -ENOENT;

		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				      NETLINK_SOCK_DIAG, family);
	}

#ifdef CONFIG_INET
	if (family == AF_INET &&
	    protocol != IPPROTO_RAW &&
	    protocol < MAX_INET_PROTOS &&
	    !rcu_access_pointer(inet_protos[protocol]))
		return -ENOENT;
#endif

	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			      NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);
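
/*
 * Worked example: with PF_NETLINK == 16 and NETLINK_SOCK_DIAG == 4,
 * asking for the AF_INET (2) family requests the module alias
 * "net-pf-16-proto-4-type-2" (inet_diag), while AF_INET with
 * IPPROTO_TCP (6) requests "net-pf-16-proto-4-type-2-6" (tcp_diag).
 */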

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static const char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
		proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */

#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		return true;

	if (sk_is_udp(sk) &&
	    !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
		return true;

	return sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */

int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	if (!sk->sk_prot->bind_add)
		return -EOPNOTSUPP;
	return sk->sk_prot->bind_add(sk, addr, addr_len);
}
EXPORT_SYMBOL(sock_bind_add);

/* Copy 'size' bytes from userspace and return 'size' back to userspace */
int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
		     void __user *arg, void *karg, size_t size)
{
	int ret;

	if (copy_from_user(karg, arg, size))
		return -EFAULT;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg);
	if (ret)
		return ret;

	if (copy_to_user(arg, karg, size))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(sock_ioctl_inout);
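
/*
 * Sketch of the resulting split (hypothetical handler;
 * example_first_packet_length() is a made-up helper): the protocol's
 * ->ioctl() only ever sees kernel memory, so it can be as simple as
 *
 *	static int example_ioctl(struct sock *sk, int cmd, int *karg)
 *	{
 *		if (cmd != SIOCINQ)
 *			return -ENOIOCTLCMD;
 *		*karg = example_first_packet_length(sk);
 *		return 0;
 *	}
 *
 * while sock_ioctl_inout() brackets it with copy_from_user() and
 * copy_to_user().
 */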

/* This is the most common ioctl prep function, where the result (4 bytes) is
 * copied back to userspace if the ioctl() returns successfully. No input is
 * copied from userspace.
 */
static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int ret, karg = 0;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
	if (ret)
		return ret;

	return put_user(karg, (int __user *)arg);
}

/* A wrapper around sock ioctls, which copies the data from userspace
 * (depending on the protocol/ioctl), and copies back the result to userspace.
 * The main motivation for this function is to pass kernel memory to the
 * protocol ioctl callbacks, instead of userspace memory.
 */
int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int rc = 1;

	if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET)
		rc = ipmr_sk_ioctl(sk, cmd, arg);
	else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6)
		rc = ip6mr_sk_ioctl(sk, cmd, arg);
	else if (sk_is_phonet(sk))
		rc = phonet_sk_ioctl(sk, cmd, arg);

	/* If the ioctl was processed, return its result. */
	if (rc <= 0)
		return rc;

	/* Otherwise call the default handler. */
	return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);

static int __init sock_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
	return 0;
}

core_initcall(sock_struct_check);

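/*
 * Sketch of the layout contract asserted above (simplified; the real
 * group markers live in struct sock in <net/sock.h>): hot-path fields
 * are bracketed with __cacheline_group_begin()/__cacheline_group_end()
 * so the RX and TX paths touch as few cache lines as possible, e.g.
 *
 *	struct sock {
 *		...
 *		__cacheline_group_begin(sock_write_rx);
 *		atomic_t	sk_drops;
 *		...
 *		__cacheline_group_end(sock_write_rx);
 *		...
 *	};
 *
 * CACHELINE_ASSERT_GROUP_MEMBER() then breaks the build if a field is
 * moved out of the group it is asserted to belong to.
 */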