/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _PROTO_MEMORY_H
#define _PROTO_MEMORY_H

#include <net/sock.h>
#include <net/hotdata.h>

/* 1 MB per cpu, in page units */
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))

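/* Return true if the socket's protocol tracks memory pressure
 * (e.g. TCP); protocols that leave ->memory_pressure NULL are
 * never subject to it.
 */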
static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

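/* Lockless read of the protocol-wide memory pressure flag. */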
static inline bool
proto_memory_pressure(const struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!READ_ONCE(*prot->memory_pressure);
}

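/* Global (protocol-wide) pressure only; per-memcg socket pressure is
 * deliberately not considered here.
 */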
static inline bool sk_under_global_memory_pressure(const struct sock *sk)
{
	return proto_memory_pressure(sk->sk_prot);
}

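/* A socket is under memory pressure if its memory cgroup is under
 * socket pressure (when cgroup socket accounting is enabled), or if
 * its protocol's global pressure flag is set.
 */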
static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return !!READ_ONCE(*sk->sk_prot->memory_pressure);
}

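/* Pages charged to the protocol. Clamped at zero: the per-cpu
 * forward-alloc caches below mean the global counter can transiently
 * read negative.
 */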
static inline long
proto_memory_allocated(const struct proto *prot)
{
	return max(0L, atomic_long_read(prot->memory_allocated));
}

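/* Pages charged to this socket's protocol as a whole. */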
static inline long
sk_memory_allocated(const struct sock *sk)
{
	return proto_memory_allocated(sk->sk_prot);
}

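/* Flush this cpu's cached forward-alloc delta into the protocol-wide
 * counter. this_cpu_xchg() grabs and resets the cache in one step.
 */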
static inline void proto_memory_pcpu_drain(struct proto *proto)
{
	int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);

	if (val)
		atomic_long_add(val, proto->memory_allocated);
}

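/* Charge @val pages to the per-cpu cache, spilling into the global
 * atomic counter only once the cache exceeds sysctl_mem_pcpu_rsv,
 * keeping the shared atomic off the fast path.
 */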
static inline void
sk_memory_allocated_add(const struct sock *sk, int val)
{
	struct proto *proto = sk->sk_prot;

	val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);

	if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))
		proto_memory_pcpu_drain(proto);
}

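/* Uncharge @val pages; drain once the per-cpu credit grows past the
 * reserve in the negative direction.
 */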
static inline void
sk_memory_allocated_sub(const struct sock *sk, int val)
{
	struct proto *proto = sk->sk_prot;

	val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);

	if (unlikely(val <= -READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))
		proto_memory_pcpu_drain(proto);
}

#endif /* _PROTO_MEMORY_H */