/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_RPS_H
#define _NET_RPS_H

#include <linux/types.h>
#include <linux/static_key.h>
#include <net/sock.h>
#include <net/hotdata.h>

#ifdef CONFIG_RPS

extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;

/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, a
 * hardware filter index, and the hash of the flow if aRFS is enabled.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
#ifdef CONFIG_RFS_ACCEL
	u32 hash;
#endif
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	u8 log;
	struct rcu_head rcu;
	struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value. Upper part is the high-order bits
 * of flow hash, lower part is CPU number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	struct rcu_head rcu;
	u32 mask;

	u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	unsigned int index = hash & table->mask;
	u32 val = hash & ~net_hotdata.rps_cpu_mask;

	/* We only give a hint, preemption can change CPU under us */
	val |= raw_smp_processor_id();

	/* The following WRITE_ONCE() is paired with the READ_ONCE()
	 * here, and another one in get_rps_cpu().
	 */
	if (READ_ONCE(table->ents[index]) != val)
		WRITE_ONCE(table->ents[index], val);
}
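
/*
 * Illustrative sketch only; this helper does not exist in the kernel.
 * It shows roughly how a reader such as get_rps_cpu() recovers the CPU
 * recorded by rps_record_sock_flow() above. Only the entry layout
 * documented for rps_sock_flow_table (high-order flow hash bits in the
 * upper part, CPU number in the lower part) and net_hotdata.rps_cpu_mask
 * come from this header; the function name and the use of RPS_NO_CPU as
 * a "no valid entry" return value are assumptions made for the example.
 */
static inline u32 rps_example_lookup_cpu(const struct rps_sock_flow_table *table,
					 u32 hash)
{
	u32 ident = READ_ONCE(table->ents[hash & table->mask]);

	/* The entry is stale or belongs to another flow if its stored
	 * high-order hash bits do not match ours.
	 */
	if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
		return RPS_NO_CPU;

	/* The low-order bits hold the CPU saved at recvmsg() time. */
	return ident & net_hotdata.rps_cpu_mask;
}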

#endif /* CONFIG_RPS */

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	if (!hash)
		return;
	rcu_read_lock();
	sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
	if (sock_flow_table)
		rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rfs_needed)) {
		/* Reading sk->sk_rxhash might incur an expensive cache line
		 * miss.
		 *
		 * TCP_ESTABLISHED does cover almost all states where RFS
		 * might be useful, and is cheaper [1] than testing :
		 * IPv4: inet_sk(sk)->inet_daddr
		 * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
		 * OR an additional socket flag
		 * [1] : sk_state and sk_prot are in the same cache line.
		 */
		if (sk->sk_state == TCP_ESTABLISHED) {
			/* This READ_ONCE() is paired with the WRITE_ONCE()
			 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
			 */
			sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
		}
	}
#endif
}

static inline void sock_rps_delete_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *table;
	u32 hash, index;

	if (!static_branch_unlikely(&rfs_needed))
		return;

	hash = READ_ONCE(sk->sk_rxhash);
	if (!hash)
		return;

	rcu_read_lock();
	table = rcu_dereference(net_hotdata.rps_sock_flow_table);
	if (table) {
		index = hash & table->mask;
		/* Clear the entry so RFS stops steering this flow. */
		if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)
			WRITE_ONCE(table->ents[index], RPS_NO_CPU);
	}
	rcu_read_unlock();
#endif
}

/*
 * Per-CPU backlog accounting: input_queue_tail counts enqueued packets,
 * input_queue_head counts processed ones. RFS records the tail at the time
 * of a flow's last enqueue (rps_dev_flow::last_qtail) and only steers the
 * flow to a different CPU once the old CPU's head has passed that value,
 * i.e. once the old backlog has drained and reordering cannot occur.
 */
static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return ++sd->input_queue_tail;
#else
	return 0;
#endif
}

static inline void rps_input_queue_tail_save(u32 *dest, u32 tail)
{
#ifdef CONFIG_RPS
	WRITE_ONCE(*dest, tail);
#endif
}

static inline void rps_input_queue_head_add(struct softnet_data *sd, int val)
{
#ifdef CONFIG_RPS
	WRITE_ONCE(sd->input_queue_head, sd->input_queue_head + val);
#endif
}

static inline void rps_input_queue_head_incr(struct softnet_data *sd)
{
	rps_input_queue_head_add(sd, 1);
}

#endif /* _NET_RPS_H */