/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_CORE_DEV_H
#define _NET_CORE_DEV_H

#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/rwsem.h>
#include <linux/netdevice.h>

struct net;
struct netlink_ext_ack;
struct cpumask;

/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
	u64		count;
	unsigned int	num_buckets;
	unsigned int	history_head;
	u16		history[FLOW_LIMIT_HISTORY];
	u8		buckets[];
};

extern int netdev_flow_limit_table_len;

struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);

struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *netdev_get_by_name_lock(struct net *net, const char *name);
struct net_device *__netdev_put_lock(struct net_device *dev);
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
		    unsigned long *index);

DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T));

#define for_each_netdev_lock_scoped(net, var_name, ifindex)		\
	for (struct net_device *var_name __free(netdev_unlock) = NULL;	\
	     (var_name = netdev_xa_find_lock(net, var_name, &ifindex));	\
	     ifindex++)

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_run_queue(void);

void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);

#if IS_ENABLED(CONFIG_NET_SHAPER)
void net_shaper_flush_netdev(struct net_device *dev);
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq);
#else
static inline void net_shaper_flush_netdev(struct net_device *dev) {}
static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
						     unsigned int txq) {}
#endif

/* sysctls not referred to from outside net/core/ */
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;

/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);

/* netdev management, shared between various uAPI entry points */
struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
	struct rcu_head rcu;
};

int netdev_get_name(struct net *net, char *name, int ifindex);
int netif_change_name(struct net_device *dev, const char *newname);
int dev_change_name(struct net_device *dev, const char *newname);

#define netdev_for_each_altname(dev, namenode)				\
	list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next)		\
	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
				 list)

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
		      struct netlink_ext_ack *extack);
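/*
 * Illustrative sketch only, not part of this header's API: one way the
 * for_each_netdev_lock_scoped() helper defined earlier in this header could
 * be used. The caller and walk_one() are hypothetical names for the example;
 * real users should follow the locking rules documented for
 * netdev_lock()/netdev_unlock().
 *
 *	static void walk_netns_locked(struct net *net)
 *	{
 *		unsigned long ifindex = 0;
 *
 *		for_each_netdev_lock_scoped(net, dev, ifindex) {
 *			// dev is instance-locked inside the loop body;
 *			// __free(netdev_unlock) drops the lock when dev goes
 *			// out of scope, including on an early break.
 *			walk_one(dev);
 *		}
 *	}
 */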
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);

int netif_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
void netdev_change_proto_down_reason_locked(struct net_device *dev,
					    unsigned long mask, u32 value);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);

int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
void netif_set_group(struct net_device *dev, int new_group);
void dev_set_group(struct net_device *dev, int new_group);
int netif_change_carrier(struct net_device *dev, bool new_carrier);
int dev_change_carrier(struct net_device *dev, bool new_carrier);

void __dev_set_rx_mode(struct net_device *dev);

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh);

void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh);

static inline void netif_set_up(struct net_device *dev, bool value)
{
	if (value)
		dev->flags |= IFF_UP;
	else
		dev->flags &= ~IFF_UP;

	if (!netdev_need_ops_lock(dev))
		netdev_lock(dev);
	dev->up = value;
	if (!netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_size, size);
	if (size <= GSO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}

static inline void netif_set_gro_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_max_size, size);
	if (size <= GRO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gro_ipv4_max_size, size);
}

static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
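/*
 * Illustrative sketch only (hypothetical driver code, not taken from this
 * file): the setters above keep the IPv4 limit in sync only while the new
 * value stays within the legacy range, so a caller raising the limit past
 * GSO_LEGACY_MAX_SIZE / GRO_LEGACY_MAX_SIZE updates the IPv4 variant
 * separately if that is what it wants:
 *
 *	netif_set_gso_max_size(dev, GSO_LEGACY_MAX_SIZE);  // IPv4 limit follows
 *	netif_set_gso_max_size(dev, 128 * 1024);           // beyond legacy: IPv6 only
 *	netif_set_gso_ipv4_max_size(dev, 128 * 1024);      // opt IPv4 in explicitly
 */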
/**
 * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs
 * @n: napi struct to get the defer_hard_irqs field from
 *
 * Return: the per-NAPI value of the defer_hard_irqs field.
 */
static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n)
{
	return READ_ONCE(n->defer_hard_irqs);
}

/**
 * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi
 * @n: napi_struct to set the defer_hard_irqs field
 * @defer: the value the field should be set to
 */
static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer)
{
	WRITE_ONCE(n->defer_hard_irqs, defer);
}

/**
 * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev
 * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set
 * @defer: the defer_hard_irqs value to set
 */
static inline void netdev_set_defer_hard_irqs(struct net_device *netdev,
					      u32 defer)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->napi_defer_hard_irqs, defer);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_defer_hard_irqs(napi, defer);

	for (i = 0; i < count; i++)
		netdev->napi_config[i].defer_hard_irqs = defer;
}

/**
 * napi_get_gro_flush_timeout - get the gro_flush_timeout
 * @n: napi struct to get the gro_flush_timeout from
 *
 * Return: the per-NAPI value of the gro_flush_timeout field.
 */
static inline unsigned long
napi_get_gro_flush_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->gro_flush_timeout);
}

/**
 * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi
 * @n: napi struct to set the gro_flush_timeout
 * @timeout: timeout value to set
 *
 * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout
 */
static inline void napi_set_gro_flush_timeout(struct napi_struct *n,
					      unsigned long timeout)
{
	WRITE_ONCE(n->gro_flush_timeout, timeout);
}

/**
 * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs
 * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set
 * @timeout: the timeout value to set
 */
static inline void netdev_set_gro_flush_timeout(struct net_device *netdev,
						unsigned long timeout)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->gro_flush_timeout, timeout);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_gro_flush_timeout(napi, timeout);

	for (i = 0; i < count; i++)
		netdev->napi_config[i].gro_flush_timeout = timeout;
}
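/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * the netdev-wide setters above fan the value out to every NAPI currently
 * linked on dev->napi_list and into napi_config[], while the napi_* setters
 * touch a single instance. Serialization against NAPI add/remove is the
 * caller's responsibility and is omitted here.
 *
 *	netdev_set_gro_flush_timeout(dev, timeout);	// every NAPI of dev
 *	napi_set_gro_flush_timeout(one_napi, 0);	// override one NAPI
 */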
/**
 * napi_get_irq_suspend_timeout - get the irq_suspend_timeout
 * @n: napi struct to get the irq_suspend_timeout from
 *
 * Return: the per-NAPI value of the irq_suspend_timeout field.
 */
static inline unsigned long
napi_get_irq_suspend_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->irq_suspend_timeout);
}

/**
 * napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi
 * @n: napi struct to set the irq_suspend_timeout
 * @timeout: timeout value to set
 *
 * napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout
 */
static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
						unsigned long timeout)
{
	WRITE_ONCE(n->irq_suspend_timeout, timeout);
}

int rps_cpumask_housekeeping(struct cpumask *mask);

#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi);
#else
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif

/* Best effort check that NAPI is not idle (can't be scheduled to run) */
static inline void napi_assert_will_not_race(const struct napi_struct *napi)
{
	/* uninitialized instance, can't race */
	if (!napi->poll_list.next)
		return;

	/* SCHED bit is set on disabled instances */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
	WARN_ON(READ_ONCE(napi->list_owner) != -1);
}

void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);

#define XMIT_RECURSION_LIMIT	8

#ifndef CONFIG_PREEMPT_RT
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
#else
static inline bool dev_xmit_recursion(void)
{
	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	current->net_xmit.recursion++;
}

static inline void dev_xmit_recursion_dec(void)
{
	current->net_xmit.recursion--;
}
#endif

int dev_set_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack);
int dev_get_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg);
int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg);

#endif