/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_CORE_DEV_H
#define _NET_CORE_DEV_H

#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/rwsem.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>

struct net;
struct netlink_ext_ack;
struct cpumask;

/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};

extern int netdev_flow_limit_table_len;

struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);

struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *netdev_get_by_name_lock(struct net *net, const char *name);
struct net_device *__netdev_put_lock(struct net_device *dev);
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
		    unsigned long *index);

DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T));

#define for_each_netdev_lock_scoped(net, var_name, ifindex)		\
	for (struct net_device *var_name __free(netdev_unlock) = NULL;	\
	     (var_name = netdev_xa_find_lock(net, var_name, &ifindex)); \
	     ifindex++)

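/*
 * Illustrative usage sketch for the scoped iterator above (not existing
 * kernel code; do_per_device_work() and err are placeholders).
 * netdev_xa_find_lock() is expected to hand back the next device already
 * instance-locked and to release the previous one as the walk advances;
 * the __free(netdev_unlock) cleanup on the loop variable then drops the
 * lock of whichever device is still held when the loop is left, including
 * via an early return:
 *
 *	unsigned long ifindex = 0;
 *
 *	for_each_netdev_lock_scoped(net, dev, ifindex) {
 *		err = do_per_device_work(dev);
 *		if (err)
 *			return err;
 *	}
 */
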
#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_run_queue(void);

void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);

#if IS_ENABLED(CONFIG_NET_SHAPER)
void net_shaper_flush_netdev(struct net_device *dev);
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq);
#else
static inline void net_shaper_flush_netdev(struct net_device *dev) {}
static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
						      unsigned int txq) {}
#endif

/* sysctls not referred to from outside net/core/ */
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;

/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);

/* netdev management, shared between various uAPI entry points */
struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
	struct rcu_head rcu;
};

int netdev_get_name(struct net *net, char *name, int ifindex);
int netif_change_name(struct net_device *dev, const char *newname);
int dev_change_name(struct net_device *dev, const char *newname);

#define netdev_for_each_altname(dev, namenode)				\
	list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next)		\
	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
				 list)

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
		      struct netlink_ext_ack *extack);

int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);

int netif_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
void netdev_change_proto_down_reason_locked(struct net_device *dev,
					    unsigned long mask, u32 value);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);

int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
void netif_set_group(struct net_device *dev, int new_group);
void dev_set_group(struct net_device *dev, int new_group);
int netif_change_carrier(struct net_device *dev, bool new_carrier);
int dev_change_carrier(struct net_device *dev, bool new_carrier);

void __dev_set_rx_mode(struct net_device *dev);

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh);

void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh);

static inline void netif_set_up(struct net_device *dev, bool value)
{
	if (value)
		dev->flags |= IFF_UP;
	else
		dev->flags &= ~IFF_UP;

	/* dev->up is written under the instance lock; ops-locked devices
	 * are expected to have it held by the caller already, so only
	 * take it for the remaining ones.
	 */
	if (!netdev_need_ops_lock(dev))
		netdev_lock(dev);
	dev->up = value;
	if (!netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_size, size);
	if (size <= GSO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}

static inline void netif_set_gro_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_max_size, size);
	if (size <= GRO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gro_ipv4_max_size, size);
}

static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
						unsigned int size)
{
	/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
						unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_ipv4_max_size, size);
}

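/*
 * The netif_set_{gso,gro}_* helpers above all follow the same pattern:
 * the writer publishes the new limit with WRITE_ONCE() so that lockless
 * readers (sk_setup_caps(), skb_gro_receive()) can load it with a paired
 * READ_ONCE() without taking any lock.  An illustrative reader side, with
 * the surrounding logic being a placeholder:
 *
 *	unsigned int gso_max = READ_ONCE(dev->gso_max_size);
 *
 * A reader may observe a value that lags a concurrent update, which is
 * acceptable for these soft caps; the WRITE_ONCE()/READ_ONCE() pair rules
 * out torn accesses and tells the compiler the value can change underneath
 * it.
 */
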
/**
 * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs
 * @n: napi struct to get the defer_hard_irqs field from
 *
 * Return: the per-NAPI value of the defer_hard_irqs field.
 */
static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n)
{
	return READ_ONCE(n->defer_hard_irqs);
}

/**
 * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi
 * @n: napi_struct to set the defer_hard_irqs field
 * @defer: the value the field should be set to
 */
static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer)
{
	WRITE_ONCE(n->defer_hard_irqs, defer);
}

/**
 * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev
 * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set
 * @defer: the defer_hard_irqs value to set
 */
static inline void netdev_set_defer_hard_irqs(struct net_device *netdev,
					      u32 defer)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->napi_defer_hard_irqs, defer);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_defer_hard_irqs(napi, defer);

	for (i = 0; i < count; i++)
		netdev->napi_config[i].defer_hard_irqs = defer;
}

/**
 * napi_get_gro_flush_timeout - get the gro_flush_timeout
 * @n: napi struct to get the gro_flush_timeout from
 *
 * Return: the per-NAPI value of the gro_flush_timeout field.
 */
static inline unsigned long
napi_get_gro_flush_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->gro_flush_timeout);
}

/**
 * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi
 * @n: napi struct to set the gro_flush_timeout
 * @timeout: timeout value to set
 *
 * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout
 */
static inline void napi_set_gro_flush_timeout(struct napi_struct *n,
					      unsigned long timeout)
{
	WRITE_ONCE(n->gro_flush_timeout, timeout);
}

/**
 * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs
 * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set
 * @timeout: the timeout value to set
 */
static inline void netdev_set_gro_flush_timeout(struct net_device *netdev,
						unsigned long timeout)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->gro_flush_timeout, timeout);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_gro_flush_timeout(napi, timeout);

	for (i = 0; i < count; i++)
		netdev->napi_config[i].gro_flush_timeout = timeout;
}

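/*
 * Note on the two netdev-wide setters above: besides updating every NAPI
 * currently linked on netdev->napi_list, they also write the value into
 * netdev->napi_config[] (one slot per queue, hence the
 * max(num_rx_queues, num_tx_queues) bound), presumably so NAPIs that are
 * (re)attached later pick the same setting back up.  An illustrative
 * caller, assuming it already holds whatever lock protects napi_list:
 *
 *	netdev_set_defer_hard_irqs(netdev, defer);
 *	netdev_set_gro_flush_timeout(netdev, timeout);
 */
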
/**
 * napi_get_irq_suspend_timeout - get the irq_suspend_timeout
 * @n: napi struct to get the irq_suspend_timeout from
 *
 * Return: the per-NAPI value of the irq_suspend_timeout field.
 */
static inline unsigned long
napi_get_irq_suspend_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->irq_suspend_timeout);
}

/**
 * napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi
 * @n: napi struct to set the irq_suspend_timeout
 * @timeout: timeout value to set
 *
 * napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout
 */
static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
						unsigned long timeout)
{
	WRITE_ONCE(n->irq_suspend_timeout, timeout);
}

int rps_cpumask_housekeeping(struct cpumask *mask);

#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi);
#else
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif

/* Best effort check that NAPI is not idle (can't be scheduled to run) */
static inline void napi_assert_will_not_race(const struct napi_struct *napi)
{
	/* uninitialized instance, can't race */
	if (!napi->poll_list.next)
		return;

	/* SCHED bit is set on disabled instances */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
	WARN_ON(READ_ONCE(napi->list_owner) != -1);
}

void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);

/* Transmit recursion is bounded to avoid stack exhaustion when stacked
 * devices re-enter the xmit path.  Without PREEMPT_RT the counter lives in
 * per-CPU softnet_data (the xmit path runs with BH disabled, so it cannot
 * migrate); with PREEMPT_RT that path is preemptible, so the count is kept
 * per task instead.
 */
#define XMIT_RECURSION_LIMIT	8

#ifndef CONFIG_PREEMPT_RT
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
#else
static inline bool dev_xmit_recursion(void)
{
	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	current->net_xmit.recursion++;
}

static inline void dev_xmit_recursion_dec(void)
{
	current->net_xmit.recursion--;
}
#endif

int dev_set_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack);
int dev_get_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg);
int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg);

#endif