xref: /linux/net/core/dev.h (revision 3d2c3d2eea9acdbee5b5742d15d021069b49d3f9)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _NET_CORE_DEV_H
3 #define _NET_CORE_DEV_H
4 
5 #include <linux/cleanup.h>
6 #include <linux/types.h>
7 #include <linux/rwsem.h>
8 #include <linux/netdevice.h>
9 #include <net/netdev_lock.h>
10 
11 struct net;
12 struct netlink_ext_ack;
13 struct netdev_queue_config;
14 struct cpumask;
15 struct pp_memory_provider_params;
16 
17 /* Random bits of netdevice that don't need to be exposed */
/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
/* Per-CPU flow-limit state.  history[] is a window of FLOW_LIMIT_HISTORY
 * recent bucket ids; since the window holds 128 entries, a u8 bucket
 * counter cannot overflow even if every entry maps to one bucket.
 * Field update rules live in net/core/dev.c — NOTE(review): semantics
 * below inferred from types, confirm against the flow-limit code.
 */
struct sd_flow_limit {
	struct rcu_head		rcu;		/* deferred free via RCU */
	unsigned int		count;		/* entries accounted in history[] */
	u8			log_buckets;	/* buckets[] has 1 << log_buckets slots */
	unsigned int		history_head;	/* next history[] slot to overwrite */
	u16			history[FLOW_LIMIT_HISTORY];	/* recent bucket ids */
	u8			buckets[];	/* per-bucket hit counters */
};
27 
28 extern int netdev_flow_limit_table_len;
29 
30 struct napi_struct *
31 netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
32 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
33 
34 struct net_device *__netdev_put_lock(struct net_device *dev, struct net *net);
35 struct net_device *netdev_put_lock(struct net_device *dev, struct net *net,
36 				   netdevice_tracker *tracker);
37 struct net_device *
38 netdev_xa_find_lock(struct net *net, struct net_device *dev,
39 		    unsigned long *index);
40 
/* Auto-cleanup class: a net_device pointer marked __free(netdev_unlock)
 * has netdev_unlock() called on it (when non-NULL) as it leaves scope.
 */
DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T));

/* Walk @net's devices starting from *@ifindex.  Each iteration yields
 * @var_name returned locked by netdev_xa_find_lock(); the __free()
 * cleanup above unlocks it automatically when the iteration's scope
 * exits.  The loop ends when netdev_xa_find_lock() returns NULL.
 */
#define for_each_netdev_lock_scoped(net, var_name, ifindex)		\
	for (struct net_device *var_name __free(netdev_unlock) = NULL;	\
	     (var_name = netdev_xa_find_lock(net, var_name, &ifindex)); \
	     ifindex++)
47 
48 struct net_device *
49 netdev_get_by_index_lock_ops_compat(struct net *net, int ifindex);
50 struct net_device *
51 netdev_xa_find_lock_ops_compat(struct net *net, struct net_device *dev,
52 			       unsigned long *index);
53 
/* Auto-cleanup class: as netdev_unlock above, but releases via
 * netdev_unlock_ops_compat() for the ops-compat locking scheme.
 */
DEFINE_FREE(netdev_unlock_ops_compat, struct net_device *,
	    if (_T) netdev_unlock_ops_compat(_T));

/* Ops-compat variant of for_each_netdev_lock_scoped(): iterates @net's
 * devices from *@ifindex, each held locked by
 * netdev_xa_find_lock_ops_compat() and auto-unlocked via __free().
 */
#define for_each_netdev_lock_ops_compat_scoped(net, var_name, ifindex)	\
	for (struct net_device *var_name __free(netdev_unlock_ops_compat) = NULL; \
	     (var_name = netdev_xa_find_lock_ops_compat(net, var_name,	\
							&ifindex));	\
	     ifindex++)
62 
63 #ifdef CONFIG_PROC_FS
64 int __init dev_proc_init(void);
65 #else
66 #define dev_proc_init() 0
67 #endif
68 
69 void linkwatch_init_dev(struct net_device *dev);
70 void linkwatch_run_queue(void);
71 
72 void dev_addr_flush(struct net_device *dev);
73 int dev_addr_init(struct net_device *dev);
74 void dev_addr_check(struct net_device *dev);
75 
76 #if IS_ENABLED(CONFIG_NET_SHAPER)
77 void net_shaper_flush_netdev(struct net_device *dev);
78 void net_shaper_set_real_num_tx_queues(struct net_device *dev,
79 				       unsigned int txq);
80 #else
81 static inline void net_shaper_flush_netdev(struct net_device *dev) {}
82 static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
83 						     unsigned int txq) {}
84 #endif
85 
86 /* sysctls not referred to from outside net/core/ */
87 extern int		netdev_unregister_timeout_secs;
88 extern int		weight_p;
89 extern int		dev_weight_rx_bias;
90 extern int		dev_weight_tx_bias;
91 
92 extern struct rw_semaphore dev_addr_sem;
93 
94 /* rtnl helpers */
95 extern struct list_head net_todo_list;
96 void netdev_run_todo(void);
97 
98 int netdev_queue_config_validate(struct net_device *dev, int rxq_idx,
99 				 struct netdev_queue_config *qcfg,
100 				 struct netlink_ext_ack *extack);
101 
102 bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx);
103 bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx);
104 bool netif_is_queue_leasee(const struct net_device *dev);
105 
106 void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,
107 			      const struct pp_memory_provider_params *p);
108 
109 void netif_rxq_cleanup_unlease(struct netdev_rx_queue *phys_rxq,
110 			       struct netdev_rx_queue *virt_rxq);
111 
112 /* netdev management, shared between various uAPI entry points */
/* Ties one name (the primary name or an altname) to a net_device.
 * Sibling names of the same device are chained through @list off
 * dev->name_node->list (see netdev_for_each_altname below).
 * @hlist is presumably linkage into a name-lookup hash table —
 * NOTE(review): confirm against the lookup code in dev.c.
 */
struct netdev_name_node {
	struct hlist_node hlist;	/* name-lookup hash linkage */
	struct list_head list;		/* other names of the same device */
	struct net_device *dev;		/* owning device */
	const char *name;		/* name this node represents */
	struct rcu_head rcu;		/* deferred free via RCU */
};
120 
121 int netdev_get_name(struct net *net, char *name, int ifindex);
122 int netif_change_name(struct net_device *dev, const char *newname);
123 int dev_change_name(struct net_device *dev, const char *newname);
124 
/* Iterate @dev's alternative-name nodes (entries on
 * dev->name_node->list).  The _safe variant caches the next entry in
 * @next so the current node may be deleted during iteration.
 */
#define netdev_for_each_altname(dev, namenode)				\
	list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next)		\
	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
				 list)
130 
131 int netdev_name_node_alt_create(struct net_device *dev, const char *name);
132 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
133 
134 int dev_validate_mtu(struct net_device *dev, int mtu,
135 		     struct netlink_ext_ack *extack);
136 int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
137 		      struct netlink_ext_ack *extack);
138 
139 int dev_get_phys_port_id(struct net_device *dev,
140 			 struct netdev_phys_item_id *ppid);
141 int dev_get_phys_port_name(struct net_device *dev,
142 			   char *name, size_t len);
143 
144 int netif_change_proto_down(struct net_device *dev, bool proto_down);
145 int dev_change_proto_down(struct net_device *dev, bool proto_down);
146 void netdev_change_proto_down_reason_locked(struct net_device *dev,
147 					    unsigned long mask, u32 value);
148 
149 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
150 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
151 		      int fd, int expected_fd, u32 flags);
152 
153 int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
154 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
155 void netif_set_group(struct net_device *dev, int new_group);
156 void dev_set_group(struct net_device *dev, int new_group);
157 int netif_change_carrier(struct net_device *dev, bool new_carrier);
158 int dev_change_carrier(struct net_device *dev, bool new_carrier);
159 
160 void __dev_set_rx_mode(struct net_device *dev);
161 
162 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
163 			unsigned int gchanges, u32 portid,
164 			const struct nlmsghdr *nlh);
165 
166 void unregister_netdevice_many_notify(struct list_head *head,
167 				      u32 portid, const struct nlmsghdr *nlh);
168 
/* Record administrative up/down state: mirror IFF_UP into dev->flags
 * and, under the instance lock, set dev->up.
 *
 * The instance lock is taken here only when the device does NOT use
 * ops locking — presumably ops-locked devices already hold
 * netdev_lock() on this path.  NOTE(review): confirm against callers.
 */
static inline void netif_set_up(struct net_device *dev, bool value)
{
	if (value)
		dev->flags |= IFF_UP;
	else
		dev->flags &= ~IFF_UP;

	/* dev->up is written under the instance lock */
	if (!netdev_need_ops_lock(dev))
		netdev_lock(dev);
	dev->up = value;
	if (!netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}
182 
/* Set the device's advertised GSO size limit.  WRITE_ONCE is required
 * because readers are lockless (see comment below).
 */
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_size, size);
	/* mirror into the IPv4 limit while the value fits the legacy range */
	if (size <= GSO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
191 
/* Set the maximum number of GSO segments the device advertises. */
static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}
198 
/* Set the device's GRO aggregate size limit.  WRITE_ONCE pairs with
 * the lockless readers noted below.
 */
static inline void netif_set_gro_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_max_size, size);
	/* mirror into the IPv4 limit while the value fits the legacy range */
	if (size <= GRO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
207 
/* Set the IPv4-specific GSO size limit independently of gso_max_size. */
static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
214 
/* Set the IPv4-specific GRO size limit independently of gro_max_size. */
static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
221 
/**
 * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs
 * @n: napi struct to get the defer_hard_irqs field from
 *
 * Return: the per-NAPI value of the defer_hard_irqs field.
 */
static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n)
{
	/* pairs with the WRITE_ONCE() in napi_set_defer_hard_irqs() */
	return READ_ONCE(n->defer_hard_irqs);
}
232 
/**
 * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi
 * @n: napi_struct to set the defer_hard_irqs field
 * @defer: the value the field should be set to
 */
static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer)
{
	/* pairs with the READ_ONCE() in napi_get_defer_hard_irqs() */
	WRITE_ONCE(n->defer_hard_irqs, defer);
}
242 
/**
 * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev
 * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set
 * @defer: the defer_hard_irqs value to set
 */
static inline void netdev_set_defer_hard_irqs(struct net_device *netdev,
					      u32 defer)
{
	/* assumes napi_config[] has at least max(rx, tx) queue entries —
	 * NOTE(review): confirm against the napi_config allocation site
	 */
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	/* device-wide value */
	WRITE_ONCE(netdev->napi_defer_hard_irqs, defer);
	/* every live NAPI instance */
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_defer_hard_irqs(napi, defer);

	/* persistent per-queue config */
	for (i = 0; i < count; i++)
		netdev->napi_config[i].defer_hard_irqs = defer;
}
263 
/**
 * napi_get_gro_flush_timeout - get the gro_flush_timeout
 * @n: napi struct to get the gro_flush_timeout from
 *
 * Return: the per-NAPI value of the gro_flush_timeout field.
 */
static inline unsigned long
napi_get_gro_flush_timeout(const struct napi_struct *n)
{
	/* pairs with the WRITE_ONCE() in napi_set_gro_flush_timeout() */
	return READ_ONCE(n->gro_flush_timeout);
}
275 
/**
 * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi
 * @n: napi struct to set the gro_flush_timeout
 * @timeout: timeout value to set
 *
 * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout
 */
static inline void napi_set_gro_flush_timeout(struct napi_struct *n,
					      unsigned long timeout)
{
	/* pairs with the READ_ONCE() in napi_get_gro_flush_timeout() */
	WRITE_ONCE(n->gro_flush_timeout, timeout);
}
288 
/**
 * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs
 * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set
 * @timeout: the timeout value to set
 */
static inline void netdev_set_gro_flush_timeout(struct net_device *netdev,
						unsigned long timeout)
{
	/* assumes napi_config[] has at least max(rx, tx) queue entries —
	 * NOTE(review): confirm against the napi_config allocation site
	 */
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	/* device-wide value */
	WRITE_ONCE(netdev->gro_flush_timeout, timeout);
	/* every live NAPI instance */
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_gro_flush_timeout(napi, timeout);

	/* persistent per-queue config */
	for (i = 0; i < count; i++)
		netdev->napi_config[i].gro_flush_timeout = timeout;
}
309 
/**
 * napi_get_irq_suspend_timeout - get the irq_suspend_timeout
 * @n: napi struct to get the irq_suspend_timeout from
 *
 * Return: the per-NAPI value of the irq_suspend_timeout field.
 */
static inline unsigned long
napi_get_irq_suspend_timeout(const struct napi_struct *n)
{
	/* pairs with the WRITE_ONCE() in napi_set_irq_suspend_timeout() */
	return READ_ONCE(n->irq_suspend_timeout);
}
321 
/**
 * napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi
 * @n: napi struct to set the irq_suspend_timeout
 * @timeout: timeout value to set
 *
 * napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout
 */
static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
						unsigned long timeout)
{
	/* pairs with the READ_ONCE() in napi_get_irq_suspend_timeout() */
	WRITE_ONCE(n->irq_suspend_timeout, timeout);
}
334 
/* Derive the NAPI's threaded-polling mode from its state bits.
 * THREADED_BUSY_POLL is tested first, so it takes precedence over a
 * plain THREADED bit when both happen to be set.
 */
static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n)
{
	if (test_bit(NAPI_STATE_THREADED_BUSY_POLL, &n->state))
		return NETDEV_NAPI_THREADED_BUSY_POLL;

	if (test_bit(NAPI_STATE_THREADED, &n->state))
		return NETDEV_NAPI_THREADED_ENABLED;

	return NETDEV_NAPI_THREADED_DISABLED;
}
345 
346 static inline enum netdev_napi_threaded
347 napi_get_threaded_config(struct net_device *dev, struct napi_struct *n)
348 {
349 	if (n->config)
350 		return n->config->threaded;
351 	return dev->threaded;
352 }
353 
354 int napi_set_threaded(struct napi_struct *n,
355 		      enum netdev_napi_threaded threaded);
356 
357 int netif_set_threaded(struct net_device *dev,
358 		       enum netdev_napi_threaded threaded);
359 
360 int rps_cpumask_housekeeping(struct cpumask *mask);
361 
362 #if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
363 void xdp_do_check_flushed(struct napi_struct *napi);
364 #else
365 static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
366 #endif
367 
368 /* Best effort check that NAPI is not idle (can't be scheduled to run) */
static inline void napi_assert_will_not_race(const struct napi_struct *napi)
{
	/* uninitialized instance, can't race */
	if (!napi->poll_list.next)
		return;

	/* SCHED bit is set on disabled instances */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
	/* presumably -1 means no CPU currently owns/polls this NAPI —
	 * NOTE(review): confirm list_owner semantics in net/core/dev.c
	 */
	WARN_ON(READ_ONCE(napi->list_owner) != -1);
}
379 
380 void kick_defer_list_purge(unsigned int cpu);
381 
382 int dev_set_hwtstamp_phylib(struct net_device *dev,
383 			    struct kernel_hwtstamp_config *cfg,
384 			    struct netlink_ext_ack *extack);
385 int dev_get_hwtstamp_phylib(struct net_device *dev,
386 			    struct kernel_hwtstamp_config *cfg);
387 int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg);
388 
389 #endif
390