xref: /linux/include/net/netdev_lock.h (revision de5ca699bc3f7fe9f90ba927d8a6e7783cd7311d)
/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_NETDEV_LOCK_H
#define _NET_NETDEV_LOCK_H

#include <linux/lockdep.h>
#include <linux/netdevice.h>

static inline bool netdev_trylock(struct net_device *dev)
{
	return mutex_trylock(&dev->lock);
}

static inline void netdev_assert_locked(struct net_device *dev)
{
	lockdep_assert_held(&dev->lock);
}

static inline void netdev_assert_locked_or_invisible(struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_assert_locked(dev);
}
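
/*
 * Illustrative sketch, not part of this header: a configuration helper
 * that can run both before a device is registered and at runtime would
 * typically use netdev_assert_locked_or_invisible(), since a device that
 * is not yet (or no longer) visible cannot be reached concurrently and
 * needs no locking. The helper name below is hypothetical.
 *
 *	static void some_cfg_helper(struct net_device *dev)
 *	{
 *		netdev_assert_locked_or_invisible(dev);
 *		...
 *	}
 */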

static inline bool netdev_need_ops_lock(struct net_device *dev)
{
	bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;

#if IS_ENABLED(CONFIG_NET_SHAPER)
	ret |= !!dev->netdev_ops->net_shaper_ops;
#endif

	return ret;
}

static inline void netdev_lock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
}

static inline void netdev_unlock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

static inline void netdev_ops_assert_locked(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		lockdep_assert_held(&dev->lock);
}
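
/*
 * Illustrative sketch, not taken from the kernel sources: for devices
 * where netdev_need_ops_lock() is true, core code is expected to bracket
 * driver callbacks that rely on the instance lock with
 * netdev_lock_ops()/netdev_unlock_ops(), and the driver side can verify
 * that assumption with netdev_ops_assert_locked(). The ndo name below is
 * hypothetical.
 *
 *	netdev_lock_ops(dev);
 *	err = dev->netdev_ops->ndo_do_something(dev);
 *	netdev_unlock_ops(dev);
 *
 * and, in the driver:
 *
 *	static int drv_do_something(struct net_device *dev)
 *	{
 *		netdev_ops_assert_locked(dev);
 *		...
 *	}
 */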

static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
				     const struct lockdep_map *b)
{
	/* Only lower devices currently grab the instance lock, so no
	 * real ordering issues can occur. In the near future, only
	 * hardware devices will grab instance lock which also does not
	 * involve any ordering. Suppress lockdep ordering warnings
	 * until (if) we start grabbing instance lock on pure SW
	 * devices (bond/team/veth/etc).
	 */
	if (a == b)
		return 0;
	return -1;
}
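
/*
 * Illustrative note: with the cmp_fn above, lockdep allows two distinct
 * instance locks of this class to nest in either order (cmp_fn returns
 * -1), so a hypothetical
 *
 *	netdev_lock(dev_a);
 *	netdev_lock(dev_b);
 *
 * produces no ordering splat, while acquiring the same instance lock
 * twice (a == b, cmp_fn returns 0) is still flagged as a deadlock.
 */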

#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	static struct lock_class_key dev_instance_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	lockdep_set_class(&(dev)->lock,				\
			  &dev_instance_lock_key);		\
	lock_set_cmp_fn(&(dev)->lock, netdev_lock_cmp_fn, NULL); \
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}
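
/*
 * Illustrative sketch, the driver below is hypothetical: virtual/stacked
 * device drivers typically invoke this macro from their setup or
 * ndo_init path so that their addr_list, xmit, qdisc busylock and
 * instance locks get driver-specific lockdep classes instead of the
 * shared defaults, avoiding false ordering reports when such devices
 * are stacked on top of each other.
 *
 *	static void myvirt_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *		netdev_lockdep_set_classes(dev);
 *	}
 */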

#endif