/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_NETDEV_LOCK_H
#define _NET_NETDEV_LOCK_H

#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

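/* Non-blocking attempt to take the per-device instance lock (dev->lock);
 * returns true on success. The blocking netdev_lock()/netdev_unlock()
 * counterparts live in <linux/netdevice.h>.
 */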
static inline bool netdev_trylock(struct net_device *dev)
{
	return mutex_trylock(&dev->lock);
}

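/* Lockdep-only assertion that the instance lock is held. */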
static inline void netdev_assert_locked(const struct net_device *dev)
{
	lockdep_assert_held(&dev->lock);
}

static inline void
netdev_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_assert_locked(dev);
}

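/* Whether ops for this device must run under the instance lock instead
 * of just rtnl_lock. True if the driver asked for it explicitly, if it
 * implements queue management ops, or (when CONFIG_NET_SHAPER is
 * enabled) if it implements net_shaper_ops.
 */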
static inline bool netdev_need_ops_lock(const struct net_device *dev)
{
	bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;

#if IS_ENABLED(CONFIG_NET_SHAPER)
	ret |= !!dev->netdev_ops->net_shaper_ops;
#endif

	return ret;
}

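/* Take the instance lock around an ops call, but only for devices which
 * opted in via netdev_need_ops_lock(); a no-op otherwise. Pair with
 * netdev_unlock_ops().
 *
 * Illustrative sketch of wrapping an ops invocation (the op name is
 * hypothetical; core callers typically hold rtnl_lock here as well):
 *
 *	netdev_lock_ops(dev);
 *	err = dev->netdev_ops->ndo_some_setting(dev);
 *	netdev_unlock_ops(dev);
 */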
static inline void netdev_lock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
}

static inline void netdev_unlock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

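/* Upgrade from ops-lock scope to the full instance lock: devices which
 * take the ops lock already hold dev->lock, so just assert it; for the
 * rest, acquire the instance lock now.
 */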
static inline void netdev_lock_ops_to_full(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_assert_locked(dev);
	else
		netdev_lock(dev);
}

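/* Inverse of netdev_lock_ops_to_full(): drop from full instance-lock
 * scope back to ops scope, releasing dev->lock only for devices which
 * did not take it as their ops lock.
 */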
static inline void netdev_unlock_full_to_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_assert_locked(dev);
	else
		netdev_unlock(dev);
}

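/* Assert that whatever protects this device's ops is held: the instance
 * lock for opted-in devices, rtnl_lock for everything else.
 */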
static inline void netdev_ops_assert_locked(const struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		lockdep_assert_held(&dev->lock);
	else
		ASSERT_RTNL();
}

static inline void
netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_ops_assert_locked(dev);
}

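/* Compat helpers: take whichever lock protects the ops, so that
 * netdev_ops_assert_locked() is satisfied for both converted and
 * unconverted drivers. Pair netdev_lock_ops_compat() with
 * netdev_unlock_ops_compat().
 *
 * Illustrative sketch (the callee is hypothetical):
 *
 *	netdev_lock_ops_compat(dev);
 *	err = configure_something(dev);
 *	netdev_unlock_ops_compat(dev);
 */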
static inline void netdev_lock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
	else
		rtnl_lock();
}

static inline void netdev_unlock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
	else
		rtnl_unlock();
}

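/* lockdep comparison function for instance locks; hooked up via
 * lock_set_cmp_fn() in netdev_lockdep_set_classes() below so that
 * lockdep can validate nesting of two netdev instance locks.
 */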
static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
				     const struct lockdep_map *b)
{
	if (a == b)
		return 0;

	/* Allow locking multiple devices only under rtnl_lock,
	 * the exact order doesn't matter.
	 * Note that upper devices don't lock their ops, so nesting
	 * mostly happens in batched device removal for now.
	 */
	return lockdep_rtnl_is_held() ? -1 : 1;
}

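/* Assign per-call-site (i.e. per-driver) lockdep classes to the
 * device's locks; typically run once per device at setup time. The
 * lock_class_key variables are static, so all devices created through
 * the same expansion of this macro share one class per lock.
 */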
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	static struct lock_class_key dev_instance_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	lockdep_set_class(&(dev)->lock,				\
			  &dev_instance_lock_key);		\
	lock_set_cmp_fn(&dev->lock, netdev_lock_cmp_fn, NULL);	\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}

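/* rcu_dereference_protected() wrapper for RCU pointers whose updates
 * are guarded by the instance lock; splats under lockdep if dev->lock
 * is not held.
 *
 * Illustrative sketch (the pointer member is hypothetical):
 *
 *	netdev_assert_locked(dev);
 *	cfg = netdev_lock_dereference(dev->some_cfg, dev);
 */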
#define netdev_lock_dereference(p, dev)				\
	rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock))

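/* Notifier callback implemented by the core's netdev lock debugging
 * code; it sanity-checks locking expectations for netdevice notifier
 * events.
 */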
int netdev_debug_event(struct notifier_block *nb, unsigned long event,
		       void *ptr);

#endif