/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_NETDEV_LOCK_H
#define _NET_NETDEV_LOCK_H

#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static inline bool netdev_trylock(struct net_device *dev)
{
	return mutex_trylock(&dev->lock);
}

static inline void netdev_assert_locked(const struct net_device *dev)
{
	lockdep_assert_held(&dev->lock);
}

static inline void
netdev_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_assert_locked(dev);
}

/* The instance lock protects the ops only for devices which opted in:
 * those which explicitly request it, implement the queue management
 * ops, or (with CONFIG_NET_SHAPER) the net shaper ops.
 */
static inline bool netdev_need_ops_lock(const struct net_device *dev)
{
	bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;

#if IS_ENABLED(CONFIG_NET_SHAPER)
	ret |= !!dev->netdev_ops->net_shaper_ops;
#endif

	return ret;
}

/* Take/release the instance lock around ops invocations, but only for
 * devices which use it; everyone else remains protected by RTNL.
 */
static inline void netdev_lock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
}

static inline void netdev_unlock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

static inline void netdev_ops_assert_locked(const struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		lockdep_assert_held(&dev->lock);
	else
		ASSERT_RTNL();
}

static inline void
netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_ops_assert_locked(dev);
}

/* Compat helpers for paths which must work against both converted and
 * unconverted devices: fall back to RTNL when the device does not use
 * the instance lock for its ops.
 */
static inline void netdev_lock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
	else
		rtnl_lock();
}

static inline void netdev_unlock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
	else
		rtnl_unlock();
}

static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
				     const struct lockdep_map *b)
{
	/* Only lower devices currently grab the instance lock, so no
	 * real ordering issues can occur. In the near future, only
	 * hardware devices will grab the instance lock, which also does
	 * not involve any ordering. Suppress lockdep ordering warnings
	 * until (if) we start grabbing the instance lock on pure SW
	 * devices (bond/team/veth/etc).
	 */
	if (a == b)
		return 0;
	return -1;
}

/* Assign this call site's own lockdep classes to the device's qdisc
 * busylock, per-queue xmit locks, addr-list lock and instance lock.
 */
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	static struct lock_class_key dev_instance_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	lockdep_set_class(&(dev)->lock,				\
			  &dev_instance_lock_key);		\
	lock_set_cmp_fn(&(dev)->lock, netdev_lock_cmp_fn, NULL); \
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}

int netdev_debug_event(struct notifier_block *nb, unsigned long event,
		       void *ptr);

#endif
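
/*
 * Usage sketch, illustrative only and not part of the upstream header:
 * the core takes the instance lock (or holds RTNL, for devices which
 * did not opt in) before invoking the ops, so a handler can assert the
 * locking contract rather than re-take the lock. my_ndo_open() below
 * is a hypothetical driver callback, not an in-tree function.
 *
 *	static int my_ndo_open(struct net_device *dev)
 *	{
 *		netdev_ops_assert_locked(dev);
 *		return 0;
 *	}
 *
 * Call sites which may see both converted and unconverted devices
 * should instead bracket the ops invocation with
 * netdev_lock_ops_compat()/netdev_unlock_ops_compat(), which fall back
 * to rtnl_lock()/rtnl_unlock() when the device does not use the
 * instance lock for its ops.
 */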