/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>

#include <net/dst.h>

/* Locking strategy:
 * 1) Garbage collection state of dead destination cache
 *    entries is protected by dst_lock.
 * 2) GC is run only from BH context, and is the only remover
 *    of entries.
 * 3) Entries are added to the garbage list from both BH
 *    and non-BH context, so local BH disabling is needed.
 * 4) All operations modify state, so a spinlock is used.
 */
static struct dst_entry *dst_garbage_list;
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif
static DEFINE_SPINLOCK(dst_lock);

static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);
static void ___dst_free(struct dst_entry *dst);

static struct timer_list dst_gc_timer =
		TIMER_INITIALIZER(dst_run_gc, DST_GC_MIN, 0);

static void dst_run_gc(unsigned long dummy)
{
	int delayed = 0;
	struct dst_entry *dst, **dstp;

	if (!spin_trylock(&dst_lock)) {
		mod_timer(&dst_gc_timer, jiffies + HZ/10);
		return;
	}

	del_timer(&dst_gc_timer);
	dstp = &dst_garbage_list;
	while ((dst = *dstp) != NULL) {
		if (atomic_read(&dst->__refcnt)) {
			dstp = &dst->next;
			delayed++;
			continue;
		}
		*dstp = dst->next;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = *dstp;
			*dstp = dst;
			dstp = &dst->next;
		}
	}
	if (!dst_garbage_list) {
		dst_gc_timer_inc = DST_GC_MAX;
		goto out;
	}
	if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
		dst_gc_timer_expires = DST_GC_MAX;
	dst_gc_timer_inc += DST_GC_INC;
	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
#if RT_CACHE_DEBUG >= 2
	printk("dst_total: %d/%d %ld\n",
	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
	add_timer(&dst_gc_timer);

out:
	spin_unlock(&dst_lock);
}
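/*
 * Added note (not in the original source): the rearm logic above gives a
 * roughly quadratic back-off.  Assuming the usual <net/dst.h> values
 * (DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2, DST_GC_MAX = 120*HZ), a list
 * that never empties is rescanned after 0.1s, 0.6s, 1.6s, 3.1s, 5.1s, ...
 * capped at 120s, because each pass that leaves entries behind does
 *
 *	expires += inc;  inc += DST_GC_INC;
 *
 * __dst_free() resets the back-off (inc = DST_GC_INC, expires = DST_GC_MIN)
 * when it queues an entry after inc has grown past DST_GC_INC.  An empty
 * list parks the timer with inc = DST_GC_MAX and skips add_timer(), so the
 * next __dst_free() takes that reset path and rearms immediately.
 */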
static int dst_discard_in(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static int dst_discard_out(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

void *dst_alloc(struct dst_ops *ops)
{
	struct dst_entry *dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc())
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
	if (!dst)
		return NULL;
	memset(dst, 0, ops->entry_size);
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst_discard_in;
	dst->output = dst_discard_out;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when the
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
		dst->input = dst_discard_in;
		dst->output = dst_discard_out;
	}
	dst->obsolete = 2;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_lock);
	___dst_free(dst);
	dst->next = dst_garbage_list;
	dst_garbage_list = dst;
	if (dst_gc_timer_inc > DST_GC_INC) {
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
		mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
	}
	spin_unlock_bh(&dst_lock);
}
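/*
 * Illustrative sketch (not in the original file, hence #if 0): how a
 * protocol might declare its dst_ops and obtain entries from dst_alloc().
 * The names example_dst_ops/example_init/example_route_alloc, the cache
 * name, and the gc_thresh value are all hypothetical.
 */
#if 0
static struct dst_ops example_dst_ops = {
	.family		= AF_INET,
	.gc_thresh	= 1024,
	.entry_size	= sizeof(struct dst_entry),
};

static int example_init(void)
{
	/* The ops must carry a slab cache before dst_alloc() is used. */
	example_dst_ops.kmem_cachep =
		kmem_cache_create("example_dst_cache",
				  example_dst_ops.entry_size, 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	return example_dst_ops.kmem_cachep ? 0 : -ENOMEM;
}

static struct dst_entry *example_route_alloc(void)
{
	/* dst_alloc() runs ops->gc() when over gc_thresh, then returns
	 * a zeroed entry with __refcnt 0 and discard input/output
	 * hooks; the caller fills in dev, neighbour, metrics, etc. */
	return dst_alloc(&example_dst_ops);
}
#endif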
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			      int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard_in;
		dst->output = dst_discard_out;
	} else {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = &loopback_dev;
			dev_put(dev);
			dev_hold(&loopback_dev);
		}
	}
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		spin_lock_bh(&dst_lock);
		for (dst = dst_garbage_list; dst; dst = dst->next) {
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}
		spin_unlock_bh(&dst_lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);
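/*
 * Illustrative sketch (not in the original file, hence #if 0): the caller
 * side of the lifecycle implemented above.  Entries are refcounted via
 * __refcnt (dst_hold()/dst_release() in <net/dst.h>); a protocol that
 * drops an entry from its own lookup tables hands it to __dst_free()
 * (typically via the dst_free() wrapper in <net/dst.h>), and dst_run_gc()
 * finishes the job with dst_destroy() once the refcount reaches zero.
 * The function name below is hypothetical.
 */
#if 0
static void example_route_expire(struct dst_entry *dst)
{
	/* The entry is now unreachable from lookup, but in-flight skbs
	 * may still hold references; queue it on the garbage list
	 * rather than freeing it immediately. */
	dst_free(dst);
}
#endif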