// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to
     use the neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever should be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   dev->hard_header is assumed to be simple and not to make
   callbacks into the neighbour tables.
 */

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * Returns a value drawn uniformly from the interval
 * (1/2)*base...(3/2)*base. This matches the default IPv6 behaviour
 * and is not overridable, because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? get_random_u32_below(base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
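/* For example, with the typical default base_reachable_time of 30
 * seconds, reachable_time is re-drawn uniformly from [15s, 45s),
 * which keeps neighbours on the same link from revalidating in
 * lock-step.
 */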
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_managed_list(struct neighbour *n)
{
	bool on_managed_list, add_to_managed;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	add_to_managed = n->flags & NTF_MANAGED;
	on_managed_list = !list_empty(&n->managed_list);

	if (!add_to_managed && on_managed_list)
		list_del_init(&n->managed_list);
	else if (add_to_managed && !on_managed_list)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
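/* Both helpers above are called from __neigh_update() once a flag or
 * state change has been committed: toggling NUD_PERMANENT or
 * NTF_EXT_LEARNED moves an entry on or off the per-table gc_list, and
 * toggling NTF_MANAGED moves it on or off the managed_list that
 * neigh_managed_work() walks to re-probe entries periodically.
 */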
static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
			       bool *gc_update, bool *managed_update)
{
	u32 ndm_flags, old_flags = neigh->flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return;

	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		*notify = 1;
		*gc_update = true;
	}
	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
		if (ndm_flags & NTF_MANAGED)
			neigh->flags |= NTF_MANAGED;
		else
			neigh->flags &= ~NTF_MANAGED;
		*notify = 1;
		*managed_update = true;
	}
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    !time_in_range(n->updated, tref, jiffies))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	/* Use safe distance from the jiffies - LONG_MAX point while timer
	 * is running in DELAY/PROBE state but still show to user space
	 * large times in the past.
	 */
	unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);

	neigh_hold(n);
	if (!time_in_range(n->confirmed, mint, jiffies))
		n->confirmed = mint;
	if (time_before(n->used, n->confirmed))
		n->used = n->confirmed;
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_parms_qlen_dec(struct net_device *dev, int family)
{
	struct neigh_parms *p;

	rcu_read_lock();
	p = neigh_get_dev_parms_rcu(dev, family);
	if (p)
		p->qlen--;
	rcu_read_unlock();
}
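/* The forced GC above only trims entries nobody else references
 * (refcnt == 1) and stops once the table is back under gc_thresh2,
 * so a burst of lookups can still hold the table above the soft
 * limit; the hard limit is enforced at allocation time in
 * neigh_alloc() below.
 */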
static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
			       int family)
{
	struct sk_buff_head tmp;
	unsigned long flags;
	struct sk_buff *skb;

	skb_queue_head_init(&tmp);
	spin_lock_irqsave(&list->lock, flags);
	skb = skb_peek(list);
	while (skb != NULL) {
		struct sk_buff *skb_next = skb_peek_next(skb, list);
		struct net_device *dev = skb->dev;

		if (net == NULL || net_eq(dev_net(dev), net)) {
			neigh_parms_qlen_dec(dev, family);
			__skb_unlink(skb, list);
			__skb_queue_tail(&tmp, skb);
		}
		skb = skb_next;
	}
	spin_unlock_irqrestore(&list->lock, flags);

	while ((skb = __skb_dequeue(&tmp))) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation: we must
				   destroy the neighbour entry, but someone
				   still uses it.

				   Destruction will be delayed until the last
				   user releases us, but we must kill timers
				   etc. and move it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);
	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
			   tbl->family);
	if (skb_queue_empty_lockless(&tbl->proxy_queue))
		del_timer_sync(&tbl->proxy_timer);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
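/* The skip_perm distinction above: a carrier loss
 * (neigh_carrier_down) invalidates learned entries but keeps
 * NUD_PERMANENT ones, since static entries remain valid when the
 * link comes back; an actual ifdown (neigh_ifdown) flushes
 * everything for the device.
 */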
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->flags	  = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
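/* Worked example of the thresholds consulted by neigh_alloc() above:
 * with IPv4's usual defaults (gc_thresh1 = 128, gc_thresh2 = 512,
 * gc_thresh3 = 1024), allocation is unconditional below 512 entries,
 * triggers a synchronous neigh_forced_gc() between 512 and 1023 if
 * the last flush was more than 5 seconds ago, and fails with
 * "neighbor table overflow!" at 1024 when forced GC cannot reclaim
 * anything.
 */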
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
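/* Note the bucket index convention used throughout this file: the
 * 32-bit hash is shifted right by (32 - hash_shift), i.e. the top
 * hash_shift bits select the bucket. With hash_shift == 3 (the
 * initial size set up in neigh_table_init()) that is buckets 0..7.
 */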
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
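/* Typical caller pattern (a sketch of a hypothetical IPv4 user; the
 * real callers live in net/ipv4 and net/ipv6):
 *
 *	struct neighbour *n;
 *
 *	n = __neigh_lookup_noref(&arp_tbl, &next_hop, dev);
 *	if (!n)
 *		n = __neigh_create(&arp_tbl, &next_hop, dev, false);
 *	if (!IS_ERR(n))
 *		... transmit via n->output(n, skb) ...
 *
 * __neigh_create() returns the existing entry if another CPU won the
 * race to insert the same key, so callers need no extra locking for
 * the lookup-or-create idiom.
 */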
static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		netdev_put(dev, &n->dev_tracker);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			netdev_put(n->dev, &n->dev_tracker);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		netdev_put(n->dev, &n->dev_tracker);
		kfree(n);
	}
	return -ENOENT;
}
static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	netdev_put(dev, &neigh->dev_tracker);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with the neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with the neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed) &&
			    time_is_before_eq_jiffies(n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     !time_in_range_open(jiffies, n->used,
						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}
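/* Probe accounting example: with the common defaults ucast_solicit=3,
 * app_solicit=0 and mcast_solicit=3, an entry in NUD_INCOMPLETE gives
 * up after 6 solicitations, while a reachability re-check in
 * NUD_PROBE uses mcast_resolicit (default 0) instead of
 * mcast_solicit, so it normally sends only the 3 unicast probes.
 */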
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very delicate place. report_unreachable is a
	   complicated routine; in particular, it can hit the same
	   neighbour entry!

	   So we try to be careful and avoid an endless loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
		       const bool immediate_ok)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			if (!immediate_ok) {
				next = now + 1;
			} else {
				immediate_probe = true;
				next = now + max(NEIGH_VAR(neigh->parms,
							   RETRANS_TIME),
						 HZ / 100);
			}
			neigh_add_timer(neigh, next);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
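/* Summary of the NUD state machine driven by neigh_timer_handler()
 * and __neigh_event_send() above:
 *
 *	NONE -> INCOMPLETE -> REACHABLE or FAILED
 *	REACHABLE -> (reachable_time expires) -> DELAY or STALE,
 *		     depending on recent use
 *	STALE -> (traffic seen) -> DELAY -> PROBE -> REACHABLE or FAILED
 *
 * Confirmations (e.g. from received ARP replies) move entries back to
 * REACHABLE via neigh_update(); FAILED entries are reclaimed by GC.
 */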
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool gc_update = false, managed_update = false;
	int update_isrouter = 0;
	struct net_device *dev;
	int err, notify = 0;
	u8 old;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
		new = old & ~NUD_PERMANENT;
		neigh->nud_state = new;
		err = 0;
		goto out;
	}

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update the confirmed timestamp for the neighbour entry after we
	 * received an ARP packet, even if it doesn't change the IP to MAC
	 * binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If the entry was valid and the address is not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update the timestamp only once we know we will make a change to
	 * the neighbour entry. Otherwise we risk moving the locktime window
	 * with noop updates and ignoring relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);
	if (((new ^ old) & NUD_PERMANENT) || gc_update)
		neigh_update_gc_list(neigh);
	if (managed_update)
		neigh_update_managed_list(neigh);
	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);
	trace_neigh_update_done(neigh, err);
	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
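/* Illustrative call (hypothetical values): the RTM_NEWNEIGH handler
 * below issues updates in the style of
 *
 *	neigh_update(neigh, lladdr, NUD_PERMANENT,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, pid);
 *
 * (see neigh_add()), while protocols confirming a neighbour pass
 * NUD_REACHABLE without NEIGH_UPDATE_F_ADMIN, so that NOARP and
 * PERMANENT entries stay untouched.
 */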
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_managed_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table,
					       managed_work.work);
	struct neighbour *neigh;

	write_lock_bh(&tbl->lock);
	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
		neigh_event_send_probe(neigh, NULL, false);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
	write_unlock_bh(&tbl->lock);
}
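/* The three output routines above form the fast/slow path pair that
 * neigh_connect()/neigh_suspect() switch between: connected entries
 * use ops->connected_output (typically neigh_connected_output, or a
 * cached-header fast path), while suspect entries fall back to
 * ops->output (typically neigh_resolve_output), which restarts
 * resolution via neigh_event_send() before every transmit.
 */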
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			neigh_parms_qlen_dec(dev, tbl->family);
			__skb_unlink(skb, &tbl->proxy_queue);

			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

static unsigned long neigh_proxy_delay(struct neigh_parms *p)
{
	/* If proxy_delay is zero, do not call get_random_u32_below()
	 * as it is undefined behavior.
	 */
	unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);

	return proxy_delay ?
	       jiffies + get_random_u32_below(proxy_delay) : jiffies;
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = neigh_proxy_delay(p);

	if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	p->qlen++;
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		p->qlen = 0;
		netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			netdev_put(dev, &p->dev_tracker);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
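/* Worked example for the proxy path above (neigh_proxy_delay() and
 * pneigh_enqueue()): with the usual IPv4 proxy_delay default of
 * roughly 0.8 s (80 centiseconds via sysctl), each queued request is
 * answered after a uniformly random delay in [0, 0.8s), the classic
 * trick that lets the real owner of the address answer first before
 * a proxy ARP reply goes out.
 */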
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	netdev_put(parms->dev, &parms->dev_tracker);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	INIT_LIST_HEAD(&tbl->managed_list);

	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
	tbl->parms.qlen = 0;

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
				  &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);

	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   tbl->parms.reachable_time);
	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it so the IPv6 module can be unloaded
	 * safely.
	 */
	cancel_delayed_work_sync(&tbl->managed_work);
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	}

	return tbl;
}

const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}
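/* Deleting via RTM_DELNEIGH is therefore a two-step operation: the
 * entry is first forced to NUD_FAILED through __neigh_update() (which
 * also notifies listeners), and only then unlinked from the hash
 * table with neigh_remove_one() under tbl->lock.
 */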
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	u32 ndm_flags;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	ndm_flags = ndm->ndm_flags;
	if (tb[NDA_FLAGS_EXT]) {
		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);

		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
			      hweight32(NTF_EXT_MASK)));
		ndm_flags |= (ext << NTF_EXT_SHIFT);
	}
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		if (ndm_flags & NTF_MANAGED) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
			goto out;
		}

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
		bool exempt_from_gc = ndm_permanent ||
				      ndm_flags & NTF_EXT_LEARNED;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}
		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
			err = -EINVAL;
			goto out;
		}

		neigh = ___neigh_create(tbl, dst, dev,
					ndm_flags &
					(NTF_EXT_LEARNED | NTF_MANAGED),
					exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;
	if (ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
	if (ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;
	if (ndm_flags & NTF_MANAGED)
		flags |= NEIGH_UPDATE_F_MANAGED;
	if (ndm_flags & NTF_USE)
		flags |= NEIGH_UPDATE_F_USE;

	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
			     NETLINK_CB(skb).portid, extack);
	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
		neigh_event_send(neigh, NULL);
		err = 0;
	}
	neigh_release(neigh);
out:
	return err;
}
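/* From userspace, this handler is what e.g.
 *
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 \
 *		dev eth0 nud permanent
 *
 * ends up in: NLM_F_CREATE|NLM_F_REPLACE keeps the OVERRIDE flags set
 * above, and NUD_PERMANENT makes the entry exempt from garbage
 * collection.
 */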
NTF_USE) 2118 flags |= NEIGH_UPDATE_F_USE; 2119 2120 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, 2121 NETLINK_CB(skb).portid, extack); 2122 if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) { 2123 neigh_event_send(neigh, NULL); 2124 err = 0; 2125 } 2126 neigh_release(neigh); 2127 out: 2128 return err; 2129 } 2130 2131 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) 2132 { 2133 struct nlattr *nest; 2134 2135 nest = nla_nest_start_noflag(skb, NDTA_PARMS); 2136 if (nest == NULL) 2137 return -ENOBUFS; 2138 2139 if ((parms->dev && 2140 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) || 2141 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) || 2142 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, 2143 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) || 2144 /* approximative value for deprecated QUEUE_LEN (in packets) */ 2145 nla_put_u32(skb, NDTPA_QUEUE_LEN, 2146 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) || 2147 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) || 2148 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) || 2149 nla_put_u32(skb, NDTPA_UCAST_PROBES, 2150 NEIGH_VAR(parms, UCAST_PROBES)) || 2151 nla_put_u32(skb, NDTPA_MCAST_PROBES, 2152 NEIGH_VAR(parms, MCAST_PROBES)) || 2153 nla_put_u32(skb, NDTPA_MCAST_REPROBES, 2154 NEIGH_VAR(parms, MCAST_REPROBES)) || 2155 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time, 2156 NDTPA_PAD) || 2157 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME, 2158 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) || 2159 nla_put_msecs(skb, NDTPA_GC_STALETIME, 2160 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) || 2161 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME, 2162 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) || 2163 nla_put_msecs(skb, NDTPA_RETRANS_TIME, 2164 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) || 2165 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, 2166 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) || 2167 nla_put_msecs(skb, NDTPA_PROXY_DELAY, 2168 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) || 2169 nla_put_msecs(skb, NDTPA_LOCKTIME, 2170 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) || 2171 nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS, 2172 NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD)) 2173 goto nla_put_failure; 2174 return nla_nest_end(skb, nest); 2175 2176 nla_put_failure: 2177 nla_nest_cancel(skb, nest); 2178 return -EMSGSIZE; 2179 } 2180 2181 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, 2182 u32 pid, u32 seq, int type, int flags) 2183 { 2184 struct nlmsghdr *nlh; 2185 struct ndtmsg *ndtmsg; 2186 2187 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 2188 if (nlh == NULL) 2189 return -EMSGSIZE; 2190 2191 ndtmsg = nlmsg_data(nlh); 2192 2193 read_lock_bh(&tbl->lock); 2194 ndtmsg->ndtm_family = tbl->family; 2195 ndtmsg->ndtm_pad1 = 0; 2196 ndtmsg->ndtm_pad2 = 0; 2197 2198 if (nla_put_string(skb, NDTA_NAME, tbl->id) || 2199 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) || 2200 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) || 2201 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) || 2202 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3)) 2203 goto nla_put_failure; 2204 { 2205 unsigned long now = jiffies; 2206 long flush_delta = now - tbl->last_flush; 2207 long rand_delta = now - tbl->last_rand; 2208 struct neigh_hash_table *nht; 2209 struct ndt_config ndc = { 2210 .ndtc_key_len = tbl->key_len, 2211 .ndtc_entry_size = tbl->entry_size, 2212 .ndtc_entries = atomic_read(&tbl->entries), 2213 .ndtc_last_flush = 
jiffies_to_msecs(flush_delta), 2214 .ndtc_last_rand = jiffies_to_msecs(rand_delta), 2215 .ndtc_proxy_qlen = tbl->proxy_queue.qlen, 2216 }; 2217 2218 rcu_read_lock_bh(); 2219 nht = rcu_dereference_bh(tbl->nht); 2220 ndc.ndtc_hash_rnd = nht->hash_rnd[0]; 2221 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); 2222 rcu_read_unlock_bh(); 2223 2224 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc)) 2225 goto nla_put_failure; 2226 } 2227 2228 { 2229 int cpu; 2230 struct ndt_stats ndst; 2231 2232 memset(&ndst, 0, sizeof(ndst)); 2233 2234 for_each_possible_cpu(cpu) { 2235 struct neigh_statistics *st; 2236 2237 st = per_cpu_ptr(tbl->stats, cpu); 2238 ndst.ndts_allocs += st->allocs; 2239 ndst.ndts_destroys += st->destroys; 2240 ndst.ndts_hash_grows += st->hash_grows; 2241 ndst.ndts_res_failed += st->res_failed; 2242 ndst.ndts_lookups += st->lookups; 2243 ndst.ndts_hits += st->hits; 2244 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast; 2245 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast; 2246 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs; 2247 ndst.ndts_forced_gc_runs += st->forced_gc_runs; 2248 ndst.ndts_table_fulls += st->table_fulls; 2249 } 2250 2251 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst, 2252 NDTA_PAD)) 2253 goto nla_put_failure; 2254 } 2255 2256 BUG_ON(tbl->parms.dev); 2257 if (neightbl_fill_parms(skb, &tbl->parms) < 0) 2258 goto nla_put_failure; 2259 2260 read_unlock_bh(&tbl->lock); 2261 nlmsg_end(skb, nlh); 2262 return 0; 2263 2264 nla_put_failure: 2265 read_unlock_bh(&tbl->lock); 2266 nlmsg_cancel(skb, nlh); 2267 return -EMSGSIZE; 2268 } 2269 2270 static int neightbl_fill_param_info(struct sk_buff *skb, 2271 struct neigh_table *tbl, 2272 struct neigh_parms *parms, 2273 u32 pid, u32 seq, int type, 2274 unsigned int flags) 2275 { 2276 struct ndtmsg *ndtmsg; 2277 struct nlmsghdr *nlh; 2278 2279 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 2280 if (nlh == NULL) 2281 return -EMSGSIZE; 2282 2283 ndtmsg = nlmsg_data(nlh); 2284 2285 read_lock_bh(&tbl->lock); 2286 ndtmsg->ndtm_family = tbl->family; 2287 ndtmsg->ndtm_pad1 = 0; 2288 ndtmsg->ndtm_pad2 = 0; 2289 2290 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 || 2291 neightbl_fill_parms(skb, parms) < 0) 2292 goto errout; 2293 2294 read_unlock_bh(&tbl->lock); 2295 nlmsg_end(skb, nlh); 2296 return 0; 2297 errout: 2298 read_unlock_bh(&tbl->lock); 2299 nlmsg_cancel(skb, nlh); 2300 return -EMSGSIZE; 2301 } 2302 2303 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = { 2304 [NDTA_NAME] = { .type = NLA_STRING }, 2305 [NDTA_THRESH1] = { .type = NLA_U32 }, 2306 [NDTA_THRESH2] = { .type = NLA_U32 }, 2307 [NDTA_THRESH3] = { .type = NLA_U32 }, 2308 [NDTA_GC_INTERVAL] = { .type = NLA_U64 }, 2309 [NDTA_PARMS] = { .type = NLA_NESTED }, 2310 }; 2311 2312 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { 2313 [NDTPA_IFINDEX] = { .type = NLA_U32 }, 2314 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, 2315 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, 2316 [NDTPA_APP_PROBES] = { .type = NLA_U32 }, 2317 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, 2318 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 }, 2319 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 }, 2320 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 }, 2321 [NDTPA_GC_STALETIME] = { .type = NLA_U64 }, 2322 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 }, 2323 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 }, 2324 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 }, 2325 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 }, 2326 [NDTPA_LOCKTIME] = { .type = NLA_U64 }, 2327 
[NDTPA_INTERVAL_PROBE_TIME_MS] = NLA_POLICY_MIN(NLA_U64, 1), 2328 }; 2329 2330 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, 2331 struct netlink_ext_ack *extack) 2332 { 2333 struct net *net = sock_net(skb->sk); 2334 struct neigh_table *tbl; 2335 struct ndtmsg *ndtmsg; 2336 struct nlattr *tb[NDTA_MAX+1]; 2337 bool found = false; 2338 int err, tidx; 2339 2340 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, 2341 nl_neightbl_policy, extack); 2342 if (err < 0) 2343 goto errout; 2344 2345 if (tb[NDTA_NAME] == NULL) { 2346 err = -EINVAL; 2347 goto errout; 2348 } 2349 2350 ndtmsg = nlmsg_data(nlh); 2351 2352 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2353 tbl = neigh_tables[tidx]; 2354 if (!tbl) 2355 continue; 2356 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family) 2357 continue; 2358 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) { 2359 found = true; 2360 break; 2361 } 2362 } 2363 2364 if (!found) 2365 return -ENOENT; 2366 2367 /* 2368 * We acquire tbl->lock to be nice to the periodic timers and 2369 * make sure they always see a consistent set of values. 2370 */ 2371 write_lock_bh(&tbl->lock); 2372 2373 if (tb[NDTA_PARMS]) { 2374 struct nlattr *tbp[NDTPA_MAX+1]; 2375 struct neigh_parms *p; 2376 int i, ifindex = 0; 2377 2378 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX, 2379 tb[NDTA_PARMS], 2380 nl_ntbl_parm_policy, extack); 2381 if (err < 0) 2382 goto errout_tbl_lock; 2383 2384 if (tbp[NDTPA_IFINDEX]) 2385 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); 2386 2387 p = lookup_neigh_parms(tbl, net, ifindex); 2388 if (p == NULL) { 2389 err = -ENOENT; 2390 goto errout_tbl_lock; 2391 } 2392 2393 for (i = 1; i <= NDTPA_MAX; i++) { 2394 if (tbp[i] == NULL) 2395 continue; 2396 2397 switch (i) { 2398 case NDTPA_QUEUE_LEN: 2399 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2400 nla_get_u32(tbp[i]) * 2401 SKB_TRUESIZE(ETH_FRAME_LEN)); 2402 break; 2403 case NDTPA_QUEUE_LENBYTES: 2404 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2405 nla_get_u32(tbp[i])); 2406 break; 2407 case NDTPA_PROXY_QLEN: 2408 NEIGH_VAR_SET(p, PROXY_QLEN, 2409 nla_get_u32(tbp[i])); 2410 break; 2411 case NDTPA_APP_PROBES: 2412 NEIGH_VAR_SET(p, APP_PROBES, 2413 nla_get_u32(tbp[i])); 2414 break; 2415 case NDTPA_UCAST_PROBES: 2416 NEIGH_VAR_SET(p, UCAST_PROBES, 2417 nla_get_u32(tbp[i])); 2418 break; 2419 case NDTPA_MCAST_PROBES: 2420 NEIGH_VAR_SET(p, MCAST_PROBES, 2421 nla_get_u32(tbp[i])); 2422 break; 2423 case NDTPA_MCAST_REPROBES: 2424 NEIGH_VAR_SET(p, MCAST_REPROBES, 2425 nla_get_u32(tbp[i])); 2426 break; 2427 case NDTPA_BASE_REACHABLE_TIME: 2428 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, 2429 nla_get_msecs(tbp[i])); 2430 /* update reachable_time as well, otherwise, the change will 2431 * only be effective after the next time neigh_periodic_work 2432 * decides to recompute it (can be multiple minutes) 2433 */ 2434 p->reachable_time = 2435 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2436 break; 2437 case NDTPA_GC_STALETIME: 2438 NEIGH_VAR_SET(p, GC_STALETIME, 2439 nla_get_msecs(tbp[i])); 2440 break; 2441 case NDTPA_DELAY_PROBE_TIME: 2442 NEIGH_VAR_SET(p, DELAY_PROBE_TIME, 2443 nla_get_msecs(tbp[i])); 2444 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 2445 break; 2446 case NDTPA_INTERVAL_PROBE_TIME_MS: 2447 NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS, 2448 nla_get_msecs(tbp[i])); 2449 break; 2450 case NDTPA_RETRANS_TIME: 2451 NEIGH_VAR_SET(p, RETRANS_TIME, 2452 nla_get_msecs(tbp[i])); 2453 break; 2454 case NDTPA_ANYCAST_DELAY: 2455 NEIGH_VAR_SET(p, ANYCAST_DELAY, 2456
nla_get_msecs(tbp[i])); 2457 break; 2458 case NDTPA_PROXY_DELAY: 2459 NEIGH_VAR_SET(p, PROXY_DELAY, 2460 nla_get_msecs(tbp[i])); 2461 break; 2462 case NDTPA_LOCKTIME: 2463 NEIGH_VAR_SET(p, LOCKTIME, 2464 nla_get_msecs(tbp[i])); 2465 break; 2466 } 2467 } 2468 } 2469 2470 err = -ENOENT; 2471 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] || 2472 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) && 2473 !net_eq(net, &init_net)) 2474 goto errout_tbl_lock; 2475 2476 if (tb[NDTA_THRESH1]) 2477 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]); 2478 2479 if (tb[NDTA_THRESH2]) 2480 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]); 2481 2482 if (tb[NDTA_THRESH3]) 2483 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]); 2484 2485 if (tb[NDTA_GC_INTERVAL]) 2486 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]); 2487 2488 err = 0; 2489 2490 errout_tbl_lock: 2491 write_unlock_bh(&tbl->lock); 2492 errout: 2493 return err; 2494 } 2495 2496 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh, 2497 struct netlink_ext_ack *extack) 2498 { 2499 struct ndtmsg *ndtm; 2500 2501 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) { 2502 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request"); 2503 return -EINVAL; 2504 } 2505 2506 ndtm = nlmsg_data(nlh); 2507 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) { 2508 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request"); 2509 return -EINVAL; 2510 } 2511 2512 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) { 2513 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request"); 2514 return -EINVAL; 2515 } 2516 2517 return 0; 2518 } 2519 2520 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2521 { 2522 const struct nlmsghdr *nlh = cb->nlh; 2523 struct net *net = sock_net(skb->sk); 2524 int family, tidx, nidx = 0; 2525 int tbl_skip = cb->args[0]; 2526 int neigh_skip = cb->args[1]; 2527 struct neigh_table *tbl; 2528 2529 if (cb->strict_check) { 2530 int err = neightbl_valid_dump_info(nlh, cb->extack); 2531 2532 if (err < 0) 2533 return err; 2534 } 2535 2536 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2537 2538 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2539 struct neigh_parms *p; 2540 2541 tbl = neigh_tables[tidx]; 2542 if (!tbl) 2543 continue; 2544 2545 if (tidx < tbl_skip || (family && tbl->family != family)) 2546 continue; 2547 2548 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid, 2549 nlh->nlmsg_seq, RTM_NEWNEIGHTBL, 2550 NLM_F_MULTI) < 0) 2551 break; 2552 2553 nidx = 0; 2554 p = list_next_entry(&tbl->parms, list); 2555 list_for_each_entry_from(p, &tbl->parms_list, list) { 2556 if (!net_eq(neigh_parms_net(p), net)) 2557 continue; 2558 2559 if (nidx < neigh_skip) 2560 goto next; 2561 2562 if (neightbl_fill_param_info(skb, tbl, p, 2563 NETLINK_CB(cb->skb).portid, 2564 nlh->nlmsg_seq, 2565 RTM_NEWNEIGHTBL, 2566 NLM_F_MULTI) < 0) 2567 goto out; 2568 next: 2569 nidx++; 2570 } 2571 2572 neigh_skip = 0; 2573 } 2574 out: 2575 cb->args[0] = tidx; 2576 cb->args[1] = nidx; 2577 2578 return skb->len; 2579 } 2580 2581 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, 2582 u32 pid, u32 seq, int type, unsigned int flags) 2583 { 2584 u32 neigh_flags, neigh_flags_ext; 2585 unsigned long now = jiffies; 2586 struct nda_cacheinfo ci; 2587 struct nlmsghdr *nlh; 2588 struct ndmsg *ndm; 2589 2590 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2591 if (nlh == NULL) 2592 return -EMSGSIZE; 2593 2594 neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT; 
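/* struct ndmsg only has an 8-bit ndm_flags field, so the flag bits at and above NTF_EXT_SHIFT are split off here and exported through the separate NDA_FLAGS_EXT attribute further down. */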
2595 neigh_flags = neigh->flags & NTF_OLD_MASK; 2596 2597 ndm = nlmsg_data(nlh); 2598 ndm->ndm_family = neigh->ops->family; 2599 ndm->ndm_pad1 = 0; 2600 ndm->ndm_pad2 = 0; 2601 ndm->ndm_flags = neigh_flags; 2602 ndm->ndm_type = neigh->type; 2603 ndm->ndm_ifindex = neigh->dev->ifindex; 2604 2605 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key)) 2606 goto nla_put_failure; 2607 2608 read_lock_bh(&neigh->lock); 2609 ndm->ndm_state = neigh->nud_state; 2610 if (neigh->nud_state & NUD_VALID) { 2611 char haddr[MAX_ADDR_LEN]; 2612 2613 neigh_ha_snapshot(haddr, neigh, neigh->dev); 2614 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) { 2615 read_unlock_bh(&neigh->lock); 2616 goto nla_put_failure; 2617 } 2618 } 2619 2620 ci.ndm_used = jiffies_to_clock_t(now - neigh->used); 2621 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed); 2622 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated); 2623 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1; 2624 read_unlock_bh(&neigh->lock); 2625 2626 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) || 2627 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 2628 goto nla_put_failure; 2629 2630 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol)) 2631 goto nla_put_failure; 2632 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext)) 2633 goto nla_put_failure; 2634 2635 nlmsg_end(skb, nlh); 2636 return 0; 2637 2638 nla_put_failure: 2639 nlmsg_cancel(skb, nlh); 2640 return -EMSGSIZE; 2641 } 2642 2643 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, 2644 u32 pid, u32 seq, int type, unsigned int flags, 2645 struct neigh_table *tbl) 2646 { 2647 u32 neigh_flags, neigh_flags_ext; 2648 struct nlmsghdr *nlh; 2649 struct ndmsg *ndm; 2650 2651 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2652 if (nlh == NULL) 2653 return -EMSGSIZE; 2654 2655 neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT; 2656 neigh_flags = pn->flags & NTF_OLD_MASK; 2657 2658 ndm = nlmsg_data(nlh); 2659 ndm->ndm_family = tbl->family; 2660 ndm->ndm_pad1 = 0; 2661 ndm->ndm_pad2 = 0; 2662 ndm->ndm_flags = neigh_flags | NTF_PROXY; 2663 ndm->ndm_type = RTN_UNICAST; 2664 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0; 2665 ndm->ndm_state = NUD_NONE; 2666 2667 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) 2668 goto nla_put_failure; 2669 2670 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol)) 2671 goto nla_put_failure; 2672 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext)) 2673 goto nla_put_failure; 2674 2675 nlmsg_end(skb, nlh); 2676 return 0; 2677 2678 nla_put_failure: 2679 nlmsg_cancel(skb, nlh); 2680 return -EMSGSIZE; 2681 } 2682 2683 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid) 2684 { 2685 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 2686 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid); 2687 } 2688 2689 static bool neigh_master_filtered(struct net_device *dev, int master_idx) 2690 { 2691 struct net_device *master; 2692 2693 if (!master_idx) 2694 return false; 2695 2696 master = dev ? netdev_master_upper_dev_get(dev) : NULL; 2697 2698 /* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another 2699 * invalid value for ifindex to denote "no master". 
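Here -1 is that value: with NDA_MASTER == -1, any neighbour whose device does have a master is filtered out, so only masterless devices are dumped.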
2700 */ 2701 if (master_idx == -1) 2702 return !!master; 2703 2704 if (!master || master->ifindex != master_idx) 2705 return true; 2706 2707 return false; 2708 } 2709 2710 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx) 2711 { 2712 if (filter_idx && (!dev || dev->ifindex != filter_idx)) 2713 return true; 2714 2715 return false; 2716 } 2717 2718 struct neigh_dump_filter { 2719 int master_idx; 2720 int dev_idx; 2721 }; 2722 2723 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2724 struct netlink_callback *cb, 2725 struct neigh_dump_filter *filter) 2726 { 2727 struct net *net = sock_net(skb->sk); 2728 struct neighbour *n; 2729 int rc, h, s_h = cb->args[1]; 2730 int idx, s_idx = idx = cb->args[2]; 2731 struct neigh_hash_table *nht; 2732 unsigned int flags = NLM_F_MULTI; 2733 2734 if (filter->dev_idx || filter->master_idx) 2735 flags |= NLM_F_DUMP_FILTERED; 2736 2737 rcu_read_lock_bh(); 2738 nht = rcu_dereference_bh(tbl->nht); 2739 2740 for (h = s_h; h < (1 << nht->hash_shift); h++) { 2741 if (h > s_h) 2742 s_idx = 0; 2743 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; 2744 n != NULL; 2745 n = rcu_dereference_bh(n->next)) { 2746 if (idx < s_idx || !net_eq(dev_net(n->dev), net)) 2747 goto next; 2748 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2749 neigh_master_filtered(n->dev, filter->master_idx)) 2750 goto next; 2751 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2752 cb->nlh->nlmsg_seq, 2753 RTM_NEWNEIGH, 2754 flags) < 0) { 2755 rc = -1; 2756 goto out; 2757 } 2758 next: 2759 idx++; 2760 } 2761 } 2762 rc = skb->len; 2763 out: 2764 rcu_read_unlock_bh(); 2765 cb->args[1] = h; 2766 cb->args[2] = idx; 2767 return rc; 2768 } 2769 2770 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2771 struct netlink_callback *cb, 2772 struct neigh_dump_filter *filter) 2773 { 2774 struct pneigh_entry *n; 2775 struct net *net = sock_net(skb->sk); 2776 int rc, h, s_h = cb->args[3]; 2777 int idx, s_idx = idx = cb->args[4]; 2778 unsigned int flags = NLM_F_MULTI; 2779 2780 if (filter->dev_idx || filter->master_idx) 2781 flags |= NLM_F_DUMP_FILTERED; 2782 2783 read_lock_bh(&tbl->lock); 2784 2785 for (h = s_h; h <= PNEIGH_HASHMASK; h++) { 2786 if (h > s_h) 2787 s_idx = 0; 2788 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2789 if (idx < s_idx || pneigh_net(n) != net) 2790 goto next; 2791 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2792 neigh_master_filtered(n->dev, filter->master_idx)) 2793 goto next; 2794 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2795 cb->nlh->nlmsg_seq, 2796 RTM_NEWNEIGH, flags, tbl) < 0) { 2797 read_unlock_bh(&tbl->lock); 2798 rc = -1; 2799 goto out; 2800 } 2801 next: 2802 idx++; 2803 } 2804 } 2805 2806 read_unlock_bh(&tbl->lock); 2807 rc = skb->len; 2808 out: 2809 cb->args[3] = h; 2810 cb->args[4] = idx; 2811 return rc; 2812 2813 } 2814 2815 static int neigh_valid_dump_req(const struct nlmsghdr *nlh, 2816 bool strict_check, 2817 struct neigh_dump_filter *filter, 2818 struct netlink_ext_ack *extack) 2819 { 2820 struct nlattr *tb[NDA_MAX + 1]; 2821 int err, i; 2822 2823 if (strict_check) { 2824 struct ndmsg *ndm; 2825 2826 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2827 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request"); 2828 return -EINVAL; 2829 } 2830 2831 ndm = nlmsg_data(nlh); 2832 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex || 2833 ndm->ndm_state || ndm->ndm_type) { 2834 NL_SET_ERR_MSG(extack, "Invalid values in 
header for neighbor dump request"); 2835 return -EINVAL; 2836 } 2837 2838 if (ndm->ndm_flags & ~NTF_PROXY) { 2839 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request"); 2840 return -EINVAL; 2841 } 2842 2843 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), 2844 tb, NDA_MAX, nda_policy, 2845 extack); 2846 } else { 2847 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb, 2848 NDA_MAX, nda_policy, extack); 2849 } 2850 if (err < 0) 2851 return err; 2852 2853 for (i = 0; i <= NDA_MAX; ++i) { 2854 if (!tb[i]) 2855 continue; 2856 2857 /* all new attributes should require strict_check */ 2858 switch (i) { 2859 case NDA_IFINDEX: 2860 filter->dev_idx = nla_get_u32(tb[i]); 2861 break; 2862 case NDA_MASTER: 2863 filter->master_idx = nla_get_u32(tb[i]); 2864 break; 2865 default: 2866 if (strict_check) { 2867 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request"); 2868 return -EINVAL; 2869 } 2870 } 2871 } 2872 2873 return 0; 2874 } 2875 2876 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2877 { 2878 const struct nlmsghdr *nlh = cb->nlh; 2879 struct neigh_dump_filter filter = {}; 2880 struct neigh_table *tbl; 2881 int t, family, s_t; 2882 int proxy = 0; 2883 int err; 2884 2885 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2886 2887 /* check for full ndmsg structure presence, family member is 2888 * the same for both structures 2889 */ 2890 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) && 2891 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY) 2892 proxy = 1; 2893 2894 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack); 2895 if (err < 0 && cb->strict_check) 2896 return err; 2897 2898 s_t = cb->args[0]; 2899 2900 for (t = 0; t < NEIGH_NR_TABLES; t++) { 2901 tbl = neigh_tables[t]; 2902 2903 if (!tbl) 2904 continue; 2905 if (t < s_t || (family && tbl->family != family)) 2906 continue; 2907 if (t > s_t) 2908 memset(&cb->args[1], 0, sizeof(cb->args) - 2909 sizeof(cb->args[0])); 2910 if (proxy) 2911 err = pneigh_dump_table(tbl, skb, cb, &filter); 2912 else 2913 err = neigh_dump_table(tbl, skb, cb, &filter); 2914 if (err < 0) 2915 break; 2916 } 2917 2918 cb->args[0] = t; 2919 return skb->len; 2920 } 2921 2922 static int neigh_valid_get_req(const struct nlmsghdr *nlh, 2923 struct neigh_table **tbl, 2924 void **dst, int *dev_idx, u8 *ndm_flags, 2925 struct netlink_ext_ack *extack) 2926 { 2927 struct nlattr *tb[NDA_MAX + 1]; 2928 struct ndmsg *ndm; 2929 int err, i; 2930 2931 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2932 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request"); 2933 return -EINVAL; 2934 } 2935 2936 ndm = nlmsg_data(nlh); 2937 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 2938 ndm->ndm_type) { 2939 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request"); 2940 return -EINVAL; 2941 } 2942 2943 if (ndm->ndm_flags & ~NTF_PROXY) { 2944 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request"); 2945 return -EINVAL; 2946 } 2947 2948 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 2949 NDA_MAX, nda_policy, extack); 2950 if (err < 0) 2951 return err; 2952 2953 *ndm_flags = ndm->ndm_flags; 2954 *dev_idx = ndm->ndm_ifindex; 2955 *tbl = neigh_find_table(ndm->ndm_family); 2956 if (*tbl == NULL) { 2957 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request"); 2958 return -EAFNOSUPPORT; 2959 } 2960 2961 for (i = 0; i <= NDA_MAX; ++i) { 2962 if (!tb[i]) 2963 continue; 2964 2965 
switch (i) { 2966 case NDA_DST: 2967 if (nla_len(tb[i]) != (int)(*tbl)->key_len) { 2968 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request"); 2969 return -EINVAL; 2970 } 2971 *dst = nla_data(tb[i]); 2972 break; 2973 default: 2974 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request"); 2975 return -EINVAL; 2976 } 2977 } 2978 2979 return 0; 2980 } 2981 2982 static inline size_t neigh_nlmsg_size(void) 2983 { 2984 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2985 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2986 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */ 2987 + nla_total_size(sizeof(struct nda_cacheinfo)) 2988 + nla_total_size(4) /* NDA_PROBES */ 2989 + nla_total_size(4) /* NDA_FLAGS_EXT */ 2990 + nla_total_size(1); /* NDA_PROTOCOL */ 2991 } 2992 2993 static int neigh_get_reply(struct net *net, struct neighbour *neigh, 2994 u32 pid, u32 seq) 2995 { 2996 struct sk_buff *skb; 2997 int err = 0; 2998 2999 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL); 3000 if (!skb) 3001 return -ENOBUFS; 3002 3003 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0); 3004 if (err) { 3005 kfree_skb(skb); 3006 goto errout; 3007 } 3008 3009 err = rtnl_unicast(skb, net, pid); 3010 errout: 3011 return err; 3012 } 3013 3014 static inline size_t pneigh_nlmsg_size(void) 3015 { 3016 return NLMSG_ALIGN(sizeof(struct ndmsg)) 3017 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 3018 + nla_total_size(4) /* NDA_FLAGS_EXT */ 3019 + nla_total_size(1); /* NDA_PROTOCOL */ 3020 } 3021 3022 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh, 3023 u32 pid, u32 seq, struct neigh_table *tbl) 3024 { 3025 struct sk_buff *skb; 3026 int err = 0; 3027 3028 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL); 3029 if (!skb) 3030 return -ENOBUFS; 3031 3032 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl); 3033 if (err) { 3034 kfree_skb(skb); 3035 goto errout; 3036 } 3037 3038 err = rtnl_unicast(skb, net, pid); 3039 errout: 3040 return err; 3041 } 3042 3043 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3044 struct netlink_ext_ack *extack) 3045 { 3046 struct net *net = sock_net(in_skb->sk); 3047 struct net_device *dev = NULL; 3048 struct neigh_table *tbl = NULL; 3049 struct neighbour *neigh; 3050 void *dst = NULL; 3051 u8 ndm_flags = 0; 3052 int dev_idx = 0; 3053 int err; 3054 3055 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags, 3056 extack); 3057 if (err < 0) 3058 return err; 3059 3060 if (dev_idx) { 3061 dev = __dev_get_by_index(net, dev_idx); 3062 if (!dev) { 3063 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 3064 return -ENODEV; 3065 } 3066 } 3067 3068 if (!dst) { 3069 NL_SET_ERR_MSG(extack, "Network address not specified"); 3070 return -EINVAL; 3071 } 3072 3073 if (ndm_flags & NTF_PROXY) { 3074 struct pneigh_entry *pn; 3075 3076 pn = pneigh_lookup(tbl, net, dst, dev, 0); 3077 if (!pn) { 3078 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found"); 3079 return -ENOENT; 3080 } 3081 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid, 3082 nlh->nlmsg_seq, tbl); 3083 } 3084 3085 if (!dev) { 3086 NL_SET_ERR_MSG(extack, "No device specified"); 3087 return -EINVAL; 3088 } 3089 3090 neigh = neigh_lookup(tbl, dst, dev); 3091 if (!neigh) { 3092 NL_SET_ERR_MSG(extack, "Neighbour entry not found"); 3093 return -ENOENT; 3094 } 3095 3096 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid, 3097 nlh->nlmsg_seq); 3098 3099 neigh_release(neigh); 3100 3101 return err; 3102 } 3103 3104 void neigh_for_each(struct 
neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie) 3105 { 3106 int chain; 3107 struct neigh_hash_table *nht; 3108 3109 rcu_read_lock_bh(); 3110 nht = rcu_dereference_bh(tbl->nht); 3111 3112 read_lock(&tbl->lock); /* avoid resizes */ 3113 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 3114 struct neighbour *n; 3115 3116 for (n = rcu_dereference_bh(nht->hash_buckets[chain]); 3117 n != NULL; 3118 n = rcu_dereference_bh(n->next)) 3119 cb(n, cookie); 3120 } 3121 read_unlock(&tbl->lock); 3122 rcu_read_unlock_bh(); 3123 } 3124 EXPORT_SYMBOL(neigh_for_each); 3125 3126 /* The tbl->lock must be held as a writer and BH disabled. */ 3127 void __neigh_for_each_release(struct neigh_table *tbl, 3128 int (*cb)(struct neighbour *)) 3129 { 3130 int chain; 3131 struct neigh_hash_table *nht; 3132 3133 nht = rcu_dereference_protected(tbl->nht, 3134 lockdep_is_held(&tbl->lock)); 3135 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 3136 struct neighbour *n; 3137 struct neighbour __rcu **np; 3138 3139 np = &nht->hash_buckets[chain]; 3140 while ((n = rcu_dereference_protected(*np, 3141 lockdep_is_held(&tbl->lock))) != NULL) { 3142 int release; 3143 3144 write_lock(&n->lock); 3145 release = cb(n); 3146 if (release) { 3147 rcu_assign_pointer(*np, 3148 rcu_dereference_protected(n->next, 3149 lockdep_is_held(&tbl->lock))); 3150 neigh_mark_dead(n); 3151 } else 3152 np = &n->next; 3153 write_unlock(&n->lock); 3154 if (release) 3155 neigh_cleanup_and_release(n); 3156 } 3157 } 3158 } 3159 EXPORT_SYMBOL(__neigh_for_each_release); 3160 3161 int neigh_xmit(int index, struct net_device *dev, 3162 const void *addr, struct sk_buff *skb) 3163 { 3164 int err = -EAFNOSUPPORT; 3165 if (likely(index < NEIGH_NR_TABLES)) { 3166 struct neigh_table *tbl; 3167 struct neighbour *neigh; 3168 3169 tbl = neigh_tables[index]; 3170 if (!tbl) 3171 goto out; 3172 rcu_read_lock_bh(); 3173 if (index == NEIGH_ARP_TABLE) { 3174 u32 key = *((u32 *)addr); 3175 3176 neigh = __ipv4_neigh_lookup_noref(dev, key); 3177 } else { 3178 neigh = __neigh_lookup_noref(tbl, addr, dev); 3179 } 3180 if (!neigh) 3181 neigh = __neigh_create(tbl, addr, dev, false); 3182 err = PTR_ERR(neigh); 3183 if (IS_ERR(neigh)) { 3184 rcu_read_unlock_bh(); 3185 goto out_kfree_skb; 3186 } 3187 err = neigh->output(neigh, skb); 3188 rcu_read_unlock_bh(); 3189 } 3190 else if (index == NEIGH_LINK_TABLE) { 3191 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 3192 addr, NULL, skb->len); 3193 if (err < 0) 3194 goto out_kfree_skb; 3195 err = dev_queue_xmit(skb); 3196 } 3197 out: 3198 return err; 3199 out_kfree_skb: 3200 kfree_skb(skb); 3201 goto out; 3202 } 3203 EXPORT_SYMBOL(neigh_xmit); 3204 3205 #ifdef CONFIG_PROC_FS 3206 3207 static struct neighbour *neigh_get_first(struct seq_file *seq) 3208 { 3209 struct neigh_seq_state *state = seq->private; 3210 struct net *net = seq_file_net(seq); 3211 struct neigh_hash_table *nht = state->nht; 3212 struct neighbour *n = NULL; 3213 int bucket; 3214 3215 state->flags &= ~NEIGH_SEQ_IS_PNEIGH; 3216 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { 3217 n = rcu_dereference_bh(nht->hash_buckets[bucket]); 3218 3219 while (n) { 3220 if (!net_eq(dev_net(n->dev), net)) 3221 goto next; 3222 if (state->neigh_sub_iter) { 3223 loff_t fakep = 0; 3224 void *v; 3225 3226 v = state->neigh_sub_iter(state, n, &fakep); 3227 if (!v) 3228 goto next; 3229 } 3230 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3231 break; 3232 if (n->nud_state & ~NUD_NOARP) 3233 break; 3234 next: 3235 n = 
rcu_dereference_bh(n->next); 3236 } 3237 3238 if (n) 3239 break; 3240 } 3241 state->bucket = bucket; 3242 3243 return n; 3244 } 3245 3246 static struct neighbour *neigh_get_next(struct seq_file *seq, 3247 struct neighbour *n, 3248 loff_t *pos) 3249 { 3250 struct neigh_seq_state *state = seq->private; 3251 struct net *net = seq_file_net(seq); 3252 struct neigh_hash_table *nht = state->nht; 3253 3254 if (state->neigh_sub_iter) { 3255 void *v = state->neigh_sub_iter(state, n, pos); 3256 if (v) 3257 return n; 3258 } 3259 n = rcu_dereference_bh(n->next); 3260 3261 while (1) { 3262 while (n) { 3263 if (!net_eq(dev_net(n->dev), net)) 3264 goto next; 3265 if (state->neigh_sub_iter) { 3266 void *v = state->neigh_sub_iter(state, n, pos); 3267 if (v) 3268 return n; 3269 goto next; 3270 } 3271 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3272 break; 3273 3274 if (n->nud_state & ~NUD_NOARP) 3275 break; 3276 next: 3277 n = rcu_dereference_bh(n->next); 3278 } 3279 3280 if (n) 3281 break; 3282 3283 if (++state->bucket >= (1 << nht->hash_shift)) 3284 break; 3285 3286 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); 3287 } 3288 3289 if (n && pos) 3290 --(*pos); 3291 return n; 3292 } 3293 3294 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos) 3295 { 3296 struct neighbour *n = neigh_get_first(seq); 3297 3298 if (n) { 3299 --(*pos); 3300 while (*pos) { 3301 n = neigh_get_next(seq, n, pos); 3302 if (!n) 3303 break; 3304 } 3305 } 3306 return *pos ? NULL : n; 3307 } 3308 3309 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq) 3310 { 3311 struct neigh_seq_state *state = seq->private; 3312 struct net *net = seq_file_net(seq); 3313 struct neigh_table *tbl = state->tbl; 3314 struct pneigh_entry *pn = NULL; 3315 int bucket; 3316 3317 state->flags |= NEIGH_SEQ_IS_PNEIGH; 3318 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { 3319 pn = tbl->phash_buckets[bucket]; 3320 while (pn && !net_eq(pneigh_net(pn), net)) 3321 pn = pn->next; 3322 if (pn) 3323 break; 3324 } 3325 state->bucket = bucket; 3326 3327 return pn; 3328 } 3329 3330 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq, 3331 struct pneigh_entry *pn, 3332 loff_t *pos) 3333 { 3334 struct neigh_seq_state *state = seq->private; 3335 struct net *net = seq_file_net(seq); 3336 struct neigh_table *tbl = state->tbl; 3337 3338 do { 3339 pn = pn->next; 3340 } while (pn && !net_eq(pneigh_net(pn), net)); 3341 3342 while (!pn) { 3343 if (++state->bucket > PNEIGH_HASHMASK) 3344 break; 3345 pn = tbl->phash_buckets[state->bucket]; 3346 while (pn && !net_eq(pneigh_net(pn), net)) 3347 pn = pn->next; 3348 if (pn) 3349 break; 3350 } 3351 3352 if (pn && pos) 3353 --(*pos); 3354 3355 return pn; 3356 } 3357 3358 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos) 3359 { 3360 struct pneigh_entry *pn = pneigh_get_first(seq); 3361 3362 if (pn) { 3363 --(*pos); 3364 while (*pos) { 3365 pn = pneigh_get_next(seq, pn, pos); 3366 if (!pn) 3367 break; 3368 } 3369 } 3370 return *pos ? 
NULL : pn; 3371 } 3372 3373 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) 3374 { 3375 struct neigh_seq_state *state = seq->private; 3376 void *rc; 3377 loff_t idxpos = *pos; 3378 3379 rc = neigh_get_idx(seq, &idxpos); 3380 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3381 rc = pneigh_get_idx(seq, &idxpos); 3382 3383 return rc; 3384 } 3385 3386 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) 3387 __acquires(tbl->lock) 3388 __acquires(rcu_bh) 3389 { 3390 struct neigh_seq_state *state = seq->private; 3391 3392 state->tbl = tbl; 3393 state->bucket = 0; 3394 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH); 3395 3396 rcu_read_lock_bh(); 3397 state->nht = rcu_dereference_bh(tbl->nht); 3398 read_lock(&tbl->lock); 3399 3400 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; 3401 } 3402 EXPORT_SYMBOL(neigh_seq_start); 3403 3404 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3405 { 3406 struct neigh_seq_state *state; 3407 void *rc; 3408 3409 if (v == SEQ_START_TOKEN) { 3410 rc = neigh_get_first(seq); 3411 goto out; 3412 } 3413 3414 state = seq->private; 3415 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) { 3416 rc = neigh_get_next(seq, v, NULL); 3417 if (rc) 3418 goto out; 3419 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3420 rc = pneigh_get_first(seq); 3421 } else { 3422 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY); 3423 rc = pneigh_get_next(seq, v, NULL); 3424 } 3425 out: 3426 ++(*pos); 3427 return rc; 3428 } 3429 EXPORT_SYMBOL(neigh_seq_next); 3430 3431 void neigh_seq_stop(struct seq_file *seq, void *v) 3432 __releases(tbl->lock) 3433 __releases(rcu_bh) 3434 { 3435 struct neigh_seq_state *state = seq->private; 3436 struct neigh_table *tbl = state->tbl; 3437 3438 read_unlock(&tbl->lock); 3439 rcu_read_unlock_bh(); 3440 } 3441 EXPORT_SYMBOL(neigh_seq_stop); 3442 3443 /* statistics via seq_file */ 3444 3445 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 3446 { 3447 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3448 int cpu; 3449 3450 if (*pos == 0) 3451 return SEQ_START_TOKEN; 3452 3453 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { 3454 if (!cpu_possible(cpu)) 3455 continue; 3456 *pos = cpu+1; 3457 return per_cpu_ptr(tbl->stats, cpu); 3458 } 3459 return NULL; 3460 } 3461 3462 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3463 { 3464 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3465 int cpu; 3466 3467 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 3468 if (!cpu_possible(cpu)) 3469 continue; 3470 *pos = cpu+1; 3471 return per_cpu_ptr(tbl->stats, cpu); 3472 } 3473 (*pos)++; 3474 return NULL; 3475 } 3476 3477 static void neigh_stat_seq_stop(struct seq_file *seq, void *v) 3478 { 3479 3480 } 3481 3482 static int neigh_stat_seq_show(struct seq_file *seq, void *v) 3483 { 3484 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3485 struct neigh_statistics *st = v; 3486 3487 if (v == SEQ_START_TOKEN) { 3488 seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n"); 3489 return 0; 3490 } 3491 3492 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " 3493 "%08lx %08lx %08lx " 3494 "%08lx %08lx %08lx\n", 3495 atomic_read(&tbl->entries), 3496 3497 st->allocs, 3498 st->destroys, 3499 st->hash_grows, 3500 3501 st->lookups, 3502 st->hits, 3503 3504 st->res_failed, 3505 3506 
st->rcv_probes_mcast, 3507 st->rcv_probes_ucast, 3508 3509 st->periodic_gc_runs, 3510 st->forced_gc_runs, 3511 st->unres_discards, 3512 st->table_fulls 3513 ); 3514 3515 return 0; 3516 } 3517 3518 static const struct seq_operations neigh_stat_seq_ops = { 3519 .start = neigh_stat_seq_start, 3520 .next = neigh_stat_seq_next, 3521 .stop = neigh_stat_seq_stop, 3522 .show = neigh_stat_seq_show, 3523 }; 3524 #endif /* CONFIG_PROC_FS */ 3525 3526 static void __neigh_notify(struct neighbour *n, int type, int flags, 3527 u32 pid) 3528 { 3529 struct net *net = dev_net(n->dev); 3530 struct sk_buff *skb; 3531 int err = -ENOBUFS; 3532 3533 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC); 3534 if (skb == NULL) 3535 goto errout; 3536 3537 err = neigh_fill_info(skb, n, pid, 0, type, flags); 3538 if (err < 0) { 3539 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */ 3540 WARN_ON(err == -EMSGSIZE); 3541 kfree_skb(skb); 3542 goto errout; 3543 } 3544 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 3545 return; 3546 errout: 3547 if (err < 0) 3548 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 3549 } 3550 3551 void neigh_app_ns(struct neighbour *n) 3552 { 3553 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0); 3554 } 3555 EXPORT_SYMBOL(neigh_app_ns); 3556 3557 #ifdef CONFIG_SYSCTL 3558 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN); 3559 3560 static int proc_unres_qlen(struct ctl_table *ctl, int write, 3561 void *buffer, size_t *lenp, loff_t *ppos) 3562 { 3563 int size, ret; 3564 struct ctl_table tmp = *ctl; 3565 3566 tmp.extra1 = SYSCTL_ZERO; 3567 tmp.extra2 = &unres_qlen_max; 3568 tmp.data = &size; 3569 3570 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN); 3571 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3572 3573 if (write && !ret) 3574 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN); 3575 return ret; 3576 } 3577 3578 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p, 3579 int index) 3580 { 3581 struct net_device *dev; 3582 int family = neigh_parms_family(p); 3583 3584 rcu_read_lock(); 3585 for_each_netdev_rcu(net, dev) { 3586 struct neigh_parms *dst_p = 3587 neigh_get_dev_parms_rcu(dev, family); 3588 3589 if (dst_p && !test_bit(index, dst_p->data_state)) 3590 dst_p->data[index] = p->data[index]; 3591 } 3592 rcu_read_unlock(); 3593 } 3594 3595 static void neigh_proc_update(struct ctl_table *ctl, int write) 3596 { 3597 struct net_device *dev = ctl->extra1; 3598 struct neigh_parms *p = ctl->extra2; 3599 struct net *net = neigh_parms_net(p); 3600 int index = (int *) ctl->data - p->data; 3601 3602 if (!write) 3603 return; 3604 3605 set_bit(index, p->data_state); 3606 if (index == NEIGH_VAR_DELAY_PROBE_TIME) 3607 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 3608 if (!dev) /* NULL dev means this is default value */ 3609 neigh_copy_dflt_parms(net, p, index); 3610 } 3611 3612 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write, 3613 void *buffer, size_t *lenp, 3614 loff_t *ppos) 3615 { 3616 struct ctl_table tmp = *ctl; 3617 int ret; 3618 3619 tmp.extra1 = SYSCTL_ZERO; 3620 tmp.extra2 = SYSCTL_INT_MAX; 3621 3622 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3623 neigh_proc_update(ctl, write); 3624 return ret; 3625 } 3626 3627 static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write, 3628 void *buffer, size_t *lenp, loff_t *ppos) 3629 { 3630 struct ctl_table tmp = *ctl; 3631 int ret; 3632 3633 int min = msecs_to_jiffies(1); 3634 3635 tmp.extra1 = &min; 3636 
tmp.extra2 = NULL; 3637 3638 ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos); 3639 neigh_proc_update(ctl, write); 3640 return ret; 3641 } 3642 3643 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer, 3644 size_t *lenp, loff_t *ppos) 3645 { 3646 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 3647 3648 neigh_proc_update(ctl, write); 3649 return ret; 3650 } 3651 EXPORT_SYMBOL(neigh_proc_dointvec); 3652 3653 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer, 3654 size_t *lenp, loff_t *ppos) 3655 { 3656 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3657 3658 neigh_proc_update(ctl, write); 3659 return ret; 3660 } 3661 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies); 3662 3663 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write, 3664 void *buffer, size_t *lenp, 3665 loff_t *ppos) 3666 { 3667 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos); 3668 3669 neigh_proc_update(ctl, write); 3670 return ret; 3671 } 3672 3673 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, 3674 void *buffer, size_t *lenp, loff_t *ppos) 3675 { 3676 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3677 3678 neigh_proc_update(ctl, write); 3679 return ret; 3680 } 3681 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies); 3682 3683 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, 3684 void *buffer, size_t *lenp, 3685 loff_t *ppos) 3686 { 3687 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos); 3688 3689 neigh_proc_update(ctl, write); 3690 return ret; 3691 } 3692 3693 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 3694 void *buffer, size_t *lenp, 3695 loff_t *ppos) 3696 { 3697 struct neigh_parms *p = ctl->extra2; 3698 int ret; 3699 3700 if (strcmp(ctl->procname, "base_reachable_time") == 0) 3701 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3702 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) 3703 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3704 else 3705 ret = -1; 3706 3707 if (write && ret == 0) { 3708 /* update reachable_time as well, otherwise, the change will 3709 * only be effective after the next time neigh_periodic_work 3710 * decides to recompute it 3711 */ 3712 p->reachable_time = 3713 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 3714 } 3715 return ret; 3716 } 3717 3718 #define NEIGH_PARMS_DATA_OFFSET(index) \ 3719 (&((struct neigh_parms *) 0)->data[index]) 3720 3721 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \ 3722 [NEIGH_VAR_ ## attr] = { \ 3723 .procname = name, \ 3724 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \ 3725 .maxlen = sizeof(int), \ 3726 .mode = mval, \ 3727 .proc_handler = proc, \ 3728 } 3729 3730 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \ 3731 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax) 3732 3733 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \ 3734 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies) 3735 3736 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \ 3737 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies) 3738 3739 #define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \ 3740 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive) 3741 3742 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \ 3743 
NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies) 3744 3745 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \ 3746 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen) 3747 3748 static struct neigh_sysctl_table { 3749 struct ctl_table_header *sysctl_header; 3750 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; 3751 } neigh_sysctl_template __read_mostly = { 3752 .neigh_vars = { 3753 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"), 3754 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"), 3755 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"), 3756 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"), 3757 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"), 3758 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"), 3759 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"), 3760 NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS, 3761 "interval_probe_time_ms"), 3762 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"), 3763 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"), 3764 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"), 3765 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"), 3766 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"), 3767 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"), 3768 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"), 3769 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"), 3770 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"), 3771 [NEIGH_VAR_GC_INTERVAL] = { 3772 .procname = "gc_interval", 3773 .maxlen = sizeof(int), 3774 .mode = 0644, 3775 .proc_handler = proc_dointvec_jiffies, 3776 }, 3777 [NEIGH_VAR_GC_THRESH1] = { 3778 .procname = "gc_thresh1", 3779 .maxlen = sizeof(int), 3780 .mode = 0644, 3781 .extra1 = SYSCTL_ZERO, 3782 .extra2 = SYSCTL_INT_MAX, 3783 .proc_handler = proc_dointvec_minmax, 3784 }, 3785 [NEIGH_VAR_GC_THRESH2] = { 3786 .procname = "gc_thresh2", 3787 .maxlen = sizeof(int), 3788 .mode = 0644, 3789 .extra1 = SYSCTL_ZERO, 3790 .extra2 = SYSCTL_INT_MAX, 3791 .proc_handler = proc_dointvec_minmax, 3792 }, 3793 [NEIGH_VAR_GC_THRESH3] = { 3794 .procname = "gc_thresh3", 3795 .maxlen = sizeof(int), 3796 .mode = 0644, 3797 .extra1 = SYSCTL_ZERO, 3798 .extra2 = SYSCTL_INT_MAX, 3799 .proc_handler = proc_dointvec_minmax, 3800 }, 3801 {}, 3802 }, 3803 }; 3804 3805 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, 3806 proc_handler *handler) 3807 { 3808 int i; 3809 struct neigh_sysctl_table *t; 3810 const char *dev_name_source; 3811 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ]; 3812 char *p_name; 3813 3814 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT); 3815 if (!t) 3816 goto err; 3817 3818 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) { 3819 t->neigh_vars[i].data += (long) p; 3820 t->neigh_vars[i].extra1 = dev; 3821 t->neigh_vars[i].extra2 = p; 3822 } 3823 3824 if (dev) { 3825 dev_name_source = dev->name; 3826 /* Terminate the table early */ 3827 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 3828 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 3829 } else { 3830 struct neigh_table *tbl = p->tbl; 3831 dev_name_source = "default"; 3832 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval; 3833 
t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1; 3834 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2; 3835 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3; 3836 } 3837 3838 if (handler) { 3839 /* RetransTime */ 3840 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler; 3841 /* ReachableTime */ 3842 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler; 3843 /* RetransTime (in milliseconds)*/ 3844 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; 3845 /* ReachableTime (in milliseconds) */ 3846 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; 3847 } else { 3848 /* Those handlers will update p->reachable_time after 3849 * base_reachable_time(_ms) is set to ensure the new timer starts being 3850 * applied after the next neighbour update instead of waiting for 3851 * neigh_periodic_work to update its value (can be multiple minutes) 3852 * So any handler that replaces them should do this as well 3853 */ 3854 /* ReachableTime */ 3855 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = 3856 neigh_proc_base_reachable_time; 3857 /* ReachableTime (in milliseconds) */ 3858 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = 3859 neigh_proc_base_reachable_time; 3860 } 3861 3862 switch (neigh_parms_family(p)) { 3863 case AF_INET: 3864 p_name = "ipv4"; 3865 break; 3866 case AF_INET6: 3867 p_name = "ipv6"; 3868 break; 3869 default: 3870 BUG(); 3871 } 3872 3873 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", 3874 p_name, dev_name_source); 3875 t->sysctl_header = 3876 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars); 3877 if (!t->sysctl_header) 3878 goto free; 3879 3880 p->sysctl_table = t; 3881 return 0; 3882 3883 free: 3884 kfree(t); 3885 err: 3886 return -ENOBUFS; 3887 } 3888 EXPORT_SYMBOL(neigh_sysctl_register); 3889 3890 void neigh_sysctl_unregister(struct neigh_parms *p) 3891 { 3892 if (p->sysctl_table) { 3893 struct neigh_sysctl_table *t = p->sysctl_table; 3894 p->sysctl_table = NULL; 3895 unregister_net_sysctl_table(t->sysctl_header); 3896 kfree(t); 3897 } 3898 } 3899 EXPORT_SYMBOL(neigh_sysctl_unregister); 3900 3901 #endif /* CONFIG_SYSCTL */ 3902 3903 static int __init neigh_init(void) 3904 { 3905 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0); 3906 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0); 3907 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0); 3908 3909 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info, 3910 0); 3911 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0); 3912 3913 return 0; 3914 } 3915 3916 subsys_initcall(neigh_init); 3917
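/*
 * Usage sketch (illustrative addition, not part of the original file): how a
 * module might walk a neighbour table with the exported neigh_for_each()
 * helper above. The "count_reachable" callback and the init/exit names are
 * hypothetical; arp_tbl is the IPv4 neighbour table exported by
 * net/ipv4/arp.c. Because the callback runs under tbl->lock with BHs
 * disabled, it must not sleep and must not call back into the neighbour
 * layer (see the locking comment at the top of this file).
 *
 *	#include <linux/module.h>
 *	#include <net/neighbour.h>
 *	#include <net/arp.h>
 *
 *	// Called once per neighbour entry; cookie is the caller's counter.
 *	static void count_reachable(struct neighbour *n, void *cookie)
 *	{
 *		unsigned int *nr = cookie;
 *
 *		if (n->nud_state & NUD_REACHABLE)
 *			(*nr)++;
 *	}
 *
 *	static int __init neigh_count_init(void)
 *	{
 *		unsigned int nr = 0;
 *
 *		// Iterates every hash chain of the ARP table under tbl->lock.
 *		neigh_for_each(&arp_tbl, count_reachable, &nr);
 *		pr_info("%u reachable IPv4 neighbours\n", nr);
 *		return 0;
 *	}
 *
 *	static void __exit neigh_count_exit(void)
 *	{
 *	}
 *
 *	module_init(neigh_count_init);
 *	module_exit(neigh_count_exit);
 *	MODULE_LICENSE("GPL");
 *
 * The same pattern works for any registered table; only the snapshot-style
 * read is safe here, since modifying entries from the callback would need a
 * reference and the per-entry neigh->lock.
 */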