/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected by the rwlock tbl->lock.

   - All scans/updates of hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will deadlock if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is also used to protect other entry fields:
   - the timer
   - the resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow there is dev->hard_header.
   dev->hard_header is assumed to be simplistic and to make no
   callbacks into neighbour tables.
 */

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
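
/*
 * Illustrative sketch (not part of the original file): for base = 30 * HZ,
 * neigh_rand_reach_time() above returns a value in [15*HZ, 45*HZ). A
 * hypothetical self-check built on the same arithmetic:
 */
static bool __maybe_unused neigh_rand_reach_time_in_range(unsigned long base)
{
	unsigned long t = neigh_rand_reach_time(base);

	/* base ? [base/2, base/2 + base) : exactly 0 */
	return base ? (t >= (base >> 1) && t < (base >> 1) + base) : (t == 0);
}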

static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}
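
/*
 * Illustrative sketch (not part of the original file): the exemption rule
 * applied by neigh_update_gc_list() above, factored out for clarity --
 * PERMANENT and externally learned entries never sit on the gc list:
 */
static bool __maybe_unused neigh_exempt_from_gc(const struct neighbour *n)
{
	return (n->nud_state & NUD_PERMANENT) || (n->flags & NTF_EXT_LEARNED);
}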

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
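
/*
 * Illustrative sketch (not part of the original file): the pending timer
 * owns one reference -- neigh_add_timer() takes it, and either
 * neigh_del_timer() or the expiring handler drops it. Rearming under the
 * entry's write lock, as __neigh_update() does, therefore looks like:
 */
static void __maybe_unused neigh_rearm_timer_example(struct neighbour *n,
						     unsigned long when)
{
	neigh_del_timer(n);		/* drops the old timer's reference, if any */
	neigh_add_timer(n, when);	/* takes a new one */
}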

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
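
/*
 * Illustrative sketch (not part of the original file): bucket selection
 * throughout this file keeps the top hash_shift bits of the 32-bit hash,
 * so a table allocated with neigh_hash_alloc(shift) has 1 << shift
 * buckets and the index below is always in range:
 */
static u32 __maybe_unused neigh_bucket_index(u32 hash, unsigned int shift)
{
	return hash >> (32 - shift);
}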

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
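
/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * neigh_lookup() above -- on success the entry comes back with its
 * reference count raised, and the caller must drop it with
 * neigh_release() when done:
 */
static bool __maybe_unused neigh_entry_exists(struct neigh_table *tbl,
					      const void *pkey,
					      struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n)
		return false;
	neigh_release(n);
	return true;
}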

static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neigh_hash_table *nht;

	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
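
/*
 * Illustrative sketch (not part of the original file): pneigh_hash() above
 * folds only the trailing four bytes of the key (the whole address for
 * IPv4) and masks the result, so it always indexes within the proxy hash
 * table:
 */
static bool __maybe_unused pneigh_hash_in_range(const void *pkey,
						unsigned int key_len)
{
	return pneigh_hash(pkey, key_len) <= PNEIGH_HASHMASK;
}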

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->protocol = 0;
	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}
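
/*
 * Illustrative sketch (not part of the original file): neigh_suspect()
 * above and neigh_connect() below toggle the per-entry output hook, which
 * is what "fast path" means here -- a hypothetical probe of that state:
 */
static bool __maybe_unused neigh_fastpath_enabled(const struct neighbour *n)
{
	return n->output == n->ops->connected_output;
}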

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */
	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;

		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;

	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
					   NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very thin place. report_unreachable is a very
	   complicated routine. In particular, it can hit the same
	   neighbour entry!

	   So we try to be accurate and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);

	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}
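
/*
 * Illustrative sketch (not part of the original file): a simplified view
 * of the deadlines the timer handler above computes per state --
 * REACHABLE expires relative to the last confirmation, DELAY waits
 * DELAY_PROBE_TIME, and INCOMPLETE/PROBE retransmit every RETRANS_TIME.
 * Assumes the caller holds neigh->lock, as the handler does:
 */
static unsigned long __maybe_unused neigh_state_deadline(const struct neighbour *n)
{
	if (n->nud_state & NUD_REACHABLE)
		return n->confirmed + n->parms->reachable_time;
	if (n->nud_state & NUD_DELAY)
		return n->confirmed + NEIGH_VAR(n->parms, DELAY_PROBE_TIME);
	return jiffies + NEIGH_VAR(n->parms, RETRANS_TIME);
}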

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*,
		       const unsigned char *) = NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}



/* Generic update routine.
   -- lladdr is a new lladdr or NULL, if it is not supplied.
   -- new    is a new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */
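
/*
 * Illustrative sketch (not part of the original file): a typical
 * administrative caller of neigh_update() in the terms of the comment
 * above -- the caller already holds a reference (e.g. from
 * neigh_lookup()) and forces the new lladdr in:
 */
static int __maybe_unused neigh_admin_override_example(struct neighbour *n,
						       const u8 *lladdr)
{
	return neigh_update(n, lladdr, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
}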

static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		goto out;
	}

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update confirmed timestamp for neighbour entry after we
	 * received ARP packet even if it doesn't change IP to MAC binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk moving the locktime window with
	 * noop updates and ignoring relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;
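
	/*
	 * From here on the update really takes effect: rearm the state
	 * timer, publish any new lladdr under ha_lock, and flush or
	 * reinject packets that were queued while the entry was
	 * unresolved.
	 */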
	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;

			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);

	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
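
/*
 * Illustrative sketch (not part of the original file): readers of
 * neigh->ha retry under the ha_lock seqlock if __neigh_update() above
 * raced with their copy -- the same pattern neigh_resolve_output() and
 * neigh_connected_output() below use, and what neigh_ha_snapshot() in
 * <net/neighbour.h> wraps up:
 */
static void __maybe_unused neigh_ha_copy_example(u8 *dst,
						 const struct neighbour *n,
						 const struct net_device *dev)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&n->ha_lock);
		memcpy(dst, n->ha, dev->addr_len);
	} while (read_seqretry(&n->ha_lock, seq));
}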

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache *hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	if (parms->dev)
		dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
		neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
				  &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}

const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
};

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool exempt_from_gc;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
				 ndm->ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

	if (ndm->ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				     NETLINK_CB(skb).portid, extack);

	if (protocol)
		neigh->protocol = protocol;

	neigh_release(neigh);

out:
	return err;
}
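
/*
 * Illustrative sketch (not part of the original file): the
 * nested-attribute pattern neightbl_fill_parms() below relies on -- open
 * a nest, emit attributes, and cancel the whole nest if the skb runs out
 * of room:
 */
static int __maybe_unused neigh_fill_nest_example(struct sk_buff *skb)
{
	struct nlattr *nest = nla_nest_start_noflag(skb, NDTA_PARMS);

	if (!nest)
		return -ENOBUFS;
	if (nla_put_u32(skb, NDTPA_REFCNT, 1)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}
	return nla_nest_end(skb, nest);
}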

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len = tbl->key_len,
			.ndtc_entry_size = tbl->entry_size,
			.ndtc_entries = atomic_read(&tbl->entries),
			.ndtc_last_flush = jiffies_to_msecs(flush_delta),
			.ndtc_last_rand = jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics *st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs += st->allocs;
			ndst.ndts_destroys += st->destroys;
			ndst.ndts_hash_grows += st->hash_grows;
			ndst.ndts_res_failed += st->res_failed;
			ndst.ndts_lookups += st->lookups;
			ndst.ndts_hits += st->hits;
			ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs += st->forced_gc_runs;
			ndst.ndts_table_fulls += st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
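
/*
 * Usage sketch (userspace, not part of the kernel build): the messages
 * built above are what an RTM_GETNEIGHTBL dump returns.  A minimal raw
 * netlink request could look like the following; error handling and
 * attribute parsing are elided, and the struct layouts are the standard
 * uapi ones from <linux/neighbour.h> and <linux/rtnetlink.h>:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct ndtmsg ndtm;
 *	} req = {
 *		.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndtmsg)),
 *		.nlh.nlmsg_type = RTM_GETNEIGHTBL,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.ndtm.ndtm_family = AF_UNSPEC,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 *	// then recv() NLM_F_MULTI parts until NLMSG_DONE
 */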

static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME] = { .type = NLA_STRING },
	[NDTA_THRESH1] = { .type = NLA_U32 },
	[NDTA_THRESH2] = { .type = NLA_U32 },
	[NDTA_THRESH3] = { .type = NLA_U32 },
	[NDTA_GC_INTERVAL] = { .type = NLA_U64 },
	[NDTA_PARMS] = { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX] = { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
	[NDTPA_APP_PROBES] = { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
	[NDTPA_GC_STALETIME] = { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
	[NDTPA_LOCKTIME] = { .type = NLA_U64 },
};

static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well; otherwise the
				 * change only takes effect the next time
				 * neigh_periodic_work decides to recompute it
				 * (which can be multiple minutes away)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
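
/*
 * These table parameters are normally driven from userspace via iproute2,
 * e.g. (illustrative invocations):
 *
 *	ip ntable show
 *	ip ntable change name arp_cache locktime 500
 *
 * which translate to the RTM_GETNEIGHTBL/RTM_SETNEIGHTBL messages handled
 * in this file.
 */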

static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack)
{
	struct ndtmsg *ndtm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
		return -EINVAL;
	}

	ndtm = nlmsg_data(nlh);
	if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
		return -EINVAL;
	}

	return 0;
}

static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}

static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = neigh->ops->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = neigh->flags;
	ndm->ndm_type = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
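
/*
 * Note for readers: neightbl_dump_info() relies on the standard netlink
 * dump convention.  cb->args[] persists between successive recvmsg()
 * calls on the same dump, so args[0]/args[1] record how many tables and
 * per-device parameter blocks have already been emitted; a partial fill
 * returns early and the next invocation resumes from those indices.
 */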

static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = tbl->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = pn->flags | NTF_PROXY;
	ndm->ndm_type = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}

static bool neigh_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}
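
/*
 * The helper above and its ifindex counterpart below implement
 * `ip neigh show master br0` and `ip neigh show dev eth0` style dumps:
 * userspace passes NDA_MASTER and/or NDA_IFINDEX in the dump request,
 * non-matching entries are skipped, and the replies carry
 * NLM_F_DUMP_FILTERED so the requester knows filtering happened.
 */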

static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
{
	if (filter_idx && (!dev || dev->ifindex != filter_idx))
		return true;

	return false;
}

struct neigh_dump_filter {
	int master_idx;
	int dev_idx;
};

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}

static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH, flags, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;
}
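
/*
 * cb->args[] slot usage in this file: args[0] selects the table,
 * args[1]/args[2] are the hash-bucket/index resume point for the main
 * cache (neigh_dump_table), and args[3]/args[4] play the same role for
 * the proxy hash (pneigh_dump_table), so a dump can be resumed in either
 * phase without the two iterations clobbering each other's state.
 */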

static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	int err, i;

	if (strict_check) {
		struct ndmsg *ndm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
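
/*
 * Userspace view (illustrative): `ip neigh show` issues an RTM_GETNEIGH
 * dump with a plain ndmsg header, while `ip neigh show proxy` sets
 * ndm_flags to NTF_PROXY, which is what flips the `proxy` branch above
 * to walk the pneigh hash instead of the main cache.
 */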

static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}

static int neigh_get_reply(struct net *net, struct neighbour *neigh,
			   u32 pid, u32 seq)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}

static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}

static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
			    u32 pid, u32 seq, struct neigh_table *tbl)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}

static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}
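
/*
 * Minimal userspace sketch for the point-lookup handler above (assumes
 * IPv4, omits error handling; buffer sizing and the device name are
 * illustrative):
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct ndmsg ndm;
 *		char buf[64];
 *	} req = {
 *		.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),
 *		.n.nlmsg_type = RTM_GETNEIGH,
 *		.n.nlmsg_flags = NLM_F_REQUEST,
 *		.ndm.ndm_family = AF_INET,
 *		.ndm.ndm_ifindex = if_nametoindex("eth0"),
 *	};
 *	// append an NDA_DST attribute whose payload length matches
 *	// tbl->key_len (4 bytes for IPv4), send on a NETLINK_ROUTE
 *	// socket, and read back the RTM_NEWNEIGH unicast reply that
 *	// neigh_get_reply() builds
 */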

void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);

int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;

	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		if (index == NEIGH_ARP_TABLE) {
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	} else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
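
/*
 * In-kernel usage sketch for neigh_xmit() (how a tunnelling or label
 * switching path might drive it; variable names here are illustrative):
 *
 *	// resolve and transmit via the ARP table for an IPv4 next hop
 *	err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &nexthop_v4, skb);
 *
 *	// or bypass resolution entirely with a known link-layer address
 *	err = neigh_xmit(NEIGH_LINK_TABLE, out_dev, dest_mac, skb);
 *
 * When neighbour creation or header construction fails, the skb is freed
 * internally via out_kfree_skb, so callers must not free it again on
 * those paths.
 */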

#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
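
/*
 * Position bookkeeping note: the *_get_idx() helpers above consume *pos
 * step by step (each successful advance decrements it), so a non-zero
 * *pos on return means the requested offset lies past the end of the
 * table and NULL is reported to the seq_file core.
 */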

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);

/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start = neigh_stat_seq_start,
	.next = neigh_stat_seq_next,
	.stop = neigh_stat_seq_stop,
	.show = neigh_stat_seq_show,
};
#endif /* CONFIG_PROC_FS */
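
/*
 * The resulting files live under /proc/net/stat/, e.g. arp_cache or
 * ndisc_cache: one header line, then one hex-formatted line per possible
 * CPU.  Illustrative shell usage (gawk) to sum lookups across CPUs,
 * field 5 of each data line:
 *
 *	gawk 'NR > 1 { total += strtonum("0x" $5) } END { print total }' \
 *		/proc/net/stat/arp_cache
 */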

static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);

#ifdef CONFIG_SYSCTL
static int zero;
static int int_max = INT_MAX;
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = &zero;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}

static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}

static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}

static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void __user *buffer,
					   size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = &zero;
	tmp.extra2 = &int_max;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);
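
/*
 * Rounding note for proc_unres_qlen(): the packet-count view is derived
 * from the byte budget, so writing N packets stores
 * N * SKB_TRUESIZE(ETH_FRAME_LEN) bytes, and a later read performs the
 * division again.  Illustrative consequence: setting unres_qlen_bytes to
 * a value that is not a multiple of the truesize makes unres_qlen read
 * back rounded down by up to one packet.
 */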

int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);

static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void __user *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);

static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well; otherwise the change only
		 * takes effect the next time neigh_periodic_work decides to
		 * recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}

#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname = name, \
		.data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen = sizeof(int), \
		.mode = mval, \
		.proc_handler = proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
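
/*
 * How the offset trick above works: NEIGH_PARMS_DATA_OFFSET() records the
 * offset of data[index] within struct neigh_parms (computed against a
 * NULL base), and neigh_sysctl_register() later rebases each .data
 * pointer by adding the address of the actual parms instance:
 *
 *	t->neigh_vars[i].data += (long) p;
 *
 * so a single static template can serve every device's sysctl directory.
 */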

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname = "gc_interval",
			.maxlen = sizeof(int),
			.mode = 0644,
			.proc_handler = proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname = "gc_thresh1",
			.maxlen = sizeof(int),
			.mode = 0644,
			.extra1 = &zero,
			.extra2 = &int_max,
			.proc_handler = proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname = "gc_thresh2",
			.maxlen = sizeof(int),
			.mode = 0644,
			.extra1 = &zero,
			.extra2 = &int_max,
			.proc_handler = proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname = "gc_thresh3",
			.maxlen = sizeof(int),
			.mode = 0644,
			.extra1 = &zero,
			.extra2 = &int_max,
			.proc_handler = proc_dointvec_minmax,
		},
		{},
	},
};
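
/*
 * The template is instantiated per family and per device, so the
 * resulting paths look like (illustrative):
 *
 *	/proc/sys/net/ipv4/neigh/default/gc_thresh1
 *	/proc/sys/net/ipv4/neigh/eth0/retrans_time
 *	/proc/sys/net/ipv6/neigh/eth0/base_reachable_time_ms
 *
 * For example, `echo 2000 > .../eth0/base_reachable_time_ms` is handled
 * by the neigh_proc_base_reachable_time() handler wired up below.
 */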

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* These handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set, to ensure the new timer
		 * starts being applied after the next neighbour update
		 * instead of waiting for neigh_periodic_work to update its
		 * value (which can be multiple minutes away).  So any handler
		 * that replaces them should do this as well.
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}

subsys_initcall(neigh_init);