/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"

static void purge_orig(struct work_struct *work);

static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}

/* returns 1 if they are the same originator */
static int compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct orig_node, hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	start_purge_timer(bat_priv);
	return 1;

err:
	return 0;
}

void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
{
	struct neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

struct neigh_node *create_neighbor(struct orig_node *orig_node,
				   struct orig_node *orig_neigh_node,
				   const uint8_t *neigh,
				   struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new last-hop neighbor of originator\n");

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		return NULL;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	spin_lock_init(&neigh_node->tq_lock);

	memcpy(neigh_node->addr, neigh, ETH_ALEN);
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = if_incoming;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_node;
}
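
/* RCU callback for orig_node_free_ref(): releases the bonding and neighbor
 * lists, any pending fragments and the global translation-table entries of
 * this originator before freeing the structure itself */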
static void orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node, *tmp_neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(rcu, struct orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	tt_global_del_orig(orig_node->bat_priv, orig_node,
			   "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

void orig_node_free_ref(struct orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, orig_node_free_rcu);
}
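
/* tear the originator hash down again: the purge worker is stopped first so
 * it cannot fire on a half-freed table, then every remaining originator
 * drops its hash reference */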
void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {

			hlist_del_rcu(node);
			orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
}

/* this function finds the originator entry for the given address and
 * creates it first if it does not exist yet */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	orig_node = orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_poss_change = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->tt_crc = 0;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	atomic_set(&orig_node->tt_size, 0);
	orig_node->bcast_seqno_reset = jiffies - 1
					- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
					- msecs_to_jiffies(RESET_PROTECTION_MS);

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig,
			      choose_orig, orig_node, &orig_node->hash_entry);
	if (hash_added != 0)
		goto free_bcast_own_sum;

	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((time_after(jiffies,
				neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			if ((neigh_node->if_incoming->if_status ==
							IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, "
					"neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, "
					"neighbor: %pM, last_valid: %lu\n",
					orig_node->orig, neigh_node->addr,
					(neigh_node->last_valid / HZ));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			bonding_candidate_del(orig_node, neigh_node);
			neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (time_after(jiffies,
		       orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {

		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_valid %lu\n",
			orig_node->orig, (orig_node->last_valid / HZ));
		return true;
	} else {
		if (purge_orig_neighbors(bat_priv, orig_node,
					 &best_neigh_node)) {
			update_route(bat_priv, orig_node, best_neigh_node);
		}
	}

	return false;
}
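
/* walk all hash buckets and get rid of originators (and their stale
 * fragment lists) that timed out; gateway bookkeeping runs afterwards
 * since a purged node may have been the selected gateway */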
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(node);
				orig_node_free_ref(orig_node);
				continue;
			}

			if (time_after(jiffies, orig_node->last_frag_packet +
					msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}

static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}
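
/* seq_file handler for the originator table: prints one line per originator
 * with its best next hop followed by all potential next hops (this is what
 * shows up in the "originators" debugfs file) */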
int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct hard_iface *primary_if;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node, *neigh_node_tmp;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);

	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s "
				 "disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			neigh_node = orig_node_get_router(orig_node);
			if (!neigh_node)
				continue;

			if (neigh_node->tq_avg == 0)
				goto next;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) % 1000;

			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)",
					   neigh_node_tmp->addr,
					   neigh_node_tmp->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;

next:
			neigh_node_free_ref(neigh_node);
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr)
		return -1;

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -1;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}
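
/* shrink the bcast_own/bcast_own_sum arrays: the slot belonging to the
 * interface being removed (del_if_num) is cut out, everything behind it
 * moves up by one */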
static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr)
		return -1;

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part; chunk_size counts bytes, so do the source offset
	 * arithmetic on a byte pointer as well, not on bcast_own itself */
	memcpy((char *)data_ptr + del_if_num * chunk_size,
	       (uint8_t *)orig_node->bcast_own +
					(del_if_num + 1) * chunk_size,
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -1;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hard_iface *hard_iface_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
		if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}