/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"

#include <linux/crc16.h>

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message);
static void tt_purge(struct work_struct *work);

/* returns 1 if they are the same mac addr */
static int compare_ltt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_local_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/* returns 1 if they are the same mac addr */
static int compare_gtt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_global_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}

static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 const void *data)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
		if (!compare_eth(tt_local_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_local_entry->refcount))
			continue;

		tt_local_entry_tmp = tt_local_entry;
		break;
	}
	rcu_read_unlock();

	return tt_local_entry_tmp;
}

static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   const void *data)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_global_entry *tt_global_entry;
	struct tt_global_entry *tt_global_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
		if (!compare_eth(tt_global_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_global_entry->refcount))
			continue;

		tt_global_entry_tmp = tt_global_entry;
		break;
	}
	rcu_read_unlock();

	return tt_global_entry_tmp;
}
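
/* tt_local_hash_find() and tt_global_hash_find() (above) look up an entry
 * under rcu_read_lock() and take a reference on the match with
 * atomic_inc_not_zero(), skipping entries whose refcount has already
 * dropped to zero. Callers must release the reference with
 * tt_local_entry_free_ref()/tt_global_entry_free_ref() when done. */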

static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
{
	unsigned long deadline;
	deadline = starting_time + msecs_to_jiffies(timeout);

	return time_after(jiffies, deadline);
}

static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->refcount))
		kfree_rcu(tt_local_entry, rcu);
}

static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_global_entry *tt_global_entry;

	tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);

	if (tt_global_entry->orig_node)
		orig_node_free_ref(tt_global_entry->orig_node);

	kfree(tt_global_entry);
}

static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->refcount))
		call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
}

static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);

	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}

static int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 1;

	bat_priv->tt_local_hash = hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return 0;

	return 1;
}

void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		  int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
	tt_local_entry->last_seen = jiffies;
	tt_local_entry->flags = NO_FLAGS;
	if (is_wifi_iface(ifindex))
		tt_local_entry->flags |= TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->refcount, 2);

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->flags |= TT_CLIENT_NOPURGE;

	tt_local_event(bat_priv, addr, tt_local_entry->flags);

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check) */
	tt_local_entry->flags |= TT_CLIENT_NEW;

	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		 tt_local_entry, &tt_local_entry->hash_entry);

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether this is a roaming event */
	if (tt_global_entry) {
		/* This node is probably going to update its tt table */
		tt_global_entry->orig_node->tt_poss_change = true;
		/* The global entry has to be marked as PENDING and has to be
		 * kept for consistency purposes */
		tt_global_entry->flags |= TT_CLIENT_PENDING;
		send_roam_adv(bat_priv, tt_global_entry->addr,
			      tt_global_entry->orig_node);
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size
	 * problems */
	if (buff_len > 0) {
		/**
		 * if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return tot_changes;
}

int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 29;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 30, " * %pM "
					"[%c%c%c%c%c]\n",
					tt_local_entry->addr,
					(tt_local_entry->flags &
					 TT_CLIENT_ROAM ? 'R' : '.'),
					(tt_local_entry->flags &
					 TT_CLIENT_NOPURGE ? 'P' : '.'),
					(tt_local_entry->flags &
					 TT_CLIENT_NEW ? 'N' : '.'),
					(tt_local_entry->flags &
					 TT_CLIENT_PENDING ? 'X' : '.'),
					(tt_local_entry->flags &
					 TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags)
{
	tt_local_event(bat_priv, tt_local_entry->addr,
		       tt_local_entry->flags | flags);

	/* The local client has to be marked as "pending to be removed" but
	 * has to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency
	 * check) */
	tt_local_entry->flags |= TT_CLIENT_PENDING;
}

void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
		     const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS));

	bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
		"%s\n", tt_local_entry->addr, message);
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->flags & TT_CLIENT_PENDING)
				continue;

			if (!is_out_of_time(tt_local_entry->last_seen,
					    TT_LOCAL_TIMEOUT * 1000))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL);
			bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
				"pending to be removed: timed out\n",
				tt_local_entry->addr);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	int i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}
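
/* The local table above holds the clients attached to this node's
 * soft-interface; the global table below maps clients announced by other
 * originators to the originator they can be reached through. */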

static int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 1;

	bat_priv->tt_global_hash = hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return 0;

	return 1;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node_tmp;
	int ret = 0;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry =
			kmalloc(sizeof(*tt_global_entry),
				GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
		/* Assign the new orig_node */
		atomic_inc(&orig_node->refcount);
		tt_global_entry->orig_node = orig_node;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
		atomic_set(&tt_global_entry->refcount, 2);

		hash_add(bat_priv->tt_global_hash, compare_gtt,
			 choose_orig, tt_global_entry,
			 &tt_global_entry->hash_entry);
		atomic_inc(&orig_node->tt_size);
	} else {
		if (tt_global_entry->orig_node != orig_node) {
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			orig_node_tmp = tt_global_entry->orig_node;
			atomic_inc(&orig_node->refcount);
			tt_global_entry->orig_node = orig_node;
			orig_node_free_ref(orig_node_tmp);
			atomic_inc(&orig_node->tt_size);
		}
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
	}

	if (wifi)
		tt_global_entry->flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->addr, orig_node->orig);

	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}

int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
seq_printf(seq, " %-13s %s %-15s %s %s\n", 616 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags"); 617 618 buf_size = 1; 619 /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via 620 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/ 621 for (i = 0; i < hash->size; i++) { 622 head = &hash->table[i]; 623 624 rcu_read_lock(); 625 __hlist_for_each_rcu(node, head) 626 buf_size += 67; 627 rcu_read_unlock(); 628 } 629 630 buff = kmalloc(buf_size, GFP_ATOMIC); 631 if (!buff) { 632 ret = -ENOMEM; 633 goto out; 634 } 635 636 buff[0] = '\0'; 637 pos = 0; 638 639 for (i = 0; i < hash->size; i++) { 640 head = &hash->table[i]; 641 642 rcu_read_lock(); 643 hlist_for_each_entry_rcu(tt_global_entry, node, 644 head, hash_entry) { 645 pos += snprintf(buff + pos, 69, 646 " * %pM (%3u) via %pM (%3u) " 647 "[%c%c%c]\n", tt_global_entry->addr, 648 tt_global_entry->ttvn, 649 tt_global_entry->orig_node->orig, 650 (uint8_t) atomic_read( 651 &tt_global_entry->orig_node-> 652 last_ttvn), 653 (tt_global_entry->flags & 654 TT_CLIENT_ROAM ? 'R' : '.'), 655 (tt_global_entry->flags & 656 TT_CLIENT_PENDING ? 'X' : '.'), 657 (tt_global_entry->flags & 658 TT_CLIENT_WIFI ? 'W' : '.')); 659 } 660 rcu_read_unlock(); 661 } 662 663 seq_printf(seq, "%s", buff); 664 kfree(buff); 665 out: 666 if (primary_if) 667 hardif_free_ref(primary_if); 668 return ret; 669 } 670 671 static void _tt_global_del(struct bat_priv *bat_priv, 672 struct tt_global_entry *tt_global_entry, 673 const char *message) 674 { 675 if (!tt_global_entry) 676 goto out; 677 678 bat_dbg(DBG_TT, bat_priv, 679 "Deleting global tt entry %pM (via %pM): %s\n", 680 tt_global_entry->addr, tt_global_entry->orig_node->orig, 681 message); 682 683 atomic_dec(&tt_global_entry->orig_node->tt_size); 684 685 hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, 686 tt_global_entry->addr); 687 out: 688 if (tt_global_entry) 689 tt_global_entry_free_ref(tt_global_entry); 690 } 691 692 void tt_global_del(struct bat_priv *bat_priv, 693 struct orig_node *orig_node, const unsigned char *addr, 694 const char *message, bool roaming) 695 { 696 struct tt_global_entry *tt_global_entry = NULL; 697 698 tt_global_entry = tt_global_hash_find(bat_priv, addr); 699 if (!tt_global_entry) 700 goto out; 701 702 if (tt_global_entry->orig_node == orig_node) { 703 if (roaming) { 704 tt_global_entry->flags |= TT_CLIENT_ROAM; 705 tt_global_entry->roam_at = jiffies; 706 goto out; 707 } 708 _tt_global_del(bat_priv, tt_global_entry, message); 709 } 710 out: 711 if (tt_global_entry) 712 tt_global_entry_free_ref(tt_global_entry); 713 } 714 715 void tt_global_del_orig(struct bat_priv *bat_priv, 716 struct orig_node *orig_node, const char *message) 717 { 718 struct tt_global_entry *tt_global_entry; 719 int i; 720 struct hashtable_t *hash = bat_priv->tt_global_hash; 721 struct hlist_node *node, *safe; 722 struct hlist_head *head; 723 spinlock_t *list_lock; /* protects write access to the hash lists */ 724 725 if (!hash) 726 return; 727 728 for (i = 0; i < hash->size; i++) { 729 head = &hash->table[i]; 730 list_lock = &hash->list_locks[i]; 731 732 spin_lock_bh(list_lock); 733 hlist_for_each_entry_safe(tt_global_entry, node, safe, 734 head, hash_entry) { 735 if (tt_global_entry->orig_node == orig_node) { 736 bat_dbg(DBG_TT, bat_priv, 737 "Deleting global tt entry %pM " 738 "(via %pM): originator time out\n", 739 tt_global_entry->addr, 740 tt_global_entry->orig_node->orig); 741 hlist_del_rcu(node); 742 tt_global_entry_free_ref(tt_global_entry); 743 } 744 } 745 spin_unlock_bh(list_lock); 746 } 747 
	atomic_set(&orig_node->tt_size, 0);
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
				continue;
			if (!is_out_of_time(tt_global_entry->roam_at,
					    TT_CLIENT_ROAM_TIMEOUT * 1000))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting global "
				"tt entry (%pM): Roaming timeout\n",
				tt_global_entry->addr);
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	int i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}

static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
			    struct tt_global_entry *tt_global_entry)
{
	bool ret = false;

	if (tt_local_entry->flags & TT_CLIENT_WIFI &&
	    tt_global_entry->flags & TT_CLIENT_WIFI)
		ret = true;

	return ret;
}

struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *src, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation */
	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
		goto out;

	/* A global client marked as PENDING has already moved from that
	 * originator */
	if (tt_global_entry->flags & TT_CLIENT_PENDING)
		goto out;

	orig_node = tt_global_entry->orig_node;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}
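
/* tt_global_crc() and tt_local_crc() (below) fold each eligible client MAC
 * into a per-entry CRC16 (byte by byte via crc16_byte()) and XOR the
 * per-entry values into the table checksum; since XOR is commutative, the
 * result does not depend on the order in which entries are visited. */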

/* Calculates the checksum of the local table of a given orig_node */
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			if (compare_eth(tt_global_entry->orig_node,
					orig_node)) {
				/* Roaming clients are in the global table for
				 * consistency only. They don't have to be
				 * taken into account while computing the
				 * global crc */
				if (tt_global_entry->flags & TT_CLIENT_ROAM)
					continue;
				total_one = 0;
				for (j = 0; j < ETH_ALEN; j++)
					total_one = crc16_byte(total_one,
						tt_global_entry->addr[j]);
				total ^= total_one;
			}
		}
		rcu_read_unlock();
	}

	return total;
}

/* Calculates the checksum of the local table */
uint16_t tt_local_crc(struct bat_priv *bat_priv)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			/* not yet committed clients must not be taken into
			 * account while computing the CRC */
			if (tt_local_entry->flags & TT_CLIENT_NEW)
				continue;
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						tt_local_entry->addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}

static void tt_req_list_free(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_buff, uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes) */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void tt_req_purge(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (is_out_of_time(node->issued_at,
				   TT_REQUEST_TIMEOUT * 1000)) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
					   struct orig_node *orig_node)
{
	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
		if (compare_eth(tt_req_node_tmp, orig_node) &&
		    !is_out_of_time(tt_req_node_tmp->issued_at,
				    TT_REQUEST_TIMEOUT * 1000))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
	return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_local_entry *tt_local_entry = entry_ptr;

	if (tt_local_entry->flags & TT_CLIENT_NEW)
		return 0;
	return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_global_entry *tt_global_entry = entry_ptr;
	const struct orig_node *orig_node = data_ptr;

	if (tt_global_entry->flags & TT_CLIENT_ROAM)
		return 0;

	return (tt_global_entry->orig_node == orig_node);
}

static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_local_entry *tt_local_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	int i;

	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}

static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}

static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, so
	 * I'll send only one packet with as many TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;
}

static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, so
	 * I'll send only one packet with as many TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}

bool send_tt_response(struct bat_priv *bat_priv,
		      struct tt_query_packet *tt_request)
{
	if (is_my_mac(tt_request->dst))
		return send_my_tt_response(bat_priv, tt_request);
	else
		return send_other_tt_response(bat_priv, tt_request);
}

static void _tt_update_changes(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct tt_change *tt_change,
			       uint16_t tt_num_changes, uint8_t ttvn)
{
	int i;

	for (i = 0; i < tt_num_changes; i++) {
		if ((tt_change + i)->flags & TT_CLIENT_DEL)
			tt_global_del(bat_priv, orig_node,
				      (tt_change + i)->addr,
				      "tt removed by changes",
				      (tt_change + i)->flags & TT_CLIENT_ROAM);
		else
			if (!tt_global_add(bat_priv, orig_node,
					   (tt_change + i)->addr, ttvn, false,
					   (tt_change + i)->flags &
							TT_CLIENT_WIFI))
				/* In case of problem while storing a
				 * global_entry, we stop the updating
				 * procedure without committing the
				 * ttvn change. This will avoid sending
				 * corrupted data on tt_request
				 */
				return;
	}
}

static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);

out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

static void tt_update_changes(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      uint16_t tt_num_changes, uint8_t ttvn,
			      struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}

bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	bool ret = false;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;
	/* Check if the client has been logically deleted (but is kept for
	 * consistency purpose) */
	if (tt_local_entry->flags & TT_CLIENT_PENDING)
		goto out;
	ret = true;
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv))
		return 0;

	if (!tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);

	return 1;
}

static void tt_roam_list_free(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

static void tt_roam_purge(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		if (!is_out_of_time(node->first_time,
				    ROAMING_MAX_TIME * 1000))
			continue;

		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* Only allow a new ROAMING_ADV if this client still has roaming
	 * budget left within the current ROAMING_MAX_TIME window */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}

void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->packet_type = BAT_ROAM_ADV;
	roam_adv_packet->version = COMPAT_VERSION;
	roam_adv_packet->ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}

static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}

void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}

/* This function will clear the specified flags on all entries of the local
 * table and will increment num_local_tt for each affected entry */
static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
{
	int i;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_local_entry *tt_local_entry;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			if (!(tt_local_entry->flags & flags))
				continue;
			tt_local_entry->flags &= ~flags;
			atomic_inc(&bat_priv->num_local_tt);
		}
		rcu_read_unlock();
	}
}

/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
				"(%pM): pending\n", tt_local_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

void tt_commit_changes(struct bat_priv *bat_priv)
{
	tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
	tt_local_purge_pending_clients(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_priv->tt_poss_change = false;
}

bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->ap_isolation))
		return false;

	tt_local_entry = tt_local_hash_find(bat_priv, dst);
	if (!tt_local_entry)
		goto out;

	tt_global_entry = tt_global_hash_find(bat_priv, src);
	if (!tt_global_entry)
		goto out;

	if (_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	ret = false;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
		    const unsigned char *tt_buff, uint8_t tt_num_changes,
		    uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* the ttvn increased by one -> we can apply the attached changes */
	if (ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}