/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"

/* slide the own-OGM reception window of every originator one bit forward
 * for the given interface and refresh the cached per-interface count of
 * own OGMs echoed back to us (bcast_own_sum).
 * Called when this interface emits a new OGM (seqno advances by one). */
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			/* ogm_cnt_lock protects bcast_own and bcast_own_sum */
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			/* each interface owns NUM_WORDS words of the bitfield */
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			/* shift window by one seqno, do not set a mark */
			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}

/* replace the HNA (host network announcement) entries of orig_node with
 * the freshly received buffer, but only if length or content changed. */
static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       unsigned char *hna_buff, int hna_buff_len)
{
	if ((hna_buff_len != orig_node->hna_buff_len) ||
	    ((hna_buff_len > 0) &&
	     (orig_node->hna_buff_len > 0) &&
	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {

		if (orig_node->hna_buff_len > 0)
			hna_global_del_orig(bat_priv, orig_node,
					    "originator changed hna");

		if ((hna_buff_len > 0) && (hna_buff))
			hna_global_add_orig(bat_priv, orig_node,
					    hna_buff, hna_buff_len);
	}
}

/* switch orig_node's next hop to neigh_node (may be NULL to delete the
 * route), updating the HNA tables and debug log accordingly.
 * Takes a reference on the new router and releases the old one. */
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *neigh_node_tmp;

	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

		/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

		/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	/* the router holds a reference; if we cannot get one the neighbor
	 * is already being freed and must not be installed */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;
	neigh_node_tmp = orig_node->router;
	orig_node->router = neigh_node;
	if (neigh_node_tmp)
		neigh_node_free_ref(neigh_node_tmp);
}


/* update the route towards orig_node: change the router if it differs,
 * otherwise only refresh the HNA entries. */
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node, unsigned char *hna_buff,
		   int hna_buff_len)
{

	if (!orig_node)
		return;

	if (orig_node->router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node,
			     hna_buff, hna_buff_len);
	/* may be just HNA changed */
	else
		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
}

/* decide whether the link towards orig_neigh_node (via if_incoming) is
 * bidirectional and usable: computes the local transmit quality and an
 * asymmetry penalty, scales batman_packet->tq in place, and returns 1 if
 * the resulting tq reaches TQ_TOTAL_BIDRECT_LIMIT, 0 otherwise. */
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	unsigned char total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	/* the OGM came from a direct neighbor: look up (or create) the
	 * neighbor entry in the originator's own neighbor list */
	if (orig_node == orig_neigh_node) {
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {

			if (!compare_eth(tmp_neigh_node->addr,
					 orig_neigh_node->orig))
				continue;

			if (tmp_neigh_node->if_incoming != if_incoming)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			neigh_node = tmp_neigh_node;
		}
		rcu_read_unlock();

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		if (!neigh_node)
			goto out;

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {

			if (!compare_eth(tmp_neigh_node->addr,
					 orig_neigh_node->orig))
				continue;

			if (tmp_neigh_node->if_incoming != if_incoming)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			neigh_node = tmp_neigh_node;
		}
		rcu_read_unlock();

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		if (!neigh_node)
			goto out;
	}

	orig_node->last_valid = jiffies;

	/* snapshot the counters under the lock that protects them */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too less data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) /	neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more.  This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
					(TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
						(TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own,	tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	/* drop the reference taken (or created) above */
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* caller must hold the neigh_list_lock */
/* remove neigh_node from orig_node's bonding candidate list and release
 * the candidate reference. No-op if it is not currently a candidate. */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}

/* (re-)evaluate neigh_node as a bonding candidate for orig_node: it must
 * share the originator's primary address, be within BONDING_TQ_THRESHOLD
 * of the best tq, and not interfere (same interface/MAC) with an existing
 * candidate; otherwise it is removed from the candidate list. */
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node;
	uint8_t best_tq, interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ...  */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	if (!orig_node->router)
		goto candidate_del;

	best_tq = orig_node->router->tq_avg;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	/* the candidate list holds a reference */
	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return;
}

/* copy primary address for bonding */
/* only OGMs carrying PRIMARIES_FIRST_HOP identify the sender's primary
 * interface address */
static void bonding_save_primary(struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 struct batman_packet *batman_packet)
{
	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

/* process a (non-duplicate or equivalent) OGM for orig_node: refresh the
 * last-hop neighbor's tq ring buffer, possibly promote it to router when
 * its tq beats the current one, then update HNA and gateway state. */
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct hard_iface *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* the neighbor the OGM arrived from gets the fresh tq
		 * sample; all others receive a 0 sample (unless this is
		 * a duplicate, which must not skew their averages) */
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		     atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* never trust more HNA entries than the packet claims to carry */
	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
		orig_node_tmp = orig_node->router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		/* outside the expected range: either the host restarted or
		 * this is a very old packet; accept it only once per
		 * RESET_PROTECTION_MS to allow a legitimate seqno reset */
		if (time_after(jiffies, *last_reset +
			msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		/* only mark the bit for the neighbor the OGM came from */
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}

/* main OGM receive path: classify the packet (own echo, broadcast,
 * rebroadcast, duplicate, loop risk), update originator/routing state via
 * update_orig() and schedule rebroadcasts where the protocol demands. */
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation.  Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	/* check the packet's addresses against every active interface
	 * sharing our soft interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_eth(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
		"ignoring all packets with broadcast source addr (sender: %pM"
		")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_eth(orig_node->router->addr,
			 batman_packet->prev_sender)) &&
	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_eth(orig_node->router->addr,
			 orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						batman_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);

out_neigh:
	/* only a separately acquired orig_neigh_node needs its own release */
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	orig_node_free_ref(orig_node);
}

/* netdev receive hook for BAT packets: sanity-check the frame, make it
 * writable and linear, then hand it to the aggregation receive path.
 * Consumes the skb. */
int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	/* skb_cow()/skb_linearize() may have reallocated - re-read header */
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				hard_iface);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/* handle a batman icmp packet addressed to us: deliver non-echo-requests
 * to the userspace socket, answer echo requests with an ECHO_REPLY routed
 * back towards the originator. */
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* turn the request around: we become the originator */
	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* a batman icmp packet ran out of TTL in transit: answer echo requests
 * with a TTL_EXCEEDED message routed back towards the originator. */
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* bounce the packet back to its originator */
	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}


/* receive hook for batman icmp packets: validate the frame, record the
 * route if requested, then either consume the packet locally, report TTL
 * exceeded, or decrement the TTL and route it onwards. */
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* find a suitable router for this originator, and use
 * bonding if possible.
 * increases the found neighbors
 * refcount (the caller must release it with neigh_node_free_ref()).
 * Returns NULL when no usable router exists. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router = orig_node->router;
	router_orig = orig_node->router->orig_node;
	/* pin the default router while still inside the RCU section */
	if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
		rcu_read_unlock();
		return NULL;
	}

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */

	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		/* drop the hash reference right away; the RCU read
		 * section keeps primary_orig_node alive below */
		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;


	/* all nodes between should choose a candidate which
	 * is is not on the interface where the packet came
	 * in. */

	neigh_node_free_ref(router);
	first_candidate = NULL;
	router = NULL;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */

		list_for_each_entry_rcu(tmp_neigh_node,
				&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;
			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming != recv_if &&
			    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
				router = tmp_neigh_node;
				break;
			}
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;

		if (!router) {
			rcu_read_unlock();
			return NULL;
		}

		/* selected should point to the next element
		 * after the current router */
		spin_lock_bh(&primary_orig_node->neigh_list_lock);
		/* this is a list_move(), which unfortunately
		 * does not exist as rcu version */
		list_del_rcu(&primary_orig_node->bond_list);
		list_add_rcu(&primary_orig_node->bond_list,
			     &router->bonding_list);
		spin_unlock_bh(&primary_orig_node->neigh_list_lock);

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		list_for_each_entry_rcu(tmp_neigh_node,
			&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;

			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming == recv_if)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			/* if we don't have a router yet
			 * or this one is better, choose it. */
			if ((!router) ||
			    (tmp_neigh_node->tq_avg > router->tq_avg)) {
				/* decrement refcount of
				 * previously selected router */
				if (router)
					neigh_node_free_ref(router);

				router = tmp_neigh_node;
				/* extra reference kept for the selected
				 * router; the per-iteration reference is
				 * dropped below */
				atomic_inc_not_zero(&router->refcount);
			}

			neigh_node_free_ref(tmp_neigh_node);
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;
	}
return_router:
	rcu_read_unlock();
	return router;
}

/* sanity checks shared by the unicast receive paths: minimum length,
 * sane ethernet addresses and destination == this node.
 * Returns 0 if the packet may be processed, -1 if it must be dropped. */
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

/* forward a unicast packet towards its destination, fragmenting or
 * reassembling it on the way if required.
 * Returns NET_RX_SUCCESS or NET_RX_DROP. */
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		goto out;
	}

	/* get routing
information */
	rcu_read_lock();
	/* orig_hash_find() takes a reference, released at "out" */
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

	if (!orig_node)
		goto unlock;

	rcu_read_unlock();

	/* find_router() increases neigh_nodes refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);

	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	/* skb_cow() may have re-allocated the data area */
	unicast_packet = (struct unicast_packet *)skb->data;

	/* fragment the packet when it is a plain unicast packet that
	 * exceeds the outgoing interface's MTU and fragmentation is on */
	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	/* merge fragments again when the next hop's MTU can carry the
	 * reassembled packet */
	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* receive handler for plain unicast packets: deliver locally when
 * addressed to this node, otherwise forward via route_unicast_packet().
 * Returns NET_RX_SUCCESS or NET_RX_DROP. */
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

/* receive handler for fragmented unicast packets: reassemble fragments
 * destined for this node (buffering until all parts arrived), otherwise
 * forward the fragment via route_unicast_packet().
 * Returns NET_RX_SUCCESS or NET_RX_DROP. */
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_frag_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		/* hand the reassembled packet (now carrying a plain
		 * unicast header) up to the soft interface */
		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}


/* receive handler for batman-adv broadcast packets: duplicate-check
 * against the originator's sequence-number window, rebroadcast and
 * deliver locally. Returns NET_RX_SUCCESS or NET_RX_DROP. */
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;
	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->ttl < 2)
		goto out;

	rcu_read_lock();
	/* orig_hash_find() takes a reference, released at "out" */
	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto rcu_unlock;

	rcu_read_unlock();

	/* the seqno window is protected by the per-originator lock */
	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

rcu_unlock:
	rcu_read_unlock();
	goto out;
spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* receive handler for vis (network visualization) packets: dispatch
 * server-sync and client-update packets to the vis subsystem. The vis
 * code copies what it needs, so the skb is always reported as dropped
 * and freed by the caller. Always returns NET_RX_DROP. */
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(struct vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	   always free the skbuf. */
	return NET_RX_DROP;
}