/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "hash.h"
#include "hard-interface.h"
#include "originator.h"
#include "bridge_loop_avoidance.h"
#include "translation-table.h"
#include "send.h"

#include <linux/etherdevice.h>
#include <linux/crc16.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/if_vlan.h>

static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);

/* return the index of the claim */
static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
{
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	uint32_t hash = 0;

	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

/* return the index of the backbone gateway */
static inline uint32_t batadv_choose_backbone_gw(const void *data,
						 uint32_t size)
{
	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	uint32_t hash = 0;

	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

/* compares address and vid of two backbone gws */
static int batadv_compare_backbone_gw(const struct hlist_node *node,
				      const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
					 hash_entry);
	const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;

	if (!batadv_compare_eth(gw1->orig, gw2->orig))
		return 0;

	if (gw1->vid != gw2->vid)
		return 0;

	return 1;
}

/* compares address and vid of two claims */
static int batadv_compare_claim(const struct hlist_node *node,
				const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_claim,
					 hash_entry);
	const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
		return 0;

	if (cl1->vid != cl2->vid)
		return 0;

	return 1;
}

/* free a backbone gw */
static void
batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
{
	if (atomic_dec_and_test(&backbone_gw->refcount))
		kfree_rcu(backbone_gw, rcu);
}

/* finally deinitialize the claim */
static void batadv_claim_free_rcu(struct rcu_head *rcu)
{
	struct batadv_bla_claim *claim;

	claim = container_of(rcu, struct batadv_bla_claim, rcu);

	batadv_backbone_gw_free_ref(claim->backbone_gw);
	kfree(claim);
}

/* free a claim, call claim_free_rcu if it's the last reference */
static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
{
	if (atomic_dec_and_test(&claim->refcount))
		call_rcu(&claim->rcu, batadv_claim_free_rcu);
}

/**
 * batadv_claim_hash_find
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * looks for a claim in the hash, and returns it if found
 * or NULL otherwise.
 */
static struct batadv_bla_claim
*batadv_claim_hash_find(struct batadv_priv *bat_priv,
			struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!atomic_inc_not_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

/**
 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Returns the backbone gateway if found or NULL otherwise.
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv,
			  uint8_t *addr, unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!atomic_inc_not_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

/* delete all claims for a backbone */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_free_ref(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
}

/**
 * batadv_bla_send_claim - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	uint8_t *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY
			  * with XX    = claim type
			  * and YY:YY = group id
			  */
			 (uint8_t *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the client's mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the client's mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gw's mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG)
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}

/**
 * batadv_bla_get_backbone_gw
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * searches for the backbone gw or creates a new one if it could not
 * be found.
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, BATADV_PRINT_VID(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);

	/* one for the hash, one for returning */
	atomic_set(&entry->refcount, 2);

	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_free_ref(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}

/* update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_free_ref(backbone_gw);
}

/**
 * batadv_bla_answer_request - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came in
 * @vid: the vid where the request came in
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_free_ref(backbone_gw);
}

/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

/**
 * batadv_bla_send_announce
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 *
 * This function sends an announcement. It is called from multiple
 * places.
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	uint8_t mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	crc = htons(backbone_gw->crc);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}

/**
 * batadv_bla_add_claim - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		claim->vid = vid;
		claim->lasttime = jiffies;
		claim->backbone_gw = backbone_gw;

		atomic_set(&claim->refcount, 2);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			   mac, BATADV_PRINT_VID(vid));
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
			   mac, BATADV_PRINT_VID(vid));

		claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		batadv_backbone_gw_free_ref(claim->backbone_gw);
	}
	/* set (new) backbone gw */
	atomic_inc(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;

	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_free_ref(claim);
}

/* Delete a claim from the claim hash which has the
 * given mac address and vid.
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, BATADV_PRINT_VID(vid));

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_free_ref(claim); /* reference from the hash is gone */

	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

	/* don't need the reference from hash_find() anymore */
	batadv_claim_free_ref(claim);
}

/* check for ANNOUNCE frame, return 1 if handled */
static int batadv_handle_announce(struct batadv_priv *bat_priv,
				  uint8_t *an_addr, uint8_t *backbone_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	uint16_t crc;

	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return 0;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	crc = ntohs(*((__be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   BATADV_PRINT_VID(vid), backbone_gw->orig, crc);

	if (backbone_gw->crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   backbone_gw->orig,
			   BATADV_PRINT_VID(backbone_gw->vid),
			   backbone_gw->crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* check for REQUEST frame, return 1 if handled */
static int batadv_handle_request(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 struct ethhdr *ethhdr, unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   BATADV_PRINT_VID(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}

/* check for UNCLAIM frame, return 1 if handled */
static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 uint8_t *claim_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return 1;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		   claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* check for CLAIM frame, return 1 if handled */
static int batadv_handle_claim(struct batadv_priv *bat_priv,
			       struct batadv_hard_iface *primary_if,
			       uint8_t *backbone_addr, uint8_t *claim_addr,
			       unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}

/**
 * batadv_check_claim_group
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it's on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * returns:
 *	2  - if it is a claim packet and on the same group
 *	1  - if it is a claim packet from another group
 *	0  - if it is not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    uint8_t *hw_src, uint8_t *hw_dst,
				    struct ethhdr *ethhdr)
{
	uint8_t *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if it's already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friend's mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_free_ref(orig_node);

	return 2;
}

/**
 * batadv_bla_process_claim
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Check if this is a claim frame, and process it accordingly.
 *
 * returns 1 if it was a claim frame, otherwise return 0 to
 * tell the callee that it can use the frame on its own.
 */
static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	uint8_t *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return 0;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return 0; /* not a claim frame */

	/* this must be an ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* Check whether the ARP frame carries valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return 1;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return 1;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return 1;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return 1;
}

/* Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}

/**
 * batadv_bla_purge_claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set.
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_compare_eth(claim->backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;
			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			batadv_handle_unclaim(bat_priv, primary_if,
					      claim->backbone_gw->orig,
					      claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_update_orig_address
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Update the backbone gateways when the own orig address changes.
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}

/* periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}

/* The hash for claim and backbone hash receive the same key because they
 * are getting initialized by hash_new with the same key. Reinitializing
 * them with two different keys allows nested locking without generating
 * lockdep warnings.
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;

/* initialize all bla structures */
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	uint16_t crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_free_ref(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
	}

	/* initialize the duplicate list */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}

/**
 * batadv_bla_check_bcast_duplist
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however, as this might be intended.
 */
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				   struct sk_buff *skb)
{
	int i, curr, ret = 0;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc ... */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old ;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return 1 to forbid it.
		 */
		ret = 1;
		goto out;
	}
	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, it's the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}

/**
 * batadv_bla_is_backbone_gw_orig
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Check if the originator is a gateway for the VLAN identified by vid.
 *
 * Returns true if orig is a backbone for this vid, false otherwise.
 */
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
				    unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return false;

	if (!hash)
		return false;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
	}

	return false;
}

/**
 * batadv_bla_is_backbone_gw
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
 * if the orig_node is also a gateway on the soft interface, otherwise it
 * returns 0.
 */
int batadv_bla_is_backbone_gw(struct sk_buff *skb,
			      struct batadv_orig_node *orig_node, int hdr_size)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	unsigned short vid;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return 0;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return 0;

	vid = batadv_get_vid(skb, hdr_size);

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return 0;

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* free all bla structures (for softinterface free or module unload) */
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}

/**
 * batadv_bla_rx
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type.
 *
 * bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 */
int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid, bool is_bcast)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us!
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
1517 */ 1518 batadv_handle_claim(bat_priv, primary_if, 1519 primary_if->net_dev->dev_addr, 1520 ethhdr->h_source, vid); 1521 goto allow; 1522 } 1523 allow: 1524 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); 1525 ret = 0; 1526 goto out; 1527 1528 handled: 1529 kfree_skb(skb); 1530 ret = 1; 1531 1532 out: 1533 if (primary_if) 1534 batadv_hardif_free_ref(primary_if); 1535 if (claim) 1536 batadv_claim_free_ref(claim); 1537 return ret; 1538 } 1539 1540 /** 1541 * batadv_bla_tx 1542 * @bat_priv: the bat priv with all the soft interface information 1543 * @skb: the frame to be checked 1544 * @vid: the VLAN ID of the frame 1545 * 1546 * bla_tx checks if: 1547 * * a claim was received which has to be processed 1548 * * the frame is allowed on the mesh 1549 * 1550 * in these cases, the skb is further handled by this function and 1551 * returns 1, otherwise it returns 0 and the caller shall further 1552 * process the skb. 1553 * 1554 * This call might reallocate skb data. 1555 */ 1556 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, 1557 unsigned short vid) 1558 { 1559 struct ethhdr *ethhdr; 1560 struct batadv_bla_claim search_claim, *claim = NULL; 1561 struct batadv_hard_iface *primary_if; 1562 int ret = 0; 1563 1564 primary_if = batadv_primary_if_get_selected(bat_priv); 1565 if (!primary_if) 1566 goto out; 1567 1568 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1569 goto allow; 1570 1571 if (batadv_bla_process_claim(bat_priv, primary_if, skb)) 1572 goto handled; 1573 1574 ethhdr = eth_hdr(skb); 1575 1576 if (unlikely(atomic_read(&bat_priv->bla.num_requests))) 1577 /* don't allow broadcasts while requests are in flight */ 1578 if (is_multicast_ether_addr(ethhdr->h_dest)) 1579 goto handled; 1580 1581 ether_addr_copy(search_claim.addr, ethhdr->h_source); 1582 search_claim.vid = vid; 1583 1584 claim = batadv_claim_hash_find(bat_priv, &search_claim); 1585 1586 /* if no claim exists, allow it. */ 1587 if (!claim) 1588 goto allow; 1589 1590 /* check if we are responsible. */ 1591 if (batadv_compare_eth(claim->backbone_gw->orig, 1592 primary_if->net_dev->dev_addr)) { 1593 /* if yes, the client has roamed and we have 1594 * to unclaim it. 1595 */ 1596 batadv_handle_unclaim(bat_priv, primary_if, 1597 primary_if->net_dev->dev_addr, 1598 ethhdr->h_source, vid); 1599 goto allow; 1600 } 1601 1602 /* check if it is a multicast/broadcast frame */ 1603 if (is_multicast_ether_addr(ethhdr->h_dest)) { 1604 /* drop it. the responsible gateway has forwarded it into 1605 * the backbone network. 1606 */ 1607 goto handled; 1608 } else { 1609 /* we must allow it. at least if we are 1610 * responsible for the DESTINATION. 
1611 */ 1612 goto allow; 1613 } 1614 allow: 1615 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); 1616 ret = 0; 1617 goto out; 1618 handled: 1619 ret = 1; 1620 out: 1621 if (primary_if) 1622 batadv_hardif_free_ref(primary_if); 1623 if (claim) 1624 batadv_claim_free_ref(claim); 1625 return ret; 1626 } 1627 1628 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) 1629 { 1630 struct net_device *net_dev = (struct net_device *)seq->private; 1631 struct batadv_priv *bat_priv = netdev_priv(net_dev); 1632 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; 1633 struct batadv_bla_claim *claim; 1634 struct batadv_hard_iface *primary_if; 1635 struct hlist_head *head; 1636 uint32_t i; 1637 bool is_own; 1638 uint8_t *primary_addr; 1639 1640 primary_if = batadv_seq_print_text_primary_if_get(seq); 1641 if (!primary_if) 1642 goto out; 1643 1644 primary_addr = primary_if->net_dev->dev_addr; 1645 seq_printf(seq, 1646 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n", 1647 net_dev->name, primary_addr, 1648 ntohs(bat_priv->bla.claim_dest.group)); 1649 seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n", 1650 "Client", "VID", "Originator", "CRC"); 1651 for (i = 0; i < hash->size; i++) { 1652 head = &hash->table[i]; 1653 1654 rcu_read_lock(); 1655 hlist_for_each_entry_rcu(claim, head, hash_entry) { 1656 is_own = batadv_compare_eth(claim->backbone_gw->orig, 1657 primary_addr); 1658 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", 1659 claim->addr, BATADV_PRINT_VID(claim->vid), 1660 claim->backbone_gw->orig, 1661 (is_own ? 'x' : ' '), 1662 claim->backbone_gw->crc); 1663 } 1664 rcu_read_unlock(); 1665 } 1666 out: 1667 if (primary_if) 1668 batadv_hardif_free_ref(primary_if); 1669 return 0; 1670 } 1671 1672 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) 1673 { 1674 struct net_device *net_dev = (struct net_device *)seq->private; 1675 struct batadv_priv *bat_priv = netdev_priv(net_dev); 1676 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; 1677 struct batadv_bla_backbone_gw *backbone_gw; 1678 struct batadv_hard_iface *primary_if; 1679 struct hlist_head *head; 1680 int secs, msecs; 1681 uint32_t i; 1682 bool is_own; 1683 uint8_t *primary_addr; 1684 1685 primary_if = batadv_seq_print_text_primary_if_get(seq); 1686 if (!primary_if) 1687 goto out; 1688 1689 primary_addr = primary_if->net_dev->dev_addr; 1690 seq_printf(seq, 1691 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n", 1692 net_dev->name, primary_addr, 1693 ntohs(bat_priv->bla.claim_dest.group)); 1694 seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n", 1695 "Originator", "VID", "last seen", "CRC"); 1696 for (i = 0; i < hash->size; i++) { 1697 head = &hash->table[i]; 1698 1699 rcu_read_lock(); 1700 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { 1701 msecs = jiffies_to_msecs(jiffies - 1702 backbone_gw->lasttime); 1703 secs = msecs / 1000; 1704 msecs = msecs % 1000; 1705 1706 is_own = batadv_compare_eth(backbone_gw->orig, 1707 primary_addr); 1708 if (is_own) 1709 continue; 1710 1711 seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n", 1712 backbone_gw->orig, 1713 BATADV_PRINT_VID(backbone_gw->vid), secs, 1714 msecs, backbone_gw->crc); 1715 } 1716 rcu_read_unlock(); 1717 } 1718 out: 1719 if (primary_if) 1720 batadv_hardif_free_ref(primary_if); 1721 return 0; 1722 } 1723