/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/dsfield.h>
#include <net/rtnetlink.h>

#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_HLIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_free(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures
	 * for their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU
	 * callbacks accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the
 *  real interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 *
 * Returns 'true' if the mac address was found, false otherwise.
 */
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
	const struct batadv_hard_iface *hard_iface;
	bool is_my_mac = false;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			is_my_mac = true;
			break;
		}
	}
	rcu_read_unlock();
	return is_my_mac;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 *  function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

/**
 * batadv_max_header_len - calculate maximum encapsulation overhead for a
 *  payload packet
 *
 * Return the maximum encapsulation overhead in bytes.
 */
int batadv_max_header_len(void)
{
	int header_len = 0;

	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_4addr_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_bcast_packet));

#ifdef CONFIG_BATMAN_ADV_NC
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_coded_packet));
#endif

	return header_len + ETH_HLEN;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}
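
/* Worked example (illustrative only, not part of the original file): an
 * IPv4 packet marked with DSCP EF (dsfield 0xb8) yields
 * prio = (0xb8 & 0xfc) >> 5 = 5, so skb->priority becomes 261, i.e.
 * 802.1d priority 5 within the 256..263 range described above.
 */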

static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	u8 idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

int
batadv_recv_handler_register(u8 packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	int (*curr)(struct sk_buff *,
		    struct batadv_hard_iface *);
	curr = batadv_rx_handler[packet_type];

	if ((curr != batadv_recv_unhandled_packet) &&
	    (curr != batadv_recv_unhandled_unicast_packet))
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}

void batadv_recv_handler_unregister(u8 packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
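
/* Usage sketch (illustrative only, the packet type and callback names are
 * hypothetical): a protocol component claims a batman-adv packet type at
 * init time and releases it on shutdown, e.g.
 *
 *	static int my_recv(struct sk_buff *skb,
 *			   struct batadv_hard_iface *recv_if)
 *	{
 *		...
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	ret = batadv_recv_handler_register(BATADV_MY_TYPE, my_recv);
 *	...
 *	batadv_recv_handler_unregister(BATADV_MY_TYPE);
 *
 * batadv_recv_handler_register() refuses to overwrite a slot that is
 * already taken (-EBUSY), which is why the built-in types above are
 * assigned directly in batadv_recv_handler_init().
 */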

static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		return -EEXIST;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit ||
	    !bat_algo_ops->bat_neigh_cmp ||
	    !bat_algo_ops->bat_neigh_is_equiv_or_better) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);

	return 0;
}

int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		return -EINVAL;

	bat_priv->bat_algo_ops = bat_algo_ops;

	return 0;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;

	seq_puts(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
		seq_printf(seq, " * %s\n", bat_algo_ops->name);
	}

	return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 *  the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 *  marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not
 * to a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}

/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 *  possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
	if (atomic_dec_and_test(&tvlv_handler->refcount))
		kfree_rcu(tvlv_handler, rcu);
}

/**
 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
 *  based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to look for
 * @version: tvlv handler version to look for
 *
 * Returns tvlv handler if found or NULL otherwise.
 */
static struct batadv_tvlv_handler
*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler_tmp,
				 &bat_priv->tvlv.handler_list, list) {
		if (tvlv_handler_tmp->type != type)
			continue;

		if (tvlv_handler_tmp->version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
			continue;

		tvlv_handler = tvlv_handler_tmp;
		break;
	}
	rcu_read_unlock();

	return tvlv_handler;
}

/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter
 *  and possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
	if (atomic_dec_and_test(&tvlv->refcount))
		kfree(tvlv);
}

/**
 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
 *  list based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to look for
 * @version: tvlv container version to look for
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns tvlv container if found or NULL otherwise.
 */
static struct batadv_tvlv_container
*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;

	hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
		if (tvlv_tmp->tvlv_hdr.type != type)
			continue;

		if (tvlv_tmp->tvlv_hdr.version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
			continue;

		tvlv = tvlv_tmp;
		break;
	}

	return tvlv;
}

/**
 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
 *  list entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns size of all currently registered tvlv containers in bytes.
 */
static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
	struct batadv_tvlv_container *tvlv;
	u16 tvlv_len = 0;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_len += sizeof(struct batadv_tvlv_hdr);
		tvlv_len += ntohs(tvlv->tvlv_hdr.len);
	}

	return tvlv_len;
}

/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv
 *  container list
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv: the to be removed tvlv container
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
					 struct batadv_tvlv_container *tvlv)
{
	lockdep_assert_held(&bat_priv->tvlv.container_list_lock);

	if (!tvlv)
		return;

	hlist_del(&tvlv->list);

	/* first call to decrement the counter, second call to free */
	batadv_tvlv_container_free_ref(tvlv);
	batadv_tvlv_container_free_ref(tvlv);
}

/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 *  provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
				      u8 type, u8 version)
{
	struct batadv_tvlv_container *tvlv;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(bat_priv, tvlv);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_container_register - register tvlv type, version and content
 *  to be propagated with each (primary interface) OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type
 * @version: tvlv container version
 * @tvlv_value: tvlv container content
 * @tvlv_value_len: tvlv container content length
 *
 * If a container of the same type and version was already registered the new
 * content is going to replace the old one.
 */
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
				    u8 type, u8 version,
				    void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_tvlv_container *tvlv_old, *tvlv_new;

	if (!tvlv_value)
		tvlv_value_len = 0;

	tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
	if (!tvlv_new)
		return;

	tvlv_new->tvlv_hdr.version = version;
	tvlv_new->tvlv_hdr.type = type;
	tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);

	memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
	INIT_HLIST_NODE(&tvlv_new->list);
	atomic_set(&tvlv_new->refcount, 1);

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(bat_priv, tvlv_old);
	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
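
/* Usage sketch (illustrative only, the type and struct names are
 * hypothetical): a feature announces its state in every OGM by
 * (re)registering a container; registering the same type/version again
 * replaces the old content, unregistering stops the announcement:
 *
 *	struct my_tvlv_data data = { ... };
 *
 *	batadv_tvlv_container_register(bat_priv, MY_TVLV_TYPE, 1,
 *				       &data, sizeof(data));
 *	...
 *	batadv_tvlv_container_unregister(bat_priv, MY_TVLV_TYPE, 1);
 */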

/**
 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
 *  requested packet size
 * @packet_buff: packet buffer
 * @packet_buff_len: packet buffer size
 * @min_packet_len: requested packet minimum size
 * @additional_packet_len: requested additional packet size on top of minimum
 *  size
 *
 * Returns true if the packet buffer could be changed to the requested size,
 * false otherwise.
 */
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
					    int *packet_buff_len,
					    int min_packet_len,
					    int additional_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (!new_buff)
		return false;

	memcpy(new_buff, *packet_buff, min_packet_len);
	kfree(*packet_buff);
	*packet_buff = new_buff;
	*packet_buff_len = min_packet_len + additional_packet_len;

	return true;
}

/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 *  OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 *  content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Returns size of all appended tvlv containers in bytes.
 */
u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
				     unsigned char **packet_buff,
				     int *packet_buff_len, int packet_min_len)
{
	struct batadv_tvlv_container *tvlv;
	struct batadv_tvlv_hdr *tvlv_hdr;
	u16 tvlv_value_len;
	void *tvlv_value;
	bool ret;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, tvlv_value_len);

	if (!ret)
		goto end;

	if (!tvlv_value_len)
		goto end;

	tvlv_value = (*packet_buff) + packet_min_len;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_hdr = tvlv_value;
		tvlv_hdr->type = tvlv->tvlv_hdr.type;
		tvlv_hdr->version = tvlv->tvlv_hdr.version;
		tvlv_hdr->len = tvlv->tvlv_hdr.len;
		tvlv_value = tvlv_hdr + 1;
		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
		tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
	}

end:
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
	return tvlv_value_len;
}

/**
 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
 *  appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv_handler: tvlv callback function handling the tvlv content
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success if handler was not found or the return value of the handler
 * callback.
 */
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
				    struct batadv_tvlv_handler *tvlv_handler,
				    bool ogm_source,
				    struct batadv_orig_node *orig_node,
				    u8 *src, u8 *dst,
				    void *tvlv_value, u16 tvlv_value_len)
{
	if (!tvlv_handler)
		return NET_RX_SUCCESS;

	if (ogm_source) {
		if (!tvlv_handler->ogm_handler)
			return NET_RX_SUCCESS;

		if (!orig_node)
			return NET_RX_SUCCESS;

		tvlv_handler->ogm_handler(bat_priv, orig_node,
					  BATADV_NO_FLAGS,
					  tvlv_value, tvlv_value_len);
		tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
	} else {
		if (!src)
			return NET_RX_SUCCESS;

		if (!dst)
			return NET_RX_SUCCESS;

		if (!tvlv_handler->unicast_handler)
			return NET_RX_SUCCESS;

		return tvlv_handler->unicast_handler(bat_priv, src,
						     dst, tvlv_value,
						     tvlv_value_len);
	}

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 *  appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
				   bool ogm_source,
				   struct batadv_orig_node *orig_node,
				   u8 *src, u8 *dst,
				   void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_tvlv_handler *tvlv_handler;
	struct batadv_tvlv_hdr *tvlv_hdr;
	u16 tvlv_value_cont_len;
	u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
	int ret = NET_RX_SUCCESS;

	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = tvlv_value;
		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
		tvlv_value = tvlv_hdr + 1;
		tvlv_value_len -= sizeof(*tvlv_hdr);

		if (tvlv_value_cont_len > tvlv_value_len)
			break;

		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
						       tvlv_hdr->type,
						       tvlv_hdr->version);

		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
						ogm_source, orig_node,
						src, dst, tvlv_value,
						tvlv_value_cont_len);
		if (tvlv_handler)
			batadv_tvlv_handler_free_ref(tvlv_handler);
		tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
		tvlv_value_len -= tvlv_value_cont_len;
	}

	if (!ogm_source)
		return ret;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler,
				 &bat_priv->tvlv.handler_list, list) {
		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
			tvlv_handler->ogm_handler(bat_priv, orig_node,
						  cifnotfound, NULL, 0);

		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
	}
	rcu_read_unlock();

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
 *  handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @batadv_ogm_packet: ogm packet containing the tvlv containers
 * @orig_node: orig node emitting the ogm packet
 */
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
			     struct batadv_ogm_packet *batadv_ogm_packet,
			     struct batadv_orig_node *orig_node)
{
	void *tvlv_value;
	u16 tvlv_value_len;

	if (!batadv_ogm_packet)
		return;

	tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
	if (!tvlv_value_len)
		return;

	tvlv_value = batadv_ogm_packet + 1;

	batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
				       tvlv_value, tvlv_value_len);
}

/**
 * batadv_tvlv_handler_register - register tvlv handler based on the provided
 *  type and version (both need to match) for ogm tvlv payload and/or unicast
 *  payload
 * @bat_priv: the bat priv with all the soft interface information
 * @optr: ogm tvlv handler callback function. This function receives the orig
 *  node, flags and the tvlv content as argument to process.
 * @uptr: unicast tvlv handler callback function. This function receives the
 *  source & destination of the unicast packet as well as the tvlv content
 *  to process.
 * @type: tvlv handler type to be registered
 * @version: tvlv handler version to be registered
 * @flags: flags to enable or disable TVLV API behavior
 */
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
				  void (*optr)(struct batadv_priv *bat_priv,
					       struct batadv_orig_node *orig,
					       u8 flags,
					       void *tvlv_value,
					       u16 tvlv_value_len),
				  int (*uptr)(struct batadv_priv *bat_priv,
					      u8 *src, u8 *dst,
					      void *tvlv_value,
					      u16 tvlv_value_len),
				  u8 type, u8 version, u8 flags)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (tvlv_handler) {
		batadv_tvlv_handler_free_ref(tvlv_handler);
		return;
	}

	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
	if (!tvlv_handler)
		return;

	tvlv_handler->ogm_handler = optr;
	tvlv_handler->unicast_handler = uptr;
	tvlv_handler->type = type;
	tvlv_handler->version = version;
	tvlv_handler->flags = flags;
	atomic_set(&tvlv_handler->refcount, 1);
	INIT_HLIST_NODE(&tvlv_handler->list);

	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
}

/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 *  provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
				    u8 type, u8 version)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (!tvlv_handler)
		return;

	batadv_tvlv_handler_free_ref(tvlv_handler);
	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_del_rcu(&tvlv_handler->list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
	batadv_tvlv_handler_free_ref(tvlv_handler);
}
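
/* Usage sketch (illustrative only, callback and type names are
 * hypothetical): the receiver side registers callbacks matching the
 * container type/version announced by the sender:
 *
 *	static void my_ogm_handler(struct batadv_priv *bat_priv,
 *				   struct batadv_orig_node *orig,
 *				   u8 flags, void *tvlv_value,
 *				   u16 tvlv_value_len)
 *	{ ... }
 *
 *	batadv_tvlv_handler_register(bat_priv, my_ogm_handler, NULL,
 *				     MY_TVLV_TYPE, 1, BATADV_NO_FLAGS);
 *
 * Passing BATADV_TVLV_HANDLER_OGM_CIFNOTFND in @flags additionally
 * invokes the OGM handler when an OGM arrives without the matching
 * container, as implemented in batadv_tvlv_containers_process() above.
 */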

/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 *  specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
			      u8 *dst, u8 type, u8 version,
			      void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	struct batadv_tvlv_hdr *tvlv_hdr;
	struct batadv_orig_node *orig_node;
	struct sk_buff *skb = NULL;
	unsigned char *tvlv_buff;
	unsigned int tvlv_len;
	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
	bool ret = false;

	orig_node = batadv_orig_hash_find(bat_priv, dst);
	if (!orig_node)
		goto out;

	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
	if (!skb)
		goto out;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, ETH_HLEN);
	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
	unicast_tvlv_packet->ttl = BATADV_TTL;
	unicast_tvlv_packet->reserved = 0;
	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
	unicast_tvlv_packet->align = 0;
	ether_addr_copy(unicast_tvlv_packet->src, src);
	ether_addr_copy(unicast_tvlv_packet->dst, dst);

	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
	tvlv_hdr->version = version;
	tvlv_hdr->type = type;
	tvlv_hdr->len = htons(tvlv_value_len);
	tvlv_buff += sizeof(*tvlv_hdr);
	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = true;

out:
	if (skb && !ret)
		kfree_skb(skb);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}
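
/* Usage sketch (illustrative only, the type and payload names are
 * hypothetical): replying to a unicast tvlv request received via a
 * handler registered above:
 *
 *	batadv_tvlv_unicast_send(bat_priv, my_addr, req_src,
 *				 MY_TVLV_TYPE, 1, &reply, sizeof(reply));
 *
 * Nothing is sent if no originator is currently known for @dst.
 */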

/**
 * batadv_get_vid - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * If the packet embedded in the skb is vlan tagged this function returns the
 * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is
 * returned.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
	struct vlan_ethhdr *vhdr;
	unsigned short vid;

	if (ethhdr->h_proto != htons(ETH_P_8021Q))
		return BATADV_NO_FLAGS;

	if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
		return BATADV_NO_FLAGS;

	vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
	vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	vid |= BATADV_VLAN_HAS_TAG;

	return vid;
}

/**
 * batadv_vlan_ap_isola_get - return the AP isolation status for the given
 *  vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute has to be
 *  looked up
 *
 * Returns true if AP isolation is on for the VLAN identified by vid, false
 * otherwise
 */
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
	bool ap_isolation_enabled = false;
	struct batadv_softif_vlan *vlan;

	/* if the AP isolation is requested on a VLAN, then check for its
	 * setting in the proper VLAN private data structure
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
		batadv_softif_vlan_free_ref(vlan);
	}

	return ap_isolation_enabled;
}

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);