/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/crc32c.h>
#include <linux/highmem.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"


/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
/* per-packet-type receive dispatch table, indexed by the one-byte
 * packet_type field of the batman header (256 possible values)
 */
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
/* name of the routing algorithm selected via the routing_algo module param */
char batadv_routing_algo[20] = "BATMAN_IV";
/* list of registered struct batadv_algo_ops, looked up by name */
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* single-threaded workqueue for deferred batman-adv events */
struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

/**
 * batadv_init - module init: set up global state shared by all mesh interfaces
 *
 * Initializes the global lists, the receive dispatch table and the default
 * (BATMAN_IV) routing algorithm, creates the event workqueue, and registers
 * the debugfs/socket entry points and the netdevice notifier.
 *
 * Returns 0 on success, -ENOMEM if the event workqueue cannot be created.
 */
static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	/* must run before any packet can be dispatched: fills the rx table
	 * with the "unhandled" fallback and the known packet handlers
	 */
	batadv_recv_handler_init();

	batadv_iv_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

/**
 * batadv_exit - module teardown, mirror of batadv_init()
 *
 * Tears down debugfs, the notifier and all hard interfaces, drains and
 * destroys the event workqueue, then waits for all pending RCU callbacks
 * (rcu_barrier) so no freed object is still in flight when the module
 * text is unloaded.
 */
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

/**
 * batadv_mesh_init - initialize the per-mesh private data of a soft interface
 * @soft_iface: netdev whose batadv_priv is to be initialized
 *
 * Initializes all spinlocks and list heads, then brings up the subsystems
 * (originator table, translation table, vis, bridge loop avoidance, DAT,
 * network coding) in order. On any failure the partially initialized state
 * is torn down via batadv_mesh_free() before returning the error.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	/* announce the soft interface's own MAC in the translation table */
	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw.reselect, 0);
	/* only now may the RX path accept packets for this mesh */
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

/**
 * batadv_mesh_free - free all per-mesh state of a soft interface
 * @soft_iface: netdev whose batadv_priv is to be torn down
 *
 * Marks the mesh DEACTIVATING first so the RX path stops accepting packets,
 * then purges outstanding packets and releases each subsystem. Safe to call
 * on a partially initialized mesh (used as error path of batadv_mesh_init).
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_originator_free(bat_priv);
	batadv_nc_free(bat_priv);

	batadv_tt_free(bat_priv);

	batadv_bla_free(bat_priv);

	batadv_dat_free(bat_priv);

	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check whether @addr belongs to one of our interfaces
 * @addr: ethernet address to look up
 *
 * Walks the global hardif list under RCU and compares @addr against the MAC
 * of every ACTIVE hard interface.
 *
 * NOTE(review): this matches interfaces of *all* meshes, not only the
 * caller's — later upstream kernels scope this check per bat_priv; confirm
 * whether the global check is intended here.
 *
 * Returns 1 on a match, 0 otherwise.
 */
int batadv_is_my_mac(const uint8_t *addr)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	/* not usable: drop the reference taken above and report NULL */
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	/* on success the caller owns a reference and must release it with
	 * batadv_hardif_free_ref()
	 */
	return primary_if;
}

/* default rx table entry: drop packets with an unknown packet_type */
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	/* the packet_type is embedded in the hard_iface, recover it */
	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->header.packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

/* fill the rx dispatch table: every slot falls back to the drop handler,
 * then the statically known packet types are wired up
 */
static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* fragmented unicast packet */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
	/* vis packet */
	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
	/* Translation table query (request or response) */
	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
	/* Roaming advertisement */
	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
}

/**
 * batadv_recv_handler_register - claim a packet_type slot in the rx table
 * @packet_type: batman packet type to handle
 * @recv_handler: function to dispatch matching packets to
 *
 * Returns 0 on success, -EBUSY if the slot is already taken.
 */
int
batadv_recv_handler_register(uint8_t packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}

/* release a packet_type slot, restoring the drop fallback */
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

/* look up a registered routing algorithm by name; NULL if not found */
static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

/**
 * batadv_algo_register - register a routing algorithm implementation
 * @bat_algo_ops: ops structure; must have every callback populated
 *
 * Returns 0 on success, -EEXIST if an algorithm with the same name is
 * already registered, -EINVAL if a required callback is missing.
 */
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
	ret = 0;

out:
	return ret;
}

/**
 * batadv_algo_select - bind a registered algorithm to a mesh interface
 * @bat_priv: the mesh to configure
 * @name: name of the algorithm to select
 *
 * Returns 0 on success, -EINVAL if no algorithm with @name is registered.
 */
int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

/* debugfs: print the names of all registered routing algorithms */
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;

	seq_printf(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
438 */ 439 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr) 440 { 441 u32 crc = 0; 442 unsigned int from; 443 unsigned int to = skb->len; 444 struct skb_seq_state st; 445 const u8 *data; 446 unsigned int len; 447 unsigned int consumed = 0; 448 449 from = (unsigned int)(payload_ptr - skb->data); 450 451 skb_prepare_seq_read(skb, from, to, &st); 452 while ((len = skb_seq_read(consumed, &data, &st)) != 0) { 453 crc = crc32c(crc, data, len); 454 consumed += len; 455 } 456 skb_abort_seq_read(&st); 457 458 return htonl(crc); 459 } 460 461 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp) 462 { 463 struct batadv_algo_ops *bat_algo_ops; 464 char *algo_name = (char *)val; 465 size_t name_len = strlen(algo_name); 466 467 if (algo_name[name_len - 1] == '\n') 468 algo_name[name_len - 1] = '\0'; 469 470 bat_algo_ops = batadv_algo_get(algo_name); 471 if (!bat_algo_ops) { 472 pr_err("Routing algorithm '%s' is not supported\n", algo_name); 473 return -EINVAL; 474 } 475 476 return param_set_copystring(algo_name, kp); 477 } 478 479 static const struct kernel_param_ops batadv_param_ops_ra = { 480 .set = batadv_param_set_ra, 481 .get = param_get_string, 482 }; 483 484 static struct kparam_string batadv_param_string_ra = { 485 .maxlen = sizeof(batadv_routing_algo), 486 .string = batadv_routing_algo, 487 }; 488 489 module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra, 490 0644); 491 module_init(batadv_init); 492 module_exit(batadv_exit); 493 494 MODULE_LICENSE("GPL"); 495 496 MODULE_AUTHOR(BATADV_DRIVER_AUTHOR); 497 MODULE_DESCRIPTION(BATADV_DRIVER_DESC); 498 MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE); 499 MODULE_VERSION(BATADV_SOURCE_VERSION); 500