/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "bat_sysfs.h"
#include "bat_debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"


/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked */
struct list_head hardif_list;

/* Receive dispatch table, indexed by the one-byte batman packet type.
 * Unregistered slots point at recv_unhandled_packet() (see
 * recv_handler_init()), so every index is always safe to call through. */
static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);

/* Name of the selected routing algorithm; writable through the
 * "routing_algo" module parameter (validated by param_set_ra() below). */
char bat_routing_algo[20] = "BATMAN IV";

/* List of registered routing algorithms (struct bat_algo_ops entries). */
static struct hlist_head bat_algo_list;

/* Ethernet broadcast address, used as the all-hosts destination. */
unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* Single-threaded workqueue all batman-adv deferred work is queued on. */
struct workqueue_struct *bat_event_workqueue;

static void recv_handler_init(void);

/*
 * Module init: set up the global lists, populate the receive handler
 * table, register the default routing algorithm and create the event
 * workqueue.  Returns 0 on success or -ENOMEM if the workqueue cannot
 * be allocated.
 *
 * NOTE(review): the return value of bat_iv_init() is not checked here -
 * confirm registration of the default algorithm cannot fail.
 */
static int __init batman_init(void)
{
	INIT_LIST_HEAD(&hardif_list);
	INIT_HLIST_HEAD(&bat_algo_list);

	recv_handler_init();

	bat_iv_init();

	/* the name should not be longer than 10 chars - see
	 * http://lwn.net/Articles/23634/ */
	bat_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!bat_event_workqueue)
		return -ENOMEM;

	bat_socket_init();
	debugfs_init();

	register_netdevice_notifier(&hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		SOURCE_VERSION, COMPAT_VERSION);

	return 0;
}

/*
 * Module exit: tear down in reverse order of batman_init().  The
 * rcu_barrier() waits for all pending RCU callbacks before the module
 * text is unloaded.
 */
static void __exit batman_exit(void)
{
	debugfs_destroy();
	unregister_netdevice_notifier(&hard_if_notifier);
	hardif_remove_interfaces();

	flush_workqueue(bat_event_workqueue);
	destroy_workqueue(bat_event_workqueue);
	bat_event_workqueue = NULL;

	rcu_barrier();
}

/*
 * Initialize the per-mesh state hanging off a soft interface: locks,
 * lists and the originator / translation-table / vis / bridge-loop-
 * avoidance subsystems.  On success the mesh is marked MESH_ACTIVE.
 *
 * Returns 0 on success, -1 on failure (any partially initialized state
 * is released via mesh_free()).
 *
 * NOTE(review): returns a bare -1 rather than an errno value, and the
 * subsystem init helpers are tested with "< 1" - confirm they return
 * 0/1 style results rather than negative error codes.
 */
int mesh_init(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt_changes_list_lock);
	spin_lock_init(&bat_priv->tt_req_list_lock);
	spin_lock_init(&bat_priv->tt_roam_list_lock);
	spin_lock_init(&bat_priv->tt_buff_lock);
	spin_lock_init(&bat_priv->gw_list_lock);
	spin_lock_init(&bat_priv->vis_hash_lock);
	spin_lock_init(&bat_priv->vis_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw_list);
	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
	INIT_LIST_HEAD(&bat_priv->tt_req_list);
	INIT_LIST_HEAD(&bat_priv->tt_roam_list);

	if (originator_init(bat_priv) < 1)
		goto err;

	if (tt_init(bat_priv) < 1)
		goto err;

	/* announce the soft interface's own MAC in the translation table */
	tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);

	if (vis_init(bat_priv) < 1)
		goto err;

	if (bla_init(bat_priv) < 1)
		goto err;

	atomic_set(&bat_priv->gw_reselect, 0);
	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
	goto end;

err:
	mesh_free(soft_iface);
	return -1;

end:
	return 0;
}

/*
 * Release all per-mesh state created by mesh_init().  The mesh is
 * marked MESH_DEACTIVATING while teardown runs and MESH_INACTIVE when
 * done; outstanding queued packets are purged first.
 */
void mesh_free(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);

	purge_outstanding_packets(bat_priv, NULL);

	vis_quit(bat_priv);

	gw_node_purge(bat_priv);
	originator_free(bat_priv);

	tt_free(bat_priv);

	bla_free(bat_priv);

	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}

/* Take a reference on this module (best effort; result ignored). */
void inc_module_count(void)
{
	try_module_get(THIS_MODULE);
}

/* Drop a reference previously taken via inc_module_count(). */
void dec_module_count(void)
{
	module_put(THIS_MODULE);
}

/*
 * Return 1 if @addr matches the MAC address of any of our active hard
 * interfaces, 0 otherwise.  Traversal of hardif_list is RCU protected
 * (see the comment on hardif_list above).
 */
int is_my_mac(const uint8_t *addr)
{
	const struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}

/* Default slot of recv_packet_handler[]: drop packets of any type no
 * handler has been registered for. */
static int recv_unhandled_packet(struct sk_buff *skb,
				 struct hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *ptype, struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here.
	 */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batman_ogm_packet->header.packet_type;
	ret = (*recv_packet_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

/*
 * Fill the packet dispatch table: every slot gets the drop handler
 * first, then the built-in packet types are wired to their handlers.
 * Further types can be added at runtime via recv_handler_register().
 */
static void recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
		recv_packet_handler[i] = recv_unhandled_packet;

	/* batman icmp packet */
	recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
	/* unicast packet */
	recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
	/* fragmented unicast packet */
	recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
	/* broadcast packet */
	recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
	/* vis packet */
	recv_packet_handler[BAT_VIS] = recv_vis_packet;
	/* Translation table query (request or response) */
	recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
	/* Roaming advertisement */
	recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
}

/*
 * Register @recv_handler for @packet_type.  Returns 0 on success or
 * -EBUSY if the slot is already taken by anything other than the
 * default drop handler.
 */
int recv_handler_register(uint8_t packet_type,
			  int (*recv_handler)(struct sk_buff *,
					      struct hard_iface *))
{
	if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
		return -EBUSY;

	recv_packet_handler[packet_type] = recv_handler;
	return 0;
}

/* Reset @packet_type's slot back to the default drop handler. */
void recv_handler_unregister(uint8_t packet_type)
{
	recv_packet_handler[packet_type] = recv_unhandled_packet;
}

/*
 * Look up a registered routing algorithm by name.  Returns the matching
 * bat_algo_ops or NULL if no algorithm with that name is registered.
 */
static struct bat_algo_ops *bat_algo_get(char *name)
{
	struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
	struct hlist_node *node;

	hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

/*
 * Register a routing algorithm.  Rejects duplicate names and ops
 * structures with missing callbacks.  Returns 0 on success, -1 on
 * failure.
 *
 * NOTE(review): no locking is visible around bat_algo_list here -
 * presumably registration only happens from module init context;
 * confirm against the callers.
 */
int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
{
	struct bat_algo_ops *bat_algo_ops_tmp;
	int ret = -1;

	bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
	ret = 0;

out:
	return ret;
}

/*
 * Select the routing algorithm named @name for @bat_priv.  Returns 0 on
 * success, -1 if no algorithm with that name is registered.
 */
int bat_algo_select(struct bat_priv *bat_priv, char *name)
{
	struct bat_algo_ops *bat_algo_ops;
	int ret = -1;

	bat_algo_ops = bat_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

/* debugfs/seq_file dump: list the names of all registered algorithms. */
int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct bat_algo_ops *bat_algo_ops;
	struct hlist_node *node;

	seq_printf(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

/*
 * Setter for the "routing_algo" module parameter: only accept names of
 * algorithms that are actually registered, then store the string via
 * the standard copystring helper.
 */
static int param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct bat_algo_ops *bat_algo_ops;

	bat_algo_ops = bat_algo_get((char *)val);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", val);
		return -EINVAL;
	}

	return param_set_copystring(val, kp);
}

/* Custom ops pair for the validated routing_algo parameter. */
static const struct kernel_param_ops param_ops_ra = {
	.set = param_set_ra,
	.get = param_get_string,
};

/* Backing storage description for the routing_algo parameter string. */
static struct kparam_string __param_string_ra = {
	.maxlen = sizeof(bat_routing_algo),
	.string = bat_routing_algo,
};

module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
module_init(batman_init);
module_exit(batman_exit);

MODULE_LICENSE("GPL");

/* DRIVER_* and SOURCE_VERSION are defined in main.h */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
MODULE_VERSION(SOURCE_VERSION);