xref: /linux/net/batman-adv/send.c (revision 64afe35398269577ef9809474dd7dc0e5d265176)
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "types.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

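/* Worked example: TQ_MAX_VALUE is 255 in this codebase, so with e.g. a
 * hop_penalty of 10 an incoming tq of 200 scales to
 * (200 * (255 - 10)) / 255 = 192 (integer division) - every forwarded
 * hop lowers the advertised path quality a little. */
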
/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}

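/* Assuming the usual defaults (orig_interval 1000 ms, JITTER 20 ms), the
 * next own OGM is scheduled uniformly within [980, 1020) ms from now; the
 * random jitter keeps neighboring nodes from synchronizing their
 * broadcasts. Note that (2 * JITTER) needs its parentheses: '%' and '*'
 * share precedence, so "random32() % 2 * JITTER" would degenerate to
 * either 0 or JITTER. */
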
/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

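/* Forwarded OGMs get a much smaller delay than our own: with JITTER at
 * its usual 20 ms this is a uniform 0-9 ms, enough to de-synchronize
 * rebroadcasts without noticeably slowing route propagation. */
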
/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
				struct batman_if *batman_if,
				uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (batman_if->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!batman_if->net_dev))
		goto send_skb_err;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", batman_if->net_dev->name);
		goto send_skb_err;
	}

	/* make room for the ethernet header */
	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However, on
	 * congestion and traffic shaping it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

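/* On-wire layout produced above (a sketch): a plain ethernet frame whose
 * payload is the raw batman packet:
 *
 *   [ h_dest: 6 ][ h_source: 6 ][ h_proto = ETH_P_BATMAN: 2 ][ payload ]
 */
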
/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	struct sk_buff *skb;

	if (batman_if->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

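		/* only the first packet of an aggregate can be one of our
		 * own OGMs - everything after it is being forwarded */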
		fwd_str = (forw_packet->own && (packet_num == 0) ?
			   "Sending own" : "Forwarding");
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_if->net_dev->name, batman_if->net_dev->dev_addr);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->skb->data + buff_pos);
	}

	/* create a clone because the function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, batman_if, broadcast_addr);
}

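/* Layout of the aggregated buffer walked above - each batman_packet
 * header is directly followed by the HNA MACs it announces, so buff_pos
 * advances by sizeof(struct batman_packet) + num_hna * ETH_ALEN:
 *
 *   [ batman_packet | num_hna MACs ][ batman_packet | num_hna MACs ] ...
 */
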
/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->skb->data);
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		return;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		return;

	/* multihomed peer assumed - non-primary OGMs are only
	 * broadcast on their own interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* the skb is used only once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, batman_if);
	}
	rcu_read_unlock();
}
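
/* In the single-interface branch above, send_skb_packet() consumes the
 * skb, so forw_packet->skb is set to NULL to keep the later
 * forw_packet_free() from freeing the buffer a second time. */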

static void rebuild_batman_packet(struct bat_priv *bat_priv,
				  struct batman_if *batman_if)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) +
			(bat_priv->num_local_hna * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep the old buffer if kmalloc fails */
	if (new_buff) {
		memcpy(new_buff, batman_if->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
				new_buff + sizeof(struct batman_packet),
				new_len - sizeof(struct batman_packet));

		kfree(batman_if->packet_buff);
		batman_if->packet_buff = new_buff;
		batman_if->packet_len = new_len;
	}
}
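
/* Resulting buffer layout, mirroring the on-wire OGM: the old header is
 * copied verbatim and hna_local_fill_buffer() appends one MAC address
 * (ETH_ALEN bytes) per locally announced host:
 *
 *   [ struct batman_packet ][ mac 0 ][ mac 1 ] ... [ mac num_hna-1 ]
 */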

void schedule_own_packet(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((batman_if->if_status == IF_NOT_IN_USE) ||
	    (batman_if->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() (where the originator mac is set) and
	 * outdated packets (especially those with uninitialized mac
	 * addresses) still sitting in the packet queue
	 */
	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
		batman_if->if_status = IF_ACTIVE;

	/* if the local hna has changed and this is the primary interface */
	if ((atomic_read(&bat_priv->hna_local_changed)) &&
	    (batman_if == bat_priv->primary_if))
		rebuild_batman_packet(bat_priv, batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno =
		htonl((uint32_t)atomic_read(&batman_if->seqno));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	if ((batman_if == bat_priv->primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_packet->gw_flags =
				(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_packet->gw_flags = 0;

	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       batman_if->packet_buff,
			       batman_if->packet_len,
			       batman_if, 1, send_time);
}

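/* Two things worth noting above: the seqno embedded in the packet buffer
 * is network order (htonl()) while the per-interface atomic counter stays
 * in host order, and the '1' passed to add_bat_packet_to_list() flags the
 * packet as our own, which is what makes send_outstanding_bat_packet()
 * schedule the next own OGM once this one has gone out. */
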
void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast the tq of our best ranking neighbor to ensure that
	 * our best tq value is propagated */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast the ogm of the best ranking neighbor as-is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl = orig_node->router->last_ttl
							- 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htonl(batman_packet->seqno);

	/* switch off the PRIMARIES_FIRST_HOP flag when forwarding */
	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}

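/* The htonl() above restores wire byte order: the receive path converted
 * the seqno to host order for the protocol logic, and the packet is now
 * queued for retransmission in its on-wire format again. */
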
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

#define atomic_dec_not_zero(v)		atomic_add_unless((v), -1, 0)
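
/* atomic_add_unless() returns non-zero only if it performed the add, so
 * this decrements a counter and reports success unless it already hit
 * zero - used below to hand out slots from the broadcast queue budget. */
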
/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)skb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->if_incoming = bat_priv->primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	return NETDEV_TX_BUSY;
}
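
/* A queue credit is taken first via atomic_dec_not_zero(), so every
 * failure path after that point has to give it back (out_and_inc) - the
 * missing primary interface case included. The initial transmission is
 * scheduled only 1 jiffy out, so the first broadcast leaves almost
 * immediately. */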

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, batman_if, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

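/* Retransmission schedule: each broadcast is sent three times in total
 * (num_packets < 3), with the repeats queued (5 * HZ) / 1000 jiffies
 * apart - roughly 5 ms, rounding down to 0 on kernels with HZ < 200. */
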
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake-up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
			       struct batman_if *batman_if)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;

	if (batman_if)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			batman_if->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

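	/* Both loops below follow the same pattern: the list lock is
	 * dropped before cancel_delayed_work_sync() because the work
	 * handler itself takes the lock to unlink its entry - holding it
	 * across the sync cancel would deadlock. */
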
	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}