xref: /linux/net/batman-adv/send.c (revision 7912825d8b755e6a5b9839eab910f451b0271aba)
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}
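
/* a worked example of the scaling above (illustrative values only,
 * assuming TQ_MAX_VALUE is 255 and a configured hop penalty of 10):
 * an incoming tq of 200 is rebroadcast as (200 * (255 - 10)) / 255 = 192,
 * i.e. the path metric decays a little on every hop */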

/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
	/* % and * share precedence in C, so the modulus needs its own
	 * parentheses to yield the intended [0, 2 * JITTER) range */
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}
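
/* example (illustrative, assuming JITTER is 20 ms and orig_interval is
 * 1000 ms): the next own OGM is scheduled 980..1019 ms from now, so
 * neighboring originators do not stay synchronized */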

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
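
/* with the assumed JITTER of 20 ms, a forwarded OGM is thus delayed by
 * 0..9 ms before it is queued for transmission */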

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
				struct hard_iface *hard_iface,
				uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
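
/* a hypothetical caller pattern (illustrative, not from this file):
 *
 *	ret = send_skb_packet(skb, hard_iface, broadcast_addr);
 *	if (ret < 0)
 *		...	(hard failure inside dev_queue_xmit())
 *	else if (ret == NET_XMIT_DROP)
 *		...	(dropped by the qdisc, deliberately not an error)
 */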

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_tt)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_tt * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->skb->data + buff_pos);
	}
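
	/* the loop above steps through an aggregation buffer laid out as
	 *
	 *   | batman_packet | num_tt * ETH_ALEN | batman_packet | ... |
	 *
	 * i.e. each OGM header is followed directly by its translation
	 * table entries, and buff_pos always points at the next header */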

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->skb->data);
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		return;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		return;

	/* multihomed peer assumed - non-primary OGMs are only broadcast
	 * on their own interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* the skb is only used once and then the forw_packet is
		 * freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();
}

static void rebuild_batman_packet(struct bat_priv *bat_priv,
				  struct hard_iface *hard_iface)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) +
			(bat_priv->num_local_tt * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep the old buffer in case kmalloc fails */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
				new_buff + sizeof(struct batman_packet),
				new_len - sizeof(struct batman_packet));

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

void schedule_own_packet(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	/**
	 * the interface gets activated here instead of in
	 * hardif_activate_interface() (where the originator mac is set) to
	 * avoid a race between the activation and outdated packets
	 * (especially packets with uninitialized mac addresses) still
	 * sitting in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	/* if the local tt has changed and this is a primary interface */
	if ((atomic_read(&bat_priv->tt_local_changed)) &&
	    (hard_iface == primary_if))
		rebuild_batman_packet(bat_priv, hard_iface);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_packet->gw_flags =
				(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_packet->gw_flags = 0;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       hard_iface->packet_buff,
			       hard_iface->packet_len,
			       hard_iface, 1, send_time);

	if (primary_if)
		hardif_free_ref(primary_if);
}

void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int tt_buff_len,
			     struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast the tq of our best ranking neighbor so that the
	 * rebroadcast carries our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_packet->ttl = router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htonl(batman_packet->seqno);

	/* switch off the primaries' first hop flag when forwarding */
	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + tt_buff_len,
			       if_incoming, 0, send_time);
}
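
/* a worked forwarding example (all numbers illustrative): an OGM arrives
 * with tq 210 and ttl 5 from a node that is not our best ranking
 * neighbor towards its originator, while that best neighbor has
 * tq_avg 200 and last_ttl 5. The rebroadcast then carries
 * tq = hop_penalty(200) and ttl = 4, so downstream nodes judge the path
 * through us by our best route, minus one hop */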

static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast
 * packets are sent multiple times to increase the probability of being
 * received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)skb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
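
/* hypothetical caller sketch (illustrative, not from this file): the
 * queue keeps its own skb_copy(), so the caller always disposes of the
 * skb it passed in:
 *
 *	ret = add_bcast_packet_to_list(bat_priv, skb);
 *	kfree_skb(skb);
 *	return ret;
 */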

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
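
/* net effect of the two functions above: every queued broadcast is
 * transmitted 3 times on each attached interface, the first copy one
 * jiffy after queueing and the remaining copies roughly 5 ms apart
 * (the (5 * HZ) / 1000 above, so the exact spacing depends on HZ),
 * to raise the delivery probability on lossy links */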

void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue to determine
	 * the queue's wake up time, unless we are shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
			       struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
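/* note on the locking pattern above: the list lock is dropped around
 * cancel_delayed_work_sync() because the work items themselves take the
 * same lock to delete their entry; holding it across the synchronous
 * cancel would deadlock */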