xref: /linux/net/batman-adv/send.c (revision 148f9bb87745ed45f7a11b2cbd3bc0f017d5d257)
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
#include "network-coding.h"

#include <linux/if_ether.h>

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
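
/* A usage sketch (hypothetical caller, not part of this file): since both
 * dev_queue_xmit() and the error path consume the buffer, a caller that needs
 * to keep the original skb hands over a clone, as the broadcast code below
 * does:
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone)
 *		batadv_send_skb_packet(clone, hard_iface,
 *				       batadv_broadcast_addr);
 *
 * Either way, the skb passed in must not be touched afterwards.
 */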

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases the neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		return ret;

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

	batadv_neigh_node_free_ref(neigh_node);

	return ret;
}
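
/* A sketch of caller-side handling (hypothetical, not part of this file):
 * NET_XMIT_SUCCESS and NET_XMIT_POLICED both mean the skb was consumed
 * (transmitted, or buffered by network coding for a later coded
 * transmission); only on NET_XMIT_DROP does the buffer remain the caller's
 * responsibility:
 *
 *	if (batadv_send_skb_to_orig(skb, orig_node, recv_if) == NET_XMIT_DROP)
 *		kfree_skb(skb);	// no router found, skb was not consumed
 */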

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment the interface is activated in
	 * hardif_activate_interface(), where the originator mac is set, and
	 * outdated packets (especially ones with uninitialized mac addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
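
/* Caller-side sketch (hypothetical, not part of this file): because the skb
 * is copied rather than consumed, a transmit path queues the broadcast and
 * then frees its own buffer unconditionally:
 *
 *	batadv_add_bcast_packet_to_list(bat_priv, skb, 1 /\* jiffies *\/);
 *	consume_skb(skb);	// still ours to free; the queued copy lives on
 */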

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
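
/* Informal sketch of the resulting schedule (assuming the 5 ms re-queue
 * interval above and BATADV_NUM_BCASTS_MAX of 3, its value at this revision):
 * a broadcast queued at t0 with delay d is sent once per eligible interface
 * at roughly
 *
 *	t0 + d			round 1 (num_packets 0 -> 1)
 *	t0 + d + 5 ms		round 2 (num_packets 1 -> 2)
 *	t0 + d + 10 ms		round 3 (num_packets 2 -> 3, then freed)
 *
 * with interfaces whose num_bcasts is smaller dropping out of later rounds.
 */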

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake up time, unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface)
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface)
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
399