xref: /linux/net/batman-adv/send.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
#include "fragmentation.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to send the packet on
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address via
 * the specified batman-adv interface.
 *
 * Returns NET_XMIT_DROP when the packet could not be sent or the return
 * value of dev_queue_xmit() otherwise.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However, on
	 * congestion and traffic shaping it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
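
/* Ownership note with a usage sketch (illustrative, mirroring the
 * rebroadcast loop further down in this file): the skb is always consumed,
 * either freed on error or handed to dev_queue_xmit(), so a caller that
 * needs to keep the buffer sends a clone instead:
 *
 *	skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 *	if (skb1)
 *		batadv_send_skb_packet(skb1, hard_iface,
 *				       batadv_broadcast_addr);
 */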

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* Try to network code the packet if it was received on an interface
	 * (i.e. it is being forwarded). If the packet originates from this
	 * node or if network coding fails, send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}
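
/* Caller sketch (illustrative, taken from batadv_send_skb_unicast() below):
 * locally generated packets have no receiving interface, so NULL is passed
 * as recv_if:
 *
 *	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
 *		ret = NET_XMIT_SUCCESS;
 */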

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->header.version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->header.packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->header.ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Push the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet.
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype,
				   struct batadv_orig_node *orig_node,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_unicast_packet *unicast_packet;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}
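
/* Reference handling note: batadv_send_skb_unicast() consumes both the skb
 * (freed on error) and the orig_node reference, so the lookup helpers below
 * can pass the reference returned by their search straight through without
 * any cleanup of their own.
 */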

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;

	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
					     ethhdr->h_dest, vid);
	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}
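
/* Dispatch sketch (illustrative only; assumes the inline wrappers around
 * batadv_send_skb_via_tt_generic() declared in send.h, and is_dhcp_for_gw
 * is a placeholder condition): a soft-interface transmit path might pick
 * the gateway path from batadv_send_skb_via_gw() below for DHCP traffic
 * and fall back to a translation table lookup otherwise:
 *
 *	if (is_dhcp_for_gw)
 *		ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
 *	else
 *		ret = batadv_send_skb_via_tt(bat_priv, skb, vid);
 */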

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

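/**
 * batadv_schedule_bat_ogm - schedule an originator message for transmission
 * @hard_iface: the interface to schedule the OGM on
 *
 * Activates the interface if it was marked for activation and hands it over
 * to the routing algorithm's OGM scheduler.
 */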
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() (where the originator MAC is set) and
	 * outdated packets (especially ones with uninitialized MAC addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

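/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the attached skb (if any), drops the reference to the incoming
 * interface (if any) and frees the structure itself.
 */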
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

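/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast forwarding packet and
 *  arm its delayed work
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timer delay (in jiffies) before the packet is sent
 */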
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue a broadcast packet for multiple
 *  sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet already? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

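/**
 * batadv_send_outstanding_bcast_packet - send a queued broadcast packet
 * @work: the work item embedded in the forwarding packet
 *
 * Sends a copy of the queued broadcast packet on every suitable interface
 * and requeues the packet until enough rebroadcasts have been done.
 */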
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

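/**
 * batadv_send_outstanding_bat_ogm_packet - emit a queued OGM
 * @work: the work item embedded in the forwarding packet
 *
 * Emits the queued OGM via the routing algorithm and, for OGMs originating
 * from this node, schedules the next one.
 */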
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

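/**
 * batadv_purge_outstanding_packets - cancel queued broadcast and OGM packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: if not NULL, only purge packets queued for this interface
 *
 * Cancels the delayed work of each matching forwarding packet and frees the
 * packets whose work was still pending.
 */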
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list
		 * to delete the item from it
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from it
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}