/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
#include "fragmentation.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases the neigh_node refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 * common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 * unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Push the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 * 4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype,
				   struct batadv_orig_node *orig_node,
				   unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_unicast_packet *unicast_packet;
	int ret = NET_XMIT_DROP, hdr_size;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;

		hdr_size = sizeof(*unicast_packet);
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;

		hdr_size = sizeof(struct batadv_unicast_4addr_packet);
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	ethhdr = (struct ethhdr *)(skb->data + hdr_size);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet
	 * and will try to reroute it because the ttvn contained in the
	 * header is less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 * 4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, uint8_t *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	uint8_t *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}
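
/**
 * batadv_schedule_bat_ogm - schedule an OGM for transmission
 * @hard_iface: the interface to send the OGM on
 *
 * Activates the interface if its activation is still pending and then asks
 * the routing algorithm to schedule the next OGM on this interface.
 */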
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
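
/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the skb (if any), drops the references held on the incoming and
 * outgoing hard interfaces and releases the forw_packet structure itself.
 */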
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_free_ref(forw_packet->if_outgoing);
	kfree(forw_packet);
}
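
/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the broadcast packet to queue
 * @send_time: how long to wait (in jiffies) before sending the packet
 *
 * Adds the packet to the broadcast queue and starts the delayed work that
 * takes care of the actual transmission.
 */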
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
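
/**
 * batadv_send_outstanding_bcast_packet - (re)send a queued broadcast packet
 * @work: the work item embedded in the forwarding packet
 *
 * Sends a copy of the queued broadcast on every hard interface belonging to
 * this mesh that has not yet reached its rebroadcast count and reschedules
 * the packet until all rebroadcasts have been sent.
 */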
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
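
/**
 * batadv_send_outstanding_bat_ogm_packet - send a queued OGM
 * @work: the work item embedded in the forwarding packet
 *
 * Hands the queued OGM over to the routing algorithm for emission and, for
 * the node's own "original" copy, schedules the next OGM.
 */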
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}
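
/**
 * batadv_purge_outstanding_packets - stop and purge scheduled packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to cancel packets for, or NULL for all
 *
 * Cancels all scheduled broadcast and OGM packets that belong to the given
 * hard interface. If hard_iface is NULL, all queued packets are purged.
 */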
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}