| /linux/net/llc/ |
| llc_c_ac.c |
202 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); in llc_conn_ac_send_disc_cmd_p_set_x() local
204 if (nskb) { in llc_conn_ac_send_disc_cmd_p_set_x()
207 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, in llc_conn_ac_send_disc_cmd_p_set_x()
209 llc_pdu_init_as_disc_cmd(nskb, 1); in llc_conn_ac_send_disc_cmd_p_set_x()
210 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); in llc_conn_ac_send_disc_cmd_p_set_x()
213 llc_conn_send_pdu(sk, nskb); in llc_conn_ac_send_disc_cmd_p_set_x()
219 kfree_skb(nskb); in llc_conn_ac_send_disc_cmd_p_set_x()
227 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); in llc_conn_ac_send_dm_rsp_f_set_p() local
229 if (nskb) { in llc_conn_ac_send_dm_rsp_f_set_p()
234 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, in llc_conn_ac_send_dm_rsp_f_set_p()
[all …]
|
| llc_s_ac.c |
46 struct sk_buff *nskb; in llc_prepare_and_xmit() local
53 nskb = skb_clone(skb, GFP_ATOMIC); in llc_prepare_and_xmit()
54 if (!nskb) in llc_prepare_and_xmit()
58 skb_set_owner_w(nskb, skb->sk); in llc_prepare_and_xmit()
60 return dev_queue_xmit(nskb); in llc_prepare_and_xmit()
115 struct sk_buff *nskb; in llc_sap_action_send_xid_r() local
120 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, in llc_sap_action_send_xid_r()
122 if (!nskb) in llc_sap_action_send_xid_r()
124 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, in llc_sap_action_send_xid_r()
126 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0); in llc_sap_action_send_xid_r()
[all …]
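llc_prepare_and_xmit() (and can_create_echo_skb() in include/linux/can/skb.h further down) uses the clone-then-transmit idiom: the original skb stays queued while a clone, charged to the sending socket, goes out to the device. A minimal sketch of that idiom; the helper name is illustrative, not from the source:

```c
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative sketch: transmit a clone so the caller keeps the original
 * skb (for retransmission or echo).  skb_set_owner_w() charges the clone
 * to the sending socket so write-memory accounting stays correct. */
static int xmit_cloned(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	if (skb->sk)
		skb_set_owner_w(nskb, skb->sk);

	return dev_queue_xmit(nskb);	/* consumes the clone */
}
```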
|
| llc_station.c |
52 struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, in llc_station_ac_send_xid_r() local
55 if (!nskb) in llc_station_ac_send_xid_r()
59 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); in llc_station_ac_send_xid_r()
60 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127); in llc_station_ac_send_xid_r()
61 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); in llc_station_ac_send_xid_r()
64 dev_queue_xmit(nskb); in llc_station_ac_send_xid_r()
68 kfree_skb(nskb); in llc_station_ac_send_xid_r()
77 struct sk_buff *nskb; in llc_station_ac_send_test_r() local
84 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); in llc_station_ac_send_test_r()
86 if (!nskb) in llc_station_ac_send_test_r()
[all …]
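The llc_c_ac.c and llc_station.c handlers above share one shape: allocate a fresh response frame, build the protocol and MAC headers, transmit on success, and free the frame if header construction fails. The sketch below restates that shape with generic sk_buff/netdevice calls instead of the LLC-specific llc_alloc_frame()/llc_pdu_* helpers, so it illustrates the pattern rather than the LLC code itself; all names and the ETH_P_802_2 choice are assumptions:

```c
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative sketch: build a small reply frame, prepend the link-layer
 * header, then either hand it to the device or drop it on failure. */
static int send_reply_frame(struct net_device *dev, const u8 *dest,
			    const void *payload, unsigned int len)
{
	struct sk_buff *nskb;
	int rc;

	nskb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	skb_reserve(nskb, LL_RESERVED_SPACE(dev));	/* room for the MAC header */
	nskb->dev = dev;
	skb_put_data(nskb, payload, len);

	/* Prepend the link-layer header; on failure the frame is freed,
	 * mirroring the rc/kfree_skb() paths in the hits above. */
	rc = dev_hard_header(nskb, dev, ETH_P_802_2, dest, dev->dev_addr, len);
	if (rc < 0) {
		kfree_skb(nskb);
		return rc;
	}

	return dev_queue_xmit(nskb);
}
```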
|
| /linux/net/bridge/netfilter/ |
| nft_reject_bridge.c |
24 struct sk_buff *nskb) in nft_reject_br_push_etherhdr() argument
28 eth = skb_push(nskb, ETH_HLEN); in nft_reject_br_push_etherhdr()
29 skb_reset_mac_header(nskb); in nft_reject_br_push_etherhdr()
33 skb_pull(nskb, ETH_HLEN); in nft_reject_br_push_etherhdr()
38 __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); in nft_reject_br_push_etherhdr()
50 struct sk_buff *nskb; in nft_reject_br_send_v4_tcp_reset() local
52 nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, NULL, hook); in nft_reject_br_send_v4_tcp_reset()
53 if (!nskb) in nft_reject_br_send_v4_tcp_reset()
56 nft_reject_br_push_etherhdr(oldskb, nskb); in nft_reject_br_send_v4_tcp_reset()
58 br_forward(br_port_get_rcu(dev), nskb, false, true); in nft_reject_br_send_v4_tcp_reset()
[all …]
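nft_reject_br_push_etherhdr() gives the generated TCP reset a link-layer header rebuilt from the packet being rejected, with source and destination swapped so the reply goes back out the port it arrived on. A hedged reconstruction of just that step, based on the lines shown above (the VLAN handling at line 38 is omitted):

```c
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Sketch: push an Ethernet header onto the freshly built reject skb and
 * fill it from the offending packet, swapping source and destination. */
static void push_reject_ethhdr(const struct sk_buff *oldskb, struct sk_buff *nskb)
{
	struct ethhdr *eth;

	eth = skb_push(nskb, ETH_HLEN);
	skb_reset_mac_header(nskb);
	ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
	eth->h_proto = eth_hdr(oldskb)->h_proto;
	skb_pull(nskb, ETH_HLEN);	/* leave data pointing at the IP header */
}
```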
|
| /linux/net/netfilter/ |
| nft_reject_netdev.c |
19 static void nft_reject_queue_xmit(struct sk_buff *nskb, struct sk_buff *oldskb) in nft_reject_queue_xmit() argument
21 dev_hard_header(nskb, nskb->dev, ntohs(oldskb->protocol), in nft_reject_queue_xmit()
23 nskb->len); in nft_reject_queue_xmit()
24 dev_queue_xmit(nskb); in nft_reject_queue_xmit()
32 struct sk_buff *nskb; in nft_reject_netdev_send_v4_tcp_reset() local
34 nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook); in nft_reject_netdev_send_v4_tcp_reset()
35 if (!nskb) in nft_reject_netdev_send_v4_tcp_reset()
38 nft_reject_queue_xmit(nskb, oldskb); in nft_reject_netdev_send_v4_tcp_reset()
46 struct sk_buff *nskb; in nft_reject_netdev_send_v4_unreach() local
48 nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code); in nft_reject_netdev_send_v4_unreach()
[all …]
|
| nf_synproxy_core.c |
419 const struct sk_buff *skb, struct sk_buff *nskb, in synproxy_send_tcp() argument
425 nskb->ip_summed = CHECKSUM_PARTIAL; in synproxy_send_tcp()
426 nskb->csum_start = (unsigned char *)nth - nskb->head; in synproxy_send_tcp()
427 nskb->csum_offset = offsetof(struct tcphdr, check); in synproxy_send_tcp()
429 skb_dst_set_noref(nskb, skb_dst(skb)); in synproxy_send_tcp()
430 nskb->protocol = htons(ETH_P_IP); in synproxy_send_tcp()
431 if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) in synproxy_send_tcp()
435 nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo); in synproxy_send_tcp()
439 ip_local_out(net, nskb->sk, nskb); in synproxy_send_tcp()
443 kfree_skb(nskb); in synproxy_send_tcp()
[all …]
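Lines 425-427 set up checksum offload for the packet synproxy builds: instead of computing the TCP checksum in software, the skb records where checksumming starts and where the 16-bit result belongs, and the stack or NIC completes it at transmit time. A minimal sketch of just those three assignments; the helper name is illustrative:

```c
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/tcp.h>

/* Sketch: mark a constructed TCP packet for deferred checksumming.
 * csum_start is the offset (from skb->head) where checksumming begins,
 * csum_offset locates the checksum field inside the TCP header. */
static void mark_tcp_csum_partial(struct sk_buff *nskb, struct tcphdr *nth)
{
	nskb->ip_summed = CHECKSUM_PARTIAL;
	nskb->csum_start = (unsigned char *)nth - nskb->head;
	nskb->csum_offset = offsetof(struct tcphdr, check);
}
```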
|
| /linux/net/tls/ |
| tls_device_fallback.c |
183 static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) in update_chksum()
188 skb_copy_header(nskb, skb); in update_chksum()
190 skb_put(nskb, skb->len); in update_chksum()
191 memcpy(nskb->data, skb->data, headln); in update_chksum()
193 nskb->destructor = skb->destructor; in update_chksum()
194 nskb->sk = sk; in update_chksum()
198 update_chksum(nskb, headln); in update_chksum()
201 if (nskb->destructor == sock_efree)
204 delta = nskb->truesize - skb->truesize; in complete_skb()
282 struct sk_buff *nskb, in fill_sg_in()
202 complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) complete_skb() argument
301 fill_sg_out(struct scatterlist sg_out[3], void *buf, struct tls_context *tls_ctx, struct sk_buff *nskb, int tcp_payload_offset, int payload_len, int sync_size, void *dummy_buf) fill_sg_out() argument
329 struct sk_buff *nskb = NULL; tls_enc_skb() local
390 struct sk_buff *nskb = NULL; tls_sw_fallback() local
[all …]
| tls_strp.c |
290 struct sk_buff *nskb, *first, *last; in tls_strp_copyin_skb() local
301 nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk); in tls_strp_copyin_skb()
302 if (!nskb) in tls_strp_copyin_skb()
307 shinfo->frag_list = nskb; in tls_strp_copyin_skb()
308 nskb->prev = nskb; in tls_strp_copyin_skb()
312 last->next = nskb; in tls_strp_copyin_skb()
313 first->prev = nskb; in tls_strp_copyin_skb()
331 __pskb_trim(nskb, nskb->len - over); in tls_strp_copyin_skb()
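tls_strp_copyin_skb() chains each copied chunk onto an anchor skb's frag_list and reuses the list head's prev pointer as a tail pointer, so appends stay O(1) without walking the list. A hedged sketch of only that chaining step; the helper and parameter names are illustrative:

```c
#include <linux/skbuff.h>

/* Sketch: append nskb to anchor's frag_list.  The first child's prev
 * pointer doubles as the tail pointer of the chain. */
static void frag_list_append(struct sk_buff *anchor, struct sk_buff *nskb)
{
	struct skb_shared_info *shinfo = skb_shinfo(anchor);
	struct sk_buff *first = shinfo->frag_list;

	if (!first) {
		shinfo->frag_list = nskb;
		nskb->prev = nskb;		/* single element: head is also tail */
	} else {
		first->prev->next = nskb;	/* old tail -> new node */
		first->prev = nskb;		/* record new tail */
	}
}
```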
|
| /linux/net/bluetooth/cmtp/ |
| core.c |
109 struct sk_buff *skb = session->reassembly[id], *nskb; in cmtp_add_msgpart() local
116 nskb = alloc_skb(size, GFP_ATOMIC); in cmtp_add_msgpart()
117 if (!nskb) { in cmtp_add_msgpart()
123 skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len); in cmtp_add_msgpart()
125 skb_put_data(nskb, buf, count); in cmtp_add_msgpart()
127 session->reassembly[id] = nskb; in cmtp_add_msgpart()
211 struct sk_buff *skb, *nskb; in cmtp_process_transmit() local
217 nskb = alloc_skb(session->mtu, GFP_ATOMIC); in cmtp_process_transmit()
218 if (!nskb) { in cmtp_process_transmit()
226 tail = session->mtu - nskb->len; in cmtp_process_transmit()
[all …]
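cmtp_add_msgpart() grows the reassembly buffer by allocating a larger skb, copying the data collected so far, and appending the new fragment, since an sk_buff cannot be resized in place. A hedged sketch of that step; the function name and the keep-old-data-on-failure policy are illustrative simplifications, not taken from the driver:

```c
#include <linux/skbuff.h>
#include <linux/types.h>

/* Sketch: return a buffer holding old data plus the new fragment. */
static struct sk_buff *append_fragment(struct sk_buff *old,
				       const u8 *buf, size_t count)
{
	struct sk_buff *nskb;

	nskb = alloc_skb((old ? old->len : 0) + count, GFP_ATOMIC);
	if (!nskb)
		return old;	/* simplified: keep what was already reassembled */

	if (old)
		skb_copy_from_linear_data(old, skb_put(nskb, old->len), old->len);
	skb_put_data(nskb, buf, count);

	kfree_skb(old);		/* kfree_skb(NULL) is a no-op */
	return nskb;
}
```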
|
| /linux/net/dsa/ |
| tag.c |
59 struct sk_buff *nskb = NULL; in dsa_switch_rcv() local
81 nskb = skb; in dsa_switch_rcv()
84 nskb = cpu_dp->rcv(skb, dev); in dsa_switch_rcv()
87 if (!nskb) { in dsa_switch_rcv()
92 skb = nskb; in dsa_switch_rcv()
110 nskb = dsa_software_vlan_untag(skb); in dsa_switch_rcv()
111 if (!nskb) { in dsa_switch_rcv()
115 skb = nskb; in dsa_switch_rcv()
|
| tag_brcm.c |
197 struct sk_buff *nskb; in brcm_tag_rcv() local
200 nskb = brcm_tag_rcv_ll(skb, dev, 2); in brcm_tag_rcv()
201 if (!nskb) in brcm_tag_rcv()
202 return nskb; in brcm_tag_rcv()
206 return nskb; in brcm_tag_rcv()
|
| /linux/net/bluetooth/bnep/ |
| core.c |
301 struct sk_buff *nskb; in bnep_rx_frame() local
366 nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); in bnep_rx_frame()
367 if (!nskb) { in bnep_rx_frame()
372 skb_reserve(nskb, 2); in bnep_rx_frame()
377 __skb_put_data(nskb, &s->eh, ETH_HLEN); in bnep_rx_frame()
381 __skb_put_data(nskb, s->eh.h_dest, ETH_ALEN); in bnep_rx_frame()
382 __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); in bnep_rx_frame()
383 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); in bnep_rx_frame()
387 __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); in bnep_rx_frame()
388 __skb_put_data(nskb, s->eh.h_source, ETH_ALEN); in bnep_rx_frame()
[all …]
|
| /linux/include/linux/can/ |
| skb.h |
91 struct sk_buff *nskb; in can_create_echo_skb() local
93 nskb = skb_clone(skb, GFP_ATOMIC); in can_create_echo_skb()
94 if (unlikely(!nskb)) { in can_create_echo_skb()
99 can_skb_set_owner(nskb, skb->sk); in can_create_echo_skb()
101 return nskb; in can_create_echo_skb()
|
| /linux/net/sched/ |
| sch_etf.c |
75 static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb) in is_packet_valid() argument
78 ktime_t txtime = nskb->tstamp; in is_packet_valid()
79 struct sock *sk = nskb->sk; in is_packet_valid()
162 static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, in etf_enqueue_timesortedlist() argument
167 ktime_t txtime = nskb->tstamp; in etf_enqueue_timesortedlist()
170 if (!is_packet_valid(sch, nskb)) { in etf_enqueue_timesortedlist()
171 report_sock_error(nskb, EINVAL, in etf_enqueue_timesortedlist()
173 return qdisc_drop(nskb, sch, to_free); in etf_enqueue_timesortedlist()
188 rb_link_node(&nskb->rbnode, parent, p); in etf_enqueue_timesortedlist()
189 rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost); in etf_enqueue_timesortedlist()
[all …]
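etf_enqueue_timesortedlist() keeps pending packets in a cached rbtree keyed by skb->tstamp, so the earliest transmission time always sits at the leftmost node. The excerpt is truncated before the comparison walk, so the loop below is a hedged reconstruction (function name illustrative); as lines 170-173 show, the real qdisc also validates the packet and accounts drops before inserting:

```c
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/skbuff.h>

/* Sketch: insert nskb into a time-sorted cached rbtree.  Equal or later
 * txtimes go right so earlier packets dequeue first. */
static void etf_insert_sorted(struct rb_root_cached *head, struct sk_buff *nskb)
{
	struct rb_node **p = &head->rb_root.rb_node, *parent = NULL;
	ktime_t txtime = nskb->tstamp;
	bool leftmost = true;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (ktime_compare(txtime, skb->tstamp) >= 0) {
			p = &parent->rb_right;
			leftmost = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color_cached(&nskb->rbnode, head, leftmost);
}
```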
|
| sch_dualpi2.c |
457 struct sk_buff *nskb, *next; in dualpi2_qdisc_enqueue() local
462 nskb = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in dualpi2_qdisc_enqueue()
463 if (IS_ERR_OR_NULL(nskb)) in dualpi2_qdisc_enqueue()
469 skb_list_walk_safe(nskb, nskb, next) { in dualpi2_qdisc_enqueue()
470 skb_mark_not_on_list(nskb); in dualpi2_qdisc_enqueue()
477 qdisc_skb_cb(nskb)->pkt_len = nskb->len; in dualpi2_qdisc_enqueue()
478 qdisc_skb_cb(nskb)->pkt_segs = 1; in dualpi2_qdisc_enqueue()
479 dualpi2_skb_cb(nskb) in dualpi2_qdisc_enqueue()
[all …]
| /linux/net/vmw_vsock/ |
| af_vsock_tap.c |
64 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in __vsock_deliver_tap_skb() local
66 if (nskb) { in __vsock_deliver_tap_skb()
69 nskb->dev = dev; in __vsock_deliver_tap_skb()
70 ret = dev_queue_xmit(nskb); in __vsock_deliver_tap_skb()
|
| /linux/net/core/ |
| skbuff.c |
923 struct sk_buff *skb = *pskb, *nskb; in skb_pp_cow_data() local
943 nskb = napi_build_skb(data, truesize); in skb_pp_cow_data()
944 if (!nskb) { in skb_pp_cow_data()
949 skb_reserve(nskb, headroom); in skb_pp_cow_data()
950 skb_copy_header(nskb, skb); in skb_pp_cow_data()
951 skb_mark_for_recycle(nskb); in skb_pp_cow_data()
953 err = skb_copy_bits(skb, 0, nskb->data, size); in skb_pp_cow_data()
955 consume_skb(nskb); in skb_pp_cow_data()
958 skb_put(nskb, size); in skb_pp_cow_data()
960 head_off = skb_headroom(nskb) - skb_headroom(skb); in skb_pp_cow_data()
[all …]
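skb_pp_cow_data() builds a fresh head and uses skb_copy_bits() to flatten the old skb (linear data plus page fragments) into it. The sketch below keeps only that copy step and deliberately substitutes plain alloc_skb() for the page-pool allocation and napi_build_skb() call shown above, so it is an illustration rather than an equivalent:

```c
#include <linux/skbuff.h>

/* Sketch: produce a linear copy of the first 'size' bytes of skb.
 * skb_copy_bits() walks both the linear area and the page fragments. */
static struct sk_buff *copy_to_linear(const struct sk_buff *skb,
				      unsigned int headroom, unsigned int size)
{
	struct sk_buff *nskb;

	nskb = alloc_skb(headroom + size, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	skb_reserve(nskb, headroom);
	if (skb_copy_bits(skb, 0, skb_put(nskb, size), size)) {
		consume_skb(nskb);	/* copy failed: drop the new head */
		return NULL;
	}
	return nskb;
}
```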
|
| /linux/drivers/isdn/mISDN/ |
| dsp_core.c |
284 struct sk_buff *nskb; in dsp_control_req() local
590 nskb = _alloc_mISDN_skb(PH_CONTROL_IND, MISDN_ID_ANY, in dsp_control_req()
592 if (nskb) { in dsp_control_req()
594 if (dsp->up->send(dsp->up, nskb)) in dsp_control_req()
595 dev_kfree_skb(nskb); in dsp_control_req()
597 dev_kfree_skb(nskb); in dsp_control_req()
742 struct sk_buff *nskb; in dsp_function() local
748 nskb = _alloc_mISDN_skb(PH_CONTROL_IND, in dsp_function()
751 if (nskb) { in dsp_function()
754 dsp->up, nskb)) in dsp_function()
[all …]
|
| dsp_cmx.c |
1304 struct sk_buff *nskb, *txskb; local
1343 nskb = mI_alloc_skb(len + preload, GFP_ATOMIC);
1344 if (!nskb) {
1350 hh = mISDN_HEAD_P(nskb);
1359 d = skb_put(nskb, preload + len); /* result */
1573 skb_queue_tail(&dsp->sendq, nskb);
1587 skb_put_data(txskb, nskb->data + preload, len);
1597 dsp_change_volume(nskb, dsp->tx_volume);
1600 dsp_pipeline_process_tx(&dsp->pipeline, nskb->data,
1601 nskb->len);
[all …]
|
| /linux/net/x25/ |
| x25_dev.c |
97 struct sk_buff *nskb; in x25_lapb_receive_frame() local
103 nskb = skb_copy(skb, GFP_ATOMIC); in x25_lapb_receive_frame()
104 if (!nskb) in x25_lapb_receive_frame()
107 skb = nskb; in x25_lapb_receive_frame()
|
| /linux/drivers/net/xen-netback/ |
| netback.c |
383 struct sk_buff *nskb, in xenvif_get_requests() argument
486 shinfo = skb_shinfo(nskb); in xenvif_get_requests()
507 skb_shinfo(skb)->frag_list = nskb; in xenvif_get_requests()
508 nskb = NULL; in xenvif_get_requests()
512 if (nskb) { in xenvif_get_requests()
517 kfree_skb(nskb); in xenvif_get_requests()
921 struct sk_buff *skb, *nskb; in xenvif_tx_build_gops() local
1040 nskb = NULL; in xenvif_tx_build_gops()
1045 nskb = xenvif_alloc_skb(0); in xenvif_tx_build_gops()
1046 if (unlikely(nskb == NULL)) { in xenvif_tx_build_gops()
[all …]
|
| /linux/drivers/net/ethernet/sun/ |
| sunvnet_common.c |
1111 struct sk_buff *nskb; in vnet_skb_shape() local
1137 nskb = alloc_and_align_skb(skb->dev, len); in vnet_skb_shape()
1138 if (!nskb) { in vnet_skb_shape()
1142 skb_reserve(nskb, VNET_PACKET_SKIP); in vnet_skb_shape()
1144 nskb->protocol = skb->protocol; in vnet_skb_shape()
1146 skb_set_mac_header(nskb, offset); in vnet_skb_shape()
1148 skb_set_network_header(nskb, offset); in vnet_skb_shape()
1150 skb_set_transport_header(nskb, offset); in vnet_skb_shape()
1153 nskb->csum_offset = skb->csum_offset; in vnet_skb_shape()
1154 nskb->ip_summed = skb->ip_summed; in vnet_skb_shape()
[all …]
|
| /linux/drivers/bluetooth/ |
| bfusb.c |
451 struct sk_buff *nskb; in bfusb_send_frame() local
476 nskb = bt_skb_alloc(count + 32, GFP_KERNEL); in bfusb_send_frame()
477 if (!nskb) { in bfusb_send_frame()
482 nskb->dev = (void *) data; in bfusb_send_frame()
491 skb_put_data(nskb, buf, 3); in bfusb_send_frame()
492 skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size); in bfusb_send_frame()
499 if ((nskb->len % data->bulk_pkt_size) == 0) { in bfusb_send_frame()
502 skb_put_data(nskb, buf, 2); in bfusb_send_frame()
507 skb_queue_tail(&data->transmit_q, nskb); in bfusb_send_frame()
|
| /linux/net/sctp/ |
| output.c |
444 struct sk_buff *nskb; in sctp_packet_pack() local
451 nskb = head; in sctp_packet_pack()
472 nskb = alloc_skb(pkt_size + MAX_HEADER, gfp); in sctp_packet_pack()
473 if (!nskb) in sctp_packet_pack()
475 skb_reserve(nskb, packet->overhead + MAX_HEADER); in sctp_packet_pack()
498 skb_tail_pointer(nskb); in sctp_packet_pack()
500 skb_put_data(nskb, chunk->skb->data, chunk->skb->len); in sctp_packet_pack()
520 sctp_auth_calculate_hmac(tp->asoc, nskb, auth, in sctp_packet_pack()
531 sctp_packet_gso_append(head, nskb); in sctp_packet_pack()
|
| /linux/net/nfc/ |
| rawsock.c |
362 struct sk_buff *skb_copy = NULL, *nskb; in nfc_send_to_raw_sock() local
382 nskb = skb_clone(skb_copy, GFP_ATOMIC); in nfc_send_to_raw_sock()
383 if (!nskb) in nfc_send_to_raw_sock()
386 if (sock_queue_rcv_skb(sk, nskb)) in nfc_send_to_raw_sock()
387 kfree_skb(nskb); in nfc_send_to_raw_sock()
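nfc_send_to_raw_sock() hands each listening raw socket its own clone; sock_queue_rcv_skb() takes ownership on success, so the clone is freed only when queueing fails. A minimal sketch of the per-listener step (function name illustrative):

```c
#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: deliver a private clone of skb to one raw socket.  On queueing
 * failure the clone is dropped; the shared original is untouched. */
static void deliver_to_listener(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb)
		return;

	if (sock_queue_rcv_skb(sk, nskb))
		kfree_skb(nskb);
}
```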
|