/linux/include/linux/

  skbuff.h
     535  int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
     923  void (*destructor)(struct sk_buff *skb);
    1139  static inline bool skb_pfmemalloc(const struct sk_buff *skb)
    1141          return unlikely(skb->pfmemalloc);
    1157  static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
    1162          WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
    1165          return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
    1168  static inline void skb_dst_check_unset(struct sk_buff *skb)
    1170          DEBUG_NET_WARN_ON_ONCE((skb->_skb_refdst & SKB_DST_PTRMASK) &&
    1171                  !(skb->_skb_refdst & SKB_DST_NOREF));
    [all …]
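The skbuff.h hits above are the core accessors every receive and transmit path relies on. As a purely illustrative sketch (not kernel code; my_rx_check() is a hypothetical name), this is how a handler might consult skb_pfmemalloc() and skb_dst():

```c
#include <linux/skbuff.h>

/* Hypothetical helper: reject pfmemalloc skbs and require an attached route. */
static bool my_rx_check(const struct sk_buff *skb)
{
	/* Packets built from emergency reserves should only reach
	 * sockets that are allowed to use those reserves. */
	if (skb_pfmemalloc(skb))
		return false;

	/* skb_dst() decodes the tagged _skb_refdst word into the
	 * attached dst_entry, or NULL when no route is set. */
	return skb_dst(skb) != NULL;
}
```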
/linux/net/xfrm/

  xfrm_output.c
      28  static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
      29  static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
      31  static int xfrm_skb_check_space(struct sk_buff *skb)
      33          struct dst_entry *dst = skb_dst(skb);
      35                  - skb_headroom(skb);  /* in xfrm_skb_check_space() */
      36          int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
      45          return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
      52  static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
      54          struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));
      56          skb_dst_drop(skb);
      [all …]
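xfrm_skb_check_space() above grows the skb head when the output device needs more head- or tailroom than is available. A minimal sketch of the same pattern, assuming LL_RESERVED_SPACE() as a stand-in for the real headroom computation (my_check_space() is a hypothetical name):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* Hypothetical helper: make sure the skb satisfies the output device's
 * head/tailroom requirements before an outer header is pushed. */
static int my_check_space(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int nhead = LL_RESERVED_SPACE(dst->dev) - skb_headroom(skb);
	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

	if (nhead <= 0 && ntail <= 0)
		return 0;		/* already roomy enough */

	/* GFP_ATOMIC: output paths may run in softirq context. */
	return pskb_expand_head(skb, max(nhead, 0), max(ntail, 0),
				GFP_ATOMIC);
}
```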
  xfrm_input.c
      41  int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
     103  static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
     114          ret = afinfo->callback(skb, protocol, err);
     120  struct sec_path *secpath_set(struct sk_buff *skb)
     122          struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
     124          sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
     143  int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
     160          if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
     162          *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
     169          if (!pskb_may_pull(skb, hle…  /* truncated, in xfrm_parse_spi() */

    Definitions:
     178  xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
     226  ipip_ecn_decapsulate(struct sk_buff *skb)
     234  xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
     263  ipip6_ecn_decapsulate(struct sk_buff *skb)
     271  xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
     300  xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
     339  xfrm_inner_mode_encap_remove(struct xfrm_state *x, struct sk_buff *skb)
     365  xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
     390  xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
     408  xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
     433  xfrm_inner_mode_input(struct xfrm_state *x, struct sk_buff *skb)
     463  xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
     769  xfrm_input_resume(struct sk_buff *skb, int nexthdr)
     779  struct sk_buff *skb;  /* local in xfrm_trans_reinject() */
     793  xfrm_trans_queue_net(struct net *net, struct sk_buff *skb, int (*finish)(struct net *, struct sock *, struct sk_buff *))
     816  xfrm_trans_queue(struct sk_buff *skb, int (*finish)(struct net *, struct sock *, struct sk_buff *))
     [all …]
/linux/net/bridge/

  br_netfilter_hooks.c
      72  #define IS_IP(skb) \
      73          (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
      75  #define IS_IPV6(skb) \
      76          (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
      78  #define IS_ARP(skb) \
      79          (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
      81  static inline __be16 vlan_proto(const struct sk_buff *skb)
      83          if (skb_vlan_tag_present(skb))
      84                  return skb->protocol;
      85          else if (skb->protocol == htons(ETH_P_8021Q))
      [all …]
  br_forward.c
      22          const struct sk_buff *skb)  /* in should_deliver() */
      27          return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
      29                  br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) &&
      30                  !br_skb_isolated(p, skb);
      33  int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
      35          skb_push(skb, ETH_HLEN);
      36          if (!is_skb_forwardable(skb->dev, skb))
      39          br_drop_fake_rtable(skb);
      41          if (skb->ip_summed == CHECKSUM_PARTIAL &&
      42              eth_type_vlan(skb->protocol)) {
      [all …]
  br_netfilter_ipv6.c
      43  int br_validate_ipv6(struct net *net, struct sk_buff *skb)
      46          struct inet6_dev *idev = __in6_dev_get(skb->dev);
      50          if (!pskb_may_pull(skb, ip6h_len))
      53          if (skb->len < ip6h_len)
      56          hdr = ipv6_hdr(skb);
      62          if (hdr->nexthdr == NEXTHDR_HOP && nf_ip6_check_hbh_len(skb, &pkt_len))
      65          if (pkt_len + ip6h_len > skb->len) {
      70          if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
      76          memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
      89  br_nf_ipv6_daddr_was_changed(const struct sk_buff *skb,
      [all …]
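br_validate_ipv6() above is a compact example of sanity-checking an IPv6 packet before netfilter sees it. A simplified, hedged sketch of the same checks (my_validate_ipv6() is a hypothetical name; the real function also handles hop-by-hop options and drop accounting):

```c
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

/* Hypothetical helper: basic IPv6 header validation on a bridged skb. */
static bool my_validate_ipv6(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	u32 pkt_len;

	/* The fixed header must be present in the linear area. */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return false;

	hdr = ipv6_hdr(skb);
	if (hdr->version != 6)
		return false;

	/* Trim anything beyond the advertised payload length. */
	pkt_len = ntohs(hdr->payload_len);
	if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
		return false;

	return pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)) == 0;
}
```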
/linux/net/core/

  gro.c
      92  int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
      94          struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
      95          unsigned int offset = skb_gro_offset(skb);
      96          unsigned int headlen = skb_headlen(skb);
      97          unsigned int len = skb_gro_len(skb);
     109          if (p->pp_recycle != skb->pp_recycle)
     113                       NAPI_GRO_CB(skb)->flush))
     117          if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
     124          segs = NAPI_GRO_CB(skb)->count;
     151                  new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
     [all …]
  skbuff.c
     206  static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
     210                   msg, addr, skb->len, sz, skb->head, skb->data,
     211                   (unsigned long)skb->tail, (unsigned long)skb->end,
     212                   skb->dev ? skb->dev->name : "<NULL>");
     216  static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
     218          skb_panic(skb, sz, addr, __func__);
     221  static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
     223          skb_panic(skb, sz, addr, __func__);
     286          struct sk_buff *skb;  /* local in napi_skb_cache_get() */
     301          skb = nc->skb_cache[--nc->skb_count];
     [all …]
  gso.c
      13  struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
      22                  segs = ptype->callbacks.gso_segment(skb, features);
      37  struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
      42          int vlan_depth = skb->mac_len;
      43          __be16 type = skb_network_protocol(skb, &vlan_depth);
      48          __skb_pull(skb, vlan_depth);
      53                  segs = ptype->callbacks.gso_segment(skb, features);
      59          __skb_push(skb, skb->data - skb_mac_header(skb));
      66  static bool skb_needs_check(const struct sk_buff *skb, bool tx_path)
      69                  return skb->ip_summed != CHECKSUM_PARTIAL &&
      [all …]
/linux/drivers/net/can/dev/

  skb.c
      47  int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
      60              (skb->protocol != htons(ETH_P_CAN) &&
      61               skb->protocol != htons(ETH_P_CANFD) &&
      62               skb->protocol != htons(ETH_P_CANXL))) {
      63                  kfree_skb(skb);
      68          skb = can_create_echo_skb(skb);
      69          if (!skb)
      73          skb->ip_summed = CHECKSUM_UNNECESSARY;
      74          skb->dev = dev;
      77          can_skb_prv(skb)->frame_len = frame_len;
      [all …]
/linux/net/ipv6/

  exthdrs.c
      65  static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
      79          switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
      90                  if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
      94                  icmpv6_param_prob_reason(skb, ICMPV6_UNK_OPTION, optoff,
     100          kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
     104  static bool ipv6_hop_ra(struct sk_buff *skb, int optoff);
     105  static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff);
     106  static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff);
     107  static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff);
     109  static bool ipv6_dest_hao(struct sk_buff *skb, int optoff);
     [all …]
  ip6_input.c
      49                  struct sk_buff *skb)  /* in ip6_rcv_finish_core() */
      52              !skb_dst(skb) && !skb->sk) {
      53                  switch (ipv6_hdr(skb)->nexthdr) {
      56                          tcp_v6_early_demux(skb);
      60                          udp_v6_early_demux(skb);
      65          if (!skb_valid_dst(skb))
      66                  ip6_route_input(skb);
      69  int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
      74                  skb = l3mdev_ip6_rcv(skb);
      75                  if (!skb)
      [all …]
  ip6_offload.c
      33  #define indirect_call_gro_receive_l4(f2, f1, cb, head, skb) \
      35          unlikely(gro_recursion_inc_test(skb)) ? \
      36          NAPI_GRO_CB(skb)->flush |= 1, NULL : \
      37          INDIRECT_CALL_L4(cb, f2, f1, head, skb); \
      40  static int ipv6_gro_pull_exthdrs(struct sk_buff *skb, int off, int proto)
      56                  opth = skb_gro_header(skb, off + sizeof(*opth), off);
      62                  opth = skb_gro_header(skb, off + len, off);
      70          skb_gro_pull(skb, off - skb_gro_receive_network_offset(skb));
      74  static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
      90          if (unlikely(!pskb_may_pull(skb, 8)))
      [all …]
/linux/net/devlink/

  netlink_gen.h
      23  int devlink_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
      26                          struct sk_buff *skb, struct genl_info *info);
      28                          struct sk_buff *skb, struct genl_info *info);
      30                          struct sk_buff *skb,
      33  devlink_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
      37                          struct sk_buff *skb, struct genl_info *info);
      39  int devlink_nl_get_doit(struct sk_buff *skb, struct genl_info *info);
      40  int devlink_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
      41  int devlink_nl_port_get_doit(struct sk_buff *skb, struct genl_info *info);
      42  int devlink_nl_port_get_dumpit(struct sk_buff *skb,
      [all …]
/linux/drivers/net/ethernet/qualcomm/rmnet/

  rmnet_handlers.c
      22  static void rmnet_set_skb_proto(struct sk_buff *skb)
      24          switch (skb->data[0] & 0xF0) {
      26                  skb->protocol = htons(ETH_P_IP);
      29                  skb->protocol = htons(ETH_P_IPV6);
      32                  skb->protocol = htons(ETH_P_MAP);
      40  rmnet_deliver_skb(struct sk_buff *skb)
      42          struct rmnet_priv *priv = netdev_priv(skb->dev);
      44          skb_reset_transport_header(skb);
      45          skb_reset_network_header(skb);
      46          rmnet_vnd_rx_fixup(skb, skb->dev);
      [all …]
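The rmnet_set_skb_proto() hit above shows the usual trick for raw-IP link types: with no Ethernet header, the first nibble of the payload decides the protocol. A hedged sketch of the same dispatch (my_set_raw_ip_proto() is a hypothetical name):

```c
#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Hypothetical helper: classify a raw-IP payload by its version nibble. */
static void my_set_raw_ip_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case 0x40:				/* IPv4 */
		skb->protocol = htons(ETH_P_IP);
		break;
	case 0x60:				/* IPv6 */
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:				/* not plain IP; leave for MAP handling */
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}
```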
/linux/include/net/

  llc_c_ev.h
     123  static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
     125          return (struct llc_conn_state_ev *)skb->cb;
     128  typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
     129  typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
     131  int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
     132  int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
     133  int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
     134  int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
     135  int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
     136  int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
     [all …]
  llc_c_ac.h
      97  typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
      99  int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
     100  int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
     101  int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
     102  int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
     103  int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
     104  int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
     105  int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
     107          struct sk_buff *skb);
     109          struct sk_buff *skb);
     [all …]
/linux/net/sched/

  sch_frag.c
      18          int (*xmit)(struct sk_buff *skb);
      26  static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
      31          if (skb_cow_head(skb, data->l2_len) < 0) {
      32                  kfree_skb(skb);
      36          __skb_dst_copy(skb, data->dst);
      37          *qdisc_skb_cb(skb) = data->cb;
      38          skb->inner_protocol = data->inner_protocol;
      40                  __vlan_hwaccel_put_tag(skb, data->vlan_proto,
      43                  __vlan_hwaccel_clear_tag(skb);
      46          skb_push(skb, dat…  /* truncated, in sch_frag_xmit() */

    Definitions:
      23  sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
      50-51  sch_frag_prepare_frag(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
      84-85  sch_fragment(struct net *net, struct sk_buff *skb, u16 mru, int (*xmit)(struct sk_buff *skb))
     140  sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
     [all …]
/linux/drivers/net/wireless/ath/ath10k/

  wmi-ops.h
      15          void (*rx)(struct ath10k *ar, struct sk_buff *skb);
      19          int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
      21          int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
      23          int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
      26                  struct ath10k *ar, struct sk_buff *skb,
      28          int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
      30          int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
      32          int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
      34          int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
      36          int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
      [all …]
/linux/net/bridge/netfilter/

  nf_conntrack_bridge.c
      28                  struct sk_buff *skb,  /* in nf_br_ip_fragment() */
      34          int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
      35          u8 tstamp_type = skb->tstamp_type;
      37          ktime_t tstamp = skb->tstamp;
      43          if (skb->ip_summed == CHECKSUM_PARTIAL &&
      44              (err = skb_checksum_help(skb)))
      47          iph = ip_hdr(skb);
      55          ll_rs = LL_RESERVED_SPACE(skb->dev);
      56          mtu = skb->dev->mtu;
      58          if (skb_has_frag_list(skb)) {

    Definitions:
     130  br_skb_cb_save(struct sk_buff *skb, struct br_input_skb_cb *cb, size_t inet_skb_parm_size)
     137  br_skb_cb_restore(struct sk_buff *skb, const struct br_input_skb_cb *cb, u16 fragsz)
     145  nf_ct_br_defrag4(struct sk_buff *skb, const struct nf_hook_state *state)
     175  nf_ct_br_defrag6(struct sk_buff *skb, const struct nf_hook_state *state)
     204  nf_ct_br_ip_check(const struct sk_buff *skb)
     223  nf_ct_br_ipv6_check(const struct sk_buff *skb)
     240  nf_ct_bridge_pre(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
     294  nf_ct_bridge_in(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
     324  nf_ct_bridge_frag_save(struct sk_buff *skb, struct nf_bridge_frag_data *data)
     338  nf_ct_bridge_refrag(struct sk_buff *skb, const struct nf_hook_state *state, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *))
     365  nf_ct_bridge_frag_restore(struct sk_buff *skb, const struct nf_bridge_frag_data *data)
     388  nf_ct_bridge_refrag_post(struct net *net, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *skb)
     399  nf_ct_bridge_post(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
     [all …]
/linux/drivers/net/ovpn/

  io.c
      43  static bool ovpn_is_keepalive(struct sk_buff *skb)
      45          if (*skb->data != ovpn_keepalive_message[0])
      48          if (skb->len != OVPN_KEEPALIVE_SIZE)
      51          if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
      54          return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
      60  static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
      70          skb_gso_reset(skb);
      75          skb->ip_summed = CHECKSUM_NONE;
      78          skb_clear_hash(skb);
      83          skb->dev = peer->ovpn->dev;
      [all …]
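ovpn_is_keepalive() above is a standard "magic packet" check: cheap length and first-byte tests before pulling the whole magic into the linear area for memcmp(). A sketch of the pattern with made-up values (my_is_keepalive() and the byte values are hypothetical, not the ovpn constants):

```c
#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical magic; the real keepalive constant lives in the driver. */
static const u8 my_keepalive_magic[] = {
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
};

static bool my_is_keepalive(struct sk_buff *skb)
{
	/* Cheap rejects first: exact length, then byte-for-byte compare. */
	if (skb->len != sizeof(my_keepalive_magic))
		return false;
	if (!pskb_may_pull(skb, sizeof(my_keepalive_magic)))
		return false;
	if (skb->data[0] != my_keepalive_magic[0])
		return false;

	return !memcmp(skb->data, my_keepalive_magic,
		       sizeof(my_keepalive_magic));
}
```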
/linux/net/ieee802154/6lowpan/

  rx.c
      21  static int lowpan_give_skb_to_device(struct sk_buff *skb)
      23          skb->protocol = htons(ETH_P_IPV6);
      24          skb->dev->stats.rx_packets++;
      25          skb->dev->stats.rx_bytes += skb->len;
      27          return netif_rx(skb);
      30  static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
      40                  kfree_skb(skb);
      46                  return lowpan_give_skb_to_device(skb);
      64  static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
      68          if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
      [all …]
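lowpan_give_skb_to_device() above shows the hand-off once a 6LoWPAN frame has been decompressed into plain IPv6. A minimal sketch of that pattern (my_deliver_ipv6() is a hypothetical name; it assumes the skb already holds a complete IPv6 packet):

```c
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: feed a reassembled IPv6 packet to the stack. */
static int my_deliver_ipv6(struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);
	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	/* netif_rx() queues the packet for the normal receive path. */
	return netif_rx(skb);
}
```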
/linux/net/ipv4/

  gre_offload.c
      16  static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
      19          int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
      22          u16 mac_offset = skb->mac_header;
      23          __be16 protocol = skb->protocol;
      24          u16 mac_len = skb->mac_len;
      27          if (!skb->encapsulation)
      33          if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
      37          skb->encapsulation = 0;
      38          SKB_GSO_CB(skb)->encap_level = 0;
      39          __skb_pull(skb, tnl_hlen);
      [all …]
/linux/net/netfilter/ipvs/

  ip_vs_xmit.c
      19   * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
      20   * - skb->pkt_type is not set yet
      21   * - the only place where we can see skb->sk != NULL
     106  __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
     108          if (IP6CB(skb)->frag_max_size) {
     112                  if (IP6CB(skb)->frag_max_size > mtu)
     115          else if (skb->len > mtu && !skb_is_gso(skb)) {
     157  static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,

    Definitions:
     107  __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
     158  crosses_local_route_boundary(int skb_af, struct sk_buff *skb, int rt_mode, bool new_rt_is_local)
     198  maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
     210  ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af, int rt_mode, struct ip_vs_iphdr *ipvsh, struct sk_buff *skb, int mtu)
     251  decrement_ttl(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb)
     300  __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, __be32 daddr, int rt_mode, __be32 *ret_saddr, struct ip_vs_iphdr *ipvsh)
     458  __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, struct in6_addr *daddr, struct in6_addr *ret_saddr, struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
     590  ip_vs_tunnel_xmit_prepare(struct sk_buff *skb, struct ip_vs_conn *cp)
     613  ip_vs_drop_early_demux_sk(struct sk_buff *skb)
     623  ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, struct ip_vs_conn *cp, int local)
     654  ip_vs_send_or_cont(int pf, struct sk_buff *skb, struct ip_vs_conn *cp, int local)
     679  ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
     693  ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
     718  ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
     746  ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
     830  ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
     920  ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, unsigned int max_headroom, __u8 *next_protocol, __u32 *payload_len, __u8 *dsfield, __u8 *ttl, __be16 *df)
     995  ipvs_gue_encap(struct net *net, struct sk_buff *skb, struct ip_vs_conn *cp, __u8 *next_protocol)
    1069  ipvs_gre_encap(struct net *net, struct sk_buff *skb, struct ip_vs_conn *cp, __u8 *next_protocol)
    1106  ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
    1251  ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
    1398  ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
    1428  ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
    1463  ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset, unsigned int hooknum, struct ip_vs_iphdr *iph)
    1548  ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset, unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
    [all …]
/linux/drivers/net/wireguard/

  receive.c
      26  #define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
      28  static size_t validate_header_len(struct sk_buff *skb)
      30          if (unlikely(skb->len < sizeof(struct message_header)))
      32          if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
      33              skb->len >= MESSAGE_MINIMUM_LENGTH)
      35          if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
      36              skb->len == sizeof(struct message_handshake_initiation))
      38          if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
      39              skb->len == sizeof(struct message_handshake_response))
      41          if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
      [all …]
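validate_header_len() above keys the expected packet size off the first little-endian 32-bit field of the payload. A generic sketch of that idea with invented names (MY_MSG_HANDSHAKE, MY_HANDSHAKE_LEN and struct my_msg_header are all hypothetical, not WireGuard's definitions):

```c
#include <linux/skbuff.h>
#include <linux/types.h>

struct my_msg_header {
	__le32 type;
};

#define MY_MSG_HANDSHAKE	cpu_to_le32(1)	/* hypothetical type id */
#define MY_HANDSHAKE_LEN	148		/* hypothetical fixed size */

/* Return the header length to strip, or 0 to drop the packet. */
static size_t my_validate_len(struct sk_buff *skb)
{
	const struct my_msg_header *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return 0;

	hdr = (const struct my_msg_header *)skb->data;
	if (hdr->type == MY_MSG_HANDSHAKE && skb->len == MY_HANDSHAKE_LEN)
		return sizeof(*hdr);

	return 0;	/* unknown type or wrong length */
}
```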