1*49ed6e97SBjoern A. Zeeb /*- 2*49ed6e97SBjoern A. Zeeb * Copyright (c) 2020-2021 The FreeBSD Foundation 3*49ed6e97SBjoern A. Zeeb * Copyright (c) 2021 Bjoern A. Zeeb 4*49ed6e97SBjoern A. Zeeb * 5*49ed6e97SBjoern A. Zeeb * This software was developed by Björn Zeeb under sponsorship from 6*49ed6e97SBjoern A. Zeeb * the FreeBSD Foundation. 7*49ed6e97SBjoern A. Zeeb * 8*49ed6e97SBjoern A. Zeeb * Redistribution and use in source and binary forms, with or without 9*49ed6e97SBjoern A. Zeeb * modification, are permitted provided that the following conditions 10*49ed6e97SBjoern A. Zeeb * are met: 11*49ed6e97SBjoern A. Zeeb * 1. Redistributions of source code must retain the above copyright 12*49ed6e97SBjoern A. Zeeb * notice, this list of conditions and the following disclaimer. 13*49ed6e97SBjoern A. Zeeb * 2. Redistributions in binary form must reproduce the above copyright 14*49ed6e97SBjoern A. Zeeb * notice, this list of conditions and the following disclaimer in the 15*49ed6e97SBjoern A. Zeeb * documentation and/or other materials provided with the distribution. 16*49ed6e97SBjoern A. Zeeb * 17*49ed6e97SBjoern A. Zeeb * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18*49ed6e97SBjoern A. Zeeb * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19*49ed6e97SBjoern A. Zeeb * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20*49ed6e97SBjoern A. Zeeb * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21*49ed6e97SBjoern A. Zeeb * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22*49ed6e97SBjoern A. Zeeb * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23*49ed6e97SBjoern A. Zeeb * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24*49ed6e97SBjoern A. Zeeb * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25*49ed6e97SBjoern A. 
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 * Do not rely on the internals of this implementation.  They are highly
 * likely to change as we will improve the integration to FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>

/*
 * Debug support: define SKB_DEBUG and set bits in the global 'debug_skb'
 * to enable per-call tracing (DSKB_TRACE), extra-verbose traces used by a
 * few functions (DSKB_TRACEX), and reports of unimplemented functionality
 * (DSKB_TODO).  With SKB_DEBUG undefined all macros compile to nothing.
 */
/* #define SKB_DEBUG */
#ifdef SKB_DEBUG

#define	DSKB_TODO	0x01
#define	DSKB_TRACE	0x02
#define	DSKB_TRACEX	0x04
extern int debug_skb;	/* Bitmask of the DSKB_* flags above. */

/* Trace entry into a function together with one pointer argument. */
#define	SKB_TRACE(_s)		if (debug_skb & DSKB_TRACE)		\
    printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
/* Trace entry into a function together with two pointer arguments. */
#define	SKB_TRACE2(_s, _p)	if (debug_skb & DSKB_TRACE)		\
    printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
/* Trace with an extra printf(9)-style format appended after the pointer. */
#define	SKB_TRACE_FMT(_s, _fmt, ...)	if (debug_skb & DSKB_TRACE)	\
    printf("SKB_TRACE %s:%d %p" _fmt "\n", __func__, __LINE__, _s,	\
	__VA_ARGS__)
/* Report a not-yet-implemented code path. */
#define	SKB_TODO()		if (debug_skb & DSKB_TODO)		\
    printf("SKB_TODO %s:%d\n", __func__, __LINE__)
#else
#define	SKB_TRACE(_s)		do { } while(0)
#define	SKB_TRACE2(_s, _p)	do { } while(0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while(0)
#define	SKB_TODO()		do { } while(0)
#endif

/* Linux packet classification; only the values drivers have needed so far. */
enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

#define	NET_SKB_PAD		CACHE_LINE_SIZE	/* ? */

/*
 * Head of a doubly-linked queue of sk_buffs.  The next/prev members sit at
 * the same offsets as in struct sk_buff, so the head itself is used as the
 * list sentinel via a cast to (struct sk_buff *) in the queue functions.
 */
struct sk_buff_head {
		/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;	/* Number of sk_buffs on the queue. */
	int			lock;	/* XXX TYPE */
};

/* Checksum-offload state flags carried in an skb. */
enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

/* A single page fragment attached to an skb; see skb_add_rx_frag(). */
struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;		/* Fragment data offset within the page. */
	size_t			size;		/* Fragment data length in bytes. */
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

/* Extra skb state kept out of line; reached via skb_shinfo(). */
struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;	/* Valid entries in frags[]. */
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

/*
 * The central socket-buffer structure.  Payload lives in a single linear
 * buffer delimited by head/end, with the valid data between data/tail;
 * additional page fragments (if any) are tracked via *shinfo.
 */
struct sk_buff {
		/* XXX TODO */
	/* struct sk_buff_head */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	int			list;		/* XXX TYPE */
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_spareu16_0;
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int			csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;		/* Head of buffer. */
	uint8_t			*data;		/* Head of data. */
	uint8_t			*tail;		/* End of data. */
	uint8_t			*end;		/* End of buffer. */

	struct skb_shared_info	*shinfo;	/* Fragment bookkeeping. */

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;		/* Backing storage, released via m_free_func. */
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};

/* -------------------------------------------------------------------------- */

/* Out-of-line allocator/free implemented in the LinuxKPI support code. */
struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
void linuxkpi_kfree_skb(struct sk_buff *);

/* -------------------------------------------------------------------------- */

/* Allocate a new sk_buff; buffer sizing is done by linuxkpi_alloc_skb(). */
static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

Zeeb static inline struct sk_buff * 174*49ed6e97SBjoern A. Zeeb dev_alloc_skb(size_t len) 175*49ed6e97SBjoern A. Zeeb { 176*49ed6e97SBjoern A. Zeeb struct sk_buff *skb; 177*49ed6e97SBjoern A. Zeeb 178*49ed6e97SBjoern A. Zeeb skb = alloc_skb(len, GFP_KERNEL); 179*49ed6e97SBjoern A. Zeeb /* XXX TODO */ 180*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 181*49ed6e97SBjoern A. Zeeb return (skb); 182*49ed6e97SBjoern A. Zeeb } 183*49ed6e97SBjoern A. Zeeb 184*49ed6e97SBjoern A. Zeeb static inline void 185*49ed6e97SBjoern A. Zeeb kfree_skb(struct sk_buff *skb) 186*49ed6e97SBjoern A. Zeeb { 187*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 188*49ed6e97SBjoern A. Zeeb linuxkpi_kfree_skb(skb); 189*49ed6e97SBjoern A. Zeeb } 190*49ed6e97SBjoern A. Zeeb 191*49ed6e97SBjoern A. Zeeb static inline void 192*49ed6e97SBjoern A. Zeeb dev_kfree_skb(struct sk_buff *skb) 193*49ed6e97SBjoern A. Zeeb { 194*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 195*49ed6e97SBjoern A. Zeeb kfree_skb(skb); 196*49ed6e97SBjoern A. Zeeb } 197*49ed6e97SBjoern A. Zeeb 198*49ed6e97SBjoern A. Zeeb static inline void 199*49ed6e97SBjoern A. Zeeb dev_kfree_skb_any(struct sk_buff *skb) 200*49ed6e97SBjoern A. Zeeb { 201*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 202*49ed6e97SBjoern A. Zeeb dev_kfree_skb(skb); 203*49ed6e97SBjoern A. Zeeb } 204*49ed6e97SBjoern A. Zeeb 205*49ed6e97SBjoern A. Zeeb static inline void 206*49ed6e97SBjoern A. Zeeb dev_kfree_skb_irq(struct sk_buff *skb) 207*49ed6e97SBjoern A. Zeeb { 208*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 209*49ed6e97SBjoern A. Zeeb SKB_TODO(); 210*49ed6e97SBjoern A. Zeeb } 211*49ed6e97SBjoern A. Zeeb 212*49ed6e97SBjoern A. Zeeb /* -------------------------------------------------------------------------- */ 213*49ed6e97SBjoern A. Zeeb 214*49ed6e97SBjoern A. Zeeb /* XXX BZ review this one for terminal condition as Linux "queues" are special. */ 215*49ed6e97SBjoern A. Zeeb #define skb_list_walk_safe(_q, skb, tmp) \ 216*49ed6e97SBjoern A. 
Zeeb for ((skb) = (_q)->next; (skb) != NULL && ((tmp) = (skb)->next); (skb) = (tmp)) 217*49ed6e97SBjoern A. Zeeb 218*49ed6e97SBjoern A. Zeeb /* Add headroom; cannot do once there is data in there. */ 219*49ed6e97SBjoern A. Zeeb static inline void 220*49ed6e97SBjoern A. Zeeb skb_reserve(struct sk_buff *skb, size_t len) 221*49ed6e97SBjoern A. Zeeb { 222*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 223*49ed6e97SBjoern A. Zeeb KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p " 224*49ed6e97SBjoern A. Zeeb "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail)); 225*49ed6e97SBjoern A. Zeeb skb->data += len; 226*49ed6e97SBjoern A. Zeeb skb->tail += len; 227*49ed6e97SBjoern A. Zeeb } 228*49ed6e97SBjoern A. Zeeb 229*49ed6e97SBjoern A. Zeeb /* 230*49ed6e97SBjoern A. Zeeb * Remove headroom; return new data pointer; basically make space at the 231*49ed6e97SBjoern A. Zeeb * front to copy data in (manually). 232*49ed6e97SBjoern A. Zeeb */ 233*49ed6e97SBjoern A. Zeeb static inline void * 234*49ed6e97SBjoern A. Zeeb skb_push(struct sk_buff *skb, size_t len) 235*49ed6e97SBjoern A. Zeeb { 236*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 237*49ed6e97SBjoern A. Zeeb KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - " 238*49ed6e97SBjoern A. Zeeb "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->data)); 239*49ed6e97SBjoern A. Zeeb skb->len += len; 240*49ed6e97SBjoern A. Zeeb skb->data -= len; 241*49ed6e97SBjoern A. Zeeb return (skb->data); 242*49ed6e97SBjoern A. Zeeb } 243*49ed6e97SBjoern A. Zeeb 244*49ed6e97SBjoern A. Zeeb /* 245*49ed6e97SBjoern A. Zeeb * Length of the data on the skb (without any frags)??? 246*49ed6e97SBjoern A. Zeeb */ 247*49ed6e97SBjoern A. Zeeb static inline size_t 248*49ed6e97SBjoern A. Zeeb skb_headlen(struct sk_buff *skb) 249*49ed6e97SBjoern A. Zeeb { 250*49ed6e97SBjoern A. Zeeb 251*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 252*49ed6e97SBjoern A. Zeeb return (skb->len - skb->data_len); 253*49ed6e97SBjoern A. 
Zeeb } 254*49ed6e97SBjoern A. Zeeb 255*49ed6e97SBjoern A. Zeeb 256*49ed6e97SBjoern A. Zeeb /* Return the end of data (tail pointer). */ 257*49ed6e97SBjoern A. Zeeb static inline uint8_t * 258*49ed6e97SBjoern A. Zeeb skb_tail_pointer(struct sk_buff *skb) 259*49ed6e97SBjoern A. Zeeb { 260*49ed6e97SBjoern A. Zeeb 261*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 262*49ed6e97SBjoern A. Zeeb return (skb->tail); 263*49ed6e97SBjoern A. Zeeb } 264*49ed6e97SBjoern A. Zeeb 265*49ed6e97SBjoern A. Zeeb /* Return number of bytes available at end of buffer. */ 266*49ed6e97SBjoern A. Zeeb static inline unsigned int 267*49ed6e97SBjoern A. Zeeb skb_tailroom(struct sk_buff *skb) 268*49ed6e97SBjoern A. Zeeb { 269*49ed6e97SBjoern A. Zeeb 270*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 271*49ed6e97SBjoern A. Zeeb KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, " 272*49ed6e97SBjoern A. Zeeb "end %p tail %p\n", __func__, skb, skb->end, skb->tail)); 273*49ed6e97SBjoern A. Zeeb return (skb->end - skb->tail); 274*49ed6e97SBjoern A. Zeeb } 275*49ed6e97SBjoern A. Zeeb 276*49ed6e97SBjoern A. Zeeb /* Return numer of bytes available at the beginning of buffer. */ 277*49ed6e97SBjoern A. Zeeb static inline unsigned int 278*49ed6e97SBjoern A. Zeeb skb_headroom(struct sk_buff *skb) 279*49ed6e97SBjoern A. Zeeb { 280*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 281*49ed6e97SBjoern A. Zeeb KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, " 282*49ed6e97SBjoern A. Zeeb "data %p head %p\n", __func__, skb, skb->data, skb->head)); 283*49ed6e97SBjoern A. Zeeb return (skb->data - skb->head); 284*49ed6e97SBjoern A. Zeeb } 285*49ed6e97SBjoern A. Zeeb 286*49ed6e97SBjoern A. Zeeb 287*49ed6e97SBjoern A. Zeeb /* 288*49ed6e97SBjoern A. Zeeb * Remove tailroom; return the old tail pointer; basically make space at 289*49ed6e97SBjoern A. Zeeb * the end to copy data in (manually). See also skb_put_data() below. 290*49ed6e97SBjoern A. Zeeb */ 291*49ed6e97SBjoern A. 
Zeeb static inline void * 292*49ed6e97SBjoern A. Zeeb skb_put(struct sk_buff *skb, size_t len) 293*49ed6e97SBjoern A. Zeeb { 294*49ed6e97SBjoern A. Zeeb void *s; 295*49ed6e97SBjoern A. Zeeb 296*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 297*49ed6e97SBjoern A. Zeeb KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + " 298*49ed6e97SBjoern A. Zeeb "len %zu) > end %p, head %p data %p len %u\n", __func__, 299*49ed6e97SBjoern A. Zeeb skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len)); 300*49ed6e97SBjoern A. Zeeb 301*49ed6e97SBjoern A. Zeeb s = skb_tail_pointer(skb); 302*49ed6e97SBjoern A. Zeeb skb->tail += len; 303*49ed6e97SBjoern A. Zeeb skb->len += len; 304*49ed6e97SBjoern A. Zeeb #ifdef SKB_DEBUG 305*49ed6e97SBjoern A. Zeeb if (debug_skb & DSKB_TRACEX) 306*49ed6e97SBjoern A. Zeeb printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n", 307*49ed6e97SBjoern A. Zeeb __func__, skb,skb->len, skb->head, skb->data, skb->tail, skb->end, 308*49ed6e97SBjoern A. Zeeb s, len); 309*49ed6e97SBjoern A. Zeeb #endif 310*49ed6e97SBjoern A. Zeeb return (s); 311*49ed6e97SBjoern A. Zeeb } 312*49ed6e97SBjoern A. Zeeb 313*49ed6e97SBjoern A. Zeeb /* skb_put() + copying data in. */ 314*49ed6e97SBjoern A. Zeeb static inline void * 315*49ed6e97SBjoern A. Zeeb skb_put_data(struct sk_buff *skb, const void *buf, size_t len) 316*49ed6e97SBjoern A. Zeeb { 317*49ed6e97SBjoern A. Zeeb void *s; 318*49ed6e97SBjoern A. Zeeb 319*49ed6e97SBjoern A. Zeeb SKB_TRACE2(skb, buf); 320*49ed6e97SBjoern A. Zeeb s = skb_put(skb, len); 321*49ed6e97SBjoern A. Zeeb memcpy(s, buf, len); 322*49ed6e97SBjoern A. Zeeb return (s); 323*49ed6e97SBjoern A. Zeeb } 324*49ed6e97SBjoern A. Zeeb 325*49ed6e97SBjoern A. Zeeb /* skb_put() + filling with zeros. */ 326*49ed6e97SBjoern A. Zeeb static inline void * 327*49ed6e97SBjoern A. Zeeb skb_put_zero(struct sk_buff *skb, size_t len) 328*49ed6e97SBjoern A. Zeeb { 329*49ed6e97SBjoern A. Zeeb void *s; 330*49ed6e97SBjoern A. 
Zeeb 331*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 332*49ed6e97SBjoern A. Zeeb s = skb_put(skb, len); 333*49ed6e97SBjoern A. Zeeb memset(s, '\0', len); 334*49ed6e97SBjoern A. Zeeb return (s); 335*49ed6e97SBjoern A. Zeeb } 336*49ed6e97SBjoern A. Zeeb 337*49ed6e97SBjoern A. Zeeb /* 338*49ed6e97SBjoern A. Zeeb * Remove len bytes from beginning of data. 339*49ed6e97SBjoern A. Zeeb * 340*49ed6e97SBjoern A. Zeeb * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic; 341*49ed6e97SBjoern A. Zeeb * we return the advanced data pointer so we don't have to keep a temp, correct? 342*49ed6e97SBjoern A. Zeeb */ 343*49ed6e97SBjoern A. Zeeb static inline void * 344*49ed6e97SBjoern A. Zeeb skb_pull(struct sk_buff *skb, size_t len) 345*49ed6e97SBjoern A. Zeeb { 346*49ed6e97SBjoern A. Zeeb 347*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 348*49ed6e97SBjoern A. Zeeb #if 0 /* Apparently this doesn't barf... */ 349*49ed6e97SBjoern A. Zeeb KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n", 350*49ed6e97SBjoern A. Zeeb __func__, skb, skb->len, len, skb->data)); 351*49ed6e97SBjoern A. Zeeb #endif 352*49ed6e97SBjoern A. Zeeb if (skb->len < len) 353*49ed6e97SBjoern A. Zeeb return (NULL); 354*49ed6e97SBjoern A. Zeeb skb->len -= len; 355*49ed6e97SBjoern A. Zeeb skb->data += len; 356*49ed6e97SBjoern A. Zeeb return (skb->data); 357*49ed6e97SBjoern A. Zeeb } 358*49ed6e97SBjoern A. Zeeb 359*49ed6e97SBjoern A. Zeeb /* Reduce skb data to given length or do nothing if smaller already. */ 360*49ed6e97SBjoern A. Zeeb static inline void 361*49ed6e97SBjoern A. Zeeb __skb_trim(struct sk_buff *skb, unsigned int len) 362*49ed6e97SBjoern A. Zeeb { 363*49ed6e97SBjoern A. Zeeb 364*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 365*49ed6e97SBjoern A. Zeeb if (skb->len < len) 366*49ed6e97SBjoern A. Zeeb return; 367*49ed6e97SBjoern A. Zeeb 368*49ed6e97SBjoern A. Zeeb skb->len = len; 369*49ed6e97SBjoern A. Zeeb skb->tail = skb->data + skb->len; 370*49ed6e97SBjoern A. 
Zeeb } 371*49ed6e97SBjoern A. Zeeb 372*49ed6e97SBjoern A. Zeeb static inline void 373*49ed6e97SBjoern A. Zeeb skb_trim(struct sk_buff *skb, unsigned int len) 374*49ed6e97SBjoern A. Zeeb { 375*49ed6e97SBjoern A. Zeeb 376*49ed6e97SBjoern A. Zeeb return (__skb_trim(skb, len)); 377*49ed6e97SBjoern A. Zeeb } 378*49ed6e97SBjoern A. Zeeb 379*49ed6e97SBjoern A. Zeeb static inline struct skb_shared_info * 380*49ed6e97SBjoern A. Zeeb skb_shinfo(struct sk_buff *skb) 381*49ed6e97SBjoern A. Zeeb { 382*49ed6e97SBjoern A. Zeeb 383*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 384*49ed6e97SBjoern A. Zeeb return (skb->shinfo); 385*49ed6e97SBjoern A. Zeeb } 386*49ed6e97SBjoern A. Zeeb 387*49ed6e97SBjoern A. Zeeb static inline void 388*49ed6e97SBjoern A. Zeeb skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page, 389*49ed6e97SBjoern A. Zeeb off_t offset, size_t size, unsigned int truesize) 390*49ed6e97SBjoern A. Zeeb { 391*49ed6e97SBjoern A. Zeeb struct skb_shared_info *shinfo; 392*49ed6e97SBjoern A. Zeeb 393*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 394*49ed6e97SBjoern A. Zeeb #ifdef SKB_DEBUG 395*49ed6e97SBjoern A. Zeeb if (debug_skb & DSKB_TRACEX) 396*49ed6e97SBjoern A. Zeeb printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d " 397*49ed6e97SBjoern A. Zeeb "page %#jx offset %ju size %zu truesize %u\n", __func__, 398*49ed6e97SBjoern A. Zeeb skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno, 399*49ed6e97SBjoern A. Zeeb (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset, 400*49ed6e97SBjoern A. Zeeb size, truesize); 401*49ed6e97SBjoern A. Zeeb #endif 402*49ed6e97SBjoern A. Zeeb 403*49ed6e97SBjoern A. Zeeb shinfo = skb_shinfo(skb); 404*49ed6e97SBjoern A. Zeeb KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p " 405*49ed6e97SBjoern A. Zeeb "fragno %d too big\n", __func__, skb, fragno)); 406*49ed6e97SBjoern A. Zeeb shinfo->frags[fragno].page = page; 407*49ed6e97SBjoern A. 
Zeeb shinfo->frags[fragno].offset = offset; 408*49ed6e97SBjoern A. Zeeb shinfo->frags[fragno].size = size; 409*49ed6e97SBjoern A. Zeeb shinfo->nr_frags = fragno + 1; 410*49ed6e97SBjoern A. Zeeb skb->len += size; 411*49ed6e97SBjoern A. Zeeb skb->truesize += truesize; 412*49ed6e97SBjoern A. Zeeb 413*49ed6e97SBjoern A. Zeeb /* XXX TODO EXTEND truesize? */ 414*49ed6e97SBjoern A. Zeeb } 415*49ed6e97SBjoern A. Zeeb 416*49ed6e97SBjoern A. Zeeb /* -------------------------------------------------------------------------- */ 417*49ed6e97SBjoern A. Zeeb 418*49ed6e97SBjoern A. Zeeb /* XXX BZ review this one for terminal condition as Linux "queues" are special. */ 419*49ed6e97SBjoern A. Zeeb #define skb_queue_walk(_q, skb) \ 420*49ed6e97SBjoern A. Zeeb for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q); \ 421*49ed6e97SBjoern A. Zeeb (skb) = (skb)->next) 422*49ed6e97SBjoern A. Zeeb 423*49ed6e97SBjoern A. Zeeb #define skb_queue_walk_safe(_q, skb, tmp) \ 424*49ed6e97SBjoern A. Zeeb for ((skb) = (_q)->next, (tmp) = (skb)->next; \ 425*49ed6e97SBjoern A. Zeeb (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next) 426*49ed6e97SBjoern A. Zeeb 427*49ed6e97SBjoern A. Zeeb static inline bool 428*49ed6e97SBjoern A. Zeeb skb_queue_empty(struct sk_buff_head *q) 429*49ed6e97SBjoern A. Zeeb { 430*49ed6e97SBjoern A. Zeeb 431*49ed6e97SBjoern A. Zeeb SKB_TRACE(q); 432*49ed6e97SBjoern A. Zeeb return (q->qlen == 0); 433*49ed6e97SBjoern A. Zeeb } 434*49ed6e97SBjoern A. Zeeb 435*49ed6e97SBjoern A. Zeeb static inline void 436*49ed6e97SBjoern A. Zeeb __skb_queue_head_init(struct sk_buff_head *q) 437*49ed6e97SBjoern A. Zeeb { 438*49ed6e97SBjoern A. Zeeb SKB_TRACE(q); 439*49ed6e97SBjoern A. Zeeb q->prev = q->next = (struct sk_buff *)q; 440*49ed6e97SBjoern A. Zeeb q->qlen = 0; 441*49ed6e97SBjoern A. Zeeb } 442*49ed6e97SBjoern A. Zeeb 443*49ed6e97SBjoern A. Zeeb static inline void 444*49ed6e97SBjoern A. Zeeb skb_queue_head_init(struct sk_buff_head *q) 445*49ed6e97SBjoern A. 
Zeeb { 446*49ed6e97SBjoern A. Zeeb SKB_TRACE(q); 447*49ed6e97SBjoern A. Zeeb return (__skb_queue_head_init(q)); 448*49ed6e97SBjoern A. Zeeb } 449*49ed6e97SBjoern A. Zeeb 450*49ed6e97SBjoern A. Zeeb static inline void 451*49ed6e97SBjoern A. Zeeb __skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next, 452*49ed6e97SBjoern A. Zeeb struct sk_buff_head *q) 453*49ed6e97SBjoern A. Zeeb { 454*49ed6e97SBjoern A. Zeeb 455*49ed6e97SBjoern A. Zeeb SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q); 456*49ed6e97SBjoern A. Zeeb new->prev = prev; 457*49ed6e97SBjoern A. Zeeb new->next = next; 458*49ed6e97SBjoern A. Zeeb next->prev = new; 459*49ed6e97SBjoern A. Zeeb prev->next = new; 460*49ed6e97SBjoern A. Zeeb q->qlen++; 461*49ed6e97SBjoern A. Zeeb } 462*49ed6e97SBjoern A. Zeeb 463*49ed6e97SBjoern A. Zeeb static inline void 464*49ed6e97SBjoern A. Zeeb __skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb, 465*49ed6e97SBjoern A. Zeeb struct sk_buff *new) 466*49ed6e97SBjoern A. Zeeb { 467*49ed6e97SBjoern A. Zeeb 468*49ed6e97SBjoern A. Zeeb SKB_TRACE_FMT(q, "skb %p new %p", skb, new); 469*49ed6e97SBjoern A. Zeeb __skb_insert(new, skb, skb->next, q); 470*49ed6e97SBjoern A. Zeeb } 471*49ed6e97SBjoern A. Zeeb 472*49ed6e97SBjoern A. Zeeb static inline void 473*49ed6e97SBjoern A. Zeeb __skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb, 474*49ed6e97SBjoern A. Zeeb struct sk_buff *new) 475*49ed6e97SBjoern A. Zeeb { 476*49ed6e97SBjoern A. Zeeb 477*49ed6e97SBjoern A. Zeeb SKB_TRACE_FMT(q, "skb %p new %p", skb, new); 478*49ed6e97SBjoern A. Zeeb __skb_insert(new, skb->prev, skb, q); 479*49ed6e97SBjoern A. Zeeb } 480*49ed6e97SBjoern A. Zeeb 481*49ed6e97SBjoern A. Zeeb static inline void 482*49ed6e97SBjoern A. Zeeb __skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb) 483*49ed6e97SBjoern A. Zeeb { 484*49ed6e97SBjoern A. Zeeb struct sk_buff *s; 485*49ed6e97SBjoern A. Zeeb 486*49ed6e97SBjoern A. Zeeb SKB_TRACE2(q, skb); 487*49ed6e97SBjoern A. 
Zeeb q->qlen++; 488*49ed6e97SBjoern A. Zeeb s = (struct sk_buff *)q; 489*49ed6e97SBjoern A. Zeeb s->prev->next = skb; 490*49ed6e97SBjoern A. Zeeb skb->prev = s->prev; 491*49ed6e97SBjoern A. Zeeb skb->next = s; 492*49ed6e97SBjoern A. Zeeb s->prev = skb; 493*49ed6e97SBjoern A. Zeeb } 494*49ed6e97SBjoern A. Zeeb 495*49ed6e97SBjoern A. Zeeb static inline void 496*49ed6e97SBjoern A. Zeeb skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb) 497*49ed6e97SBjoern A. Zeeb { 498*49ed6e97SBjoern A. Zeeb SKB_TRACE2(q, skb); 499*49ed6e97SBjoern A. Zeeb return (__skb_queue_tail(q, skb)); 500*49ed6e97SBjoern A. Zeeb } 501*49ed6e97SBjoern A. Zeeb 502*49ed6e97SBjoern A. Zeeb static inline struct sk_buff * 503*49ed6e97SBjoern A. Zeeb skb_peek_tail(struct sk_buff_head *q) 504*49ed6e97SBjoern A. Zeeb { 505*49ed6e97SBjoern A. Zeeb struct sk_buff *skb; 506*49ed6e97SBjoern A. Zeeb 507*49ed6e97SBjoern A. Zeeb skb = q->prev; 508*49ed6e97SBjoern A. Zeeb SKB_TRACE2(q, skb); 509*49ed6e97SBjoern A. Zeeb if (skb == (struct sk_buff *)q) 510*49ed6e97SBjoern A. Zeeb return (NULL); 511*49ed6e97SBjoern A. Zeeb return (skb); 512*49ed6e97SBjoern A. Zeeb } 513*49ed6e97SBjoern A. Zeeb 514*49ed6e97SBjoern A. Zeeb static inline void 515*49ed6e97SBjoern A. Zeeb __skb_unlink(struct sk_buff *skb, struct sk_buff_head *head) 516*49ed6e97SBjoern A. Zeeb { 517*49ed6e97SBjoern A. Zeeb SKB_TRACE2(skb, head); 518*49ed6e97SBjoern A. Zeeb struct sk_buff *p, *n;; 519*49ed6e97SBjoern A. Zeeb 520*49ed6e97SBjoern A. Zeeb head->qlen--; 521*49ed6e97SBjoern A. Zeeb p = skb->prev; 522*49ed6e97SBjoern A. Zeeb n = skb->next; 523*49ed6e97SBjoern A. Zeeb p->next = n; 524*49ed6e97SBjoern A. Zeeb n->prev = p; 525*49ed6e97SBjoern A. Zeeb skb->prev = skb->next = NULL; 526*49ed6e97SBjoern A. Zeeb } 527*49ed6e97SBjoern A. Zeeb 528*49ed6e97SBjoern A. Zeeb static inline void 529*49ed6e97SBjoern A. Zeeb skb_unlink(struct sk_buff *skb, struct sk_buff_head *head) 530*49ed6e97SBjoern A. Zeeb { 531*49ed6e97SBjoern A. 
Zeeb SKB_TRACE2(skb, head); 532*49ed6e97SBjoern A. Zeeb return (__skb_unlink(skb, head)); 533*49ed6e97SBjoern A. Zeeb } 534*49ed6e97SBjoern A. Zeeb 535*49ed6e97SBjoern A. Zeeb static inline struct sk_buff * 536*49ed6e97SBjoern A. Zeeb __skb_dequeue(struct sk_buff_head *q) 537*49ed6e97SBjoern A. Zeeb { 538*49ed6e97SBjoern A. Zeeb struct sk_buff *skb; 539*49ed6e97SBjoern A. Zeeb 540*49ed6e97SBjoern A. Zeeb SKB_TRACE(q); 541*49ed6e97SBjoern A. Zeeb skb = q->next; 542*49ed6e97SBjoern A. Zeeb if (skb == (struct sk_buff *)q) 543*49ed6e97SBjoern A. Zeeb return (NULL); 544*49ed6e97SBjoern A. Zeeb if (skb != NULL) 545*49ed6e97SBjoern A. Zeeb __skb_unlink(skb, q); 546*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 547*49ed6e97SBjoern A. Zeeb return (skb); 548*49ed6e97SBjoern A. Zeeb } 549*49ed6e97SBjoern A. Zeeb 550*49ed6e97SBjoern A. Zeeb static inline struct sk_buff * 551*49ed6e97SBjoern A. Zeeb skb_dequeue(struct sk_buff_head *q) 552*49ed6e97SBjoern A. Zeeb { 553*49ed6e97SBjoern A. Zeeb SKB_TRACE(q); 554*49ed6e97SBjoern A. Zeeb return (__skb_dequeue(q)); 555*49ed6e97SBjoern A. Zeeb } 556*49ed6e97SBjoern A. Zeeb 557*49ed6e97SBjoern A. Zeeb static inline struct sk_buff * 558*49ed6e97SBjoern A. Zeeb skb_dequeue_tail(struct sk_buff_head *q) 559*49ed6e97SBjoern A. Zeeb { 560*49ed6e97SBjoern A. Zeeb struct sk_buff *skb; 561*49ed6e97SBjoern A. Zeeb 562*49ed6e97SBjoern A. Zeeb skb = skb_peek_tail(q); 563*49ed6e97SBjoern A. Zeeb if (skb != NULL) 564*49ed6e97SBjoern A. Zeeb __skb_unlink(skb, q); 565*49ed6e97SBjoern A. Zeeb 566*49ed6e97SBjoern A. Zeeb SKB_TRACE2(q, skb); 567*49ed6e97SBjoern A. Zeeb return (skb); 568*49ed6e97SBjoern A. Zeeb } 569*49ed6e97SBjoern A. Zeeb 570*49ed6e97SBjoern A. Zeeb static inline void 571*49ed6e97SBjoern A. Zeeb skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb) 572*49ed6e97SBjoern A. Zeeb { 573*49ed6e97SBjoern A. Zeeb 574*49ed6e97SBjoern A. Zeeb SKB_TRACE2(q, skb); 575*49ed6e97SBjoern A. 
Zeeb __skb_queue_after(q, (struct sk_buff *)q, skb); 576*49ed6e97SBjoern A. Zeeb } 577*49ed6e97SBjoern A. Zeeb 578*49ed6e97SBjoern A. Zeeb static inline uint32_t 579*49ed6e97SBjoern A. Zeeb skb_queue_len(struct sk_buff_head *head) 580*49ed6e97SBjoern A. Zeeb { 581*49ed6e97SBjoern A. Zeeb SKB_TRACE(head); 582*49ed6e97SBjoern A. Zeeb return (head->qlen); 583*49ed6e97SBjoern A. Zeeb } 584*49ed6e97SBjoern A. Zeeb 585*49ed6e97SBjoern A. Zeeb static inline void 586*49ed6e97SBjoern A. Zeeb __skb_queue_purge(struct sk_buff_head *q) 587*49ed6e97SBjoern A. Zeeb { 588*49ed6e97SBjoern A. Zeeb struct sk_buff *skb; 589*49ed6e97SBjoern A. Zeeb 590*49ed6e97SBjoern A. Zeeb SKB_TRACE(q); 591*49ed6e97SBjoern A. Zeeb while ((skb = __skb_dequeue(q)) != NULL) 592*49ed6e97SBjoern A. Zeeb kfree_skb(skb); 593*49ed6e97SBjoern A. Zeeb } 594*49ed6e97SBjoern A. Zeeb 595*49ed6e97SBjoern A. Zeeb static inline void 596*49ed6e97SBjoern A. Zeeb skb_queue_purge(struct sk_buff_head *q) 597*49ed6e97SBjoern A. Zeeb { 598*49ed6e97SBjoern A. Zeeb SKB_TRACE(q); 599*49ed6e97SBjoern A. Zeeb return (__skb_queue_purge(q)); 600*49ed6e97SBjoern A. Zeeb } 601*49ed6e97SBjoern A. Zeeb 602*49ed6e97SBjoern A. Zeeb static inline struct sk_buff * 603*49ed6e97SBjoern A. Zeeb skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb) 604*49ed6e97SBjoern A. Zeeb { 605*49ed6e97SBjoern A. Zeeb 606*49ed6e97SBjoern A. Zeeb SKB_TRACE2(q, skb); 607*49ed6e97SBjoern A. Zeeb /* XXX what is the q argument good for? */ 608*49ed6e97SBjoern A. Zeeb return (skb->prev); 609*49ed6e97SBjoern A. Zeeb } 610*49ed6e97SBjoern A. Zeeb 611*49ed6e97SBjoern A. Zeeb /* -------------------------------------------------------------------------- */ 612*49ed6e97SBjoern A. Zeeb 613*49ed6e97SBjoern A. Zeeb static inline struct sk_buff * 614*49ed6e97SBjoern A. Zeeb skb_copy(struct sk_buff *skb, gfp_t gfp) 615*49ed6e97SBjoern A. Zeeb { 616*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 617*49ed6e97SBjoern A. Zeeb SKB_TODO(); 618*49ed6e97SBjoern A. 
Zeeb return (NULL); 619*49ed6e97SBjoern A. Zeeb } 620*49ed6e97SBjoern A. Zeeb 621*49ed6e97SBjoern A. Zeeb static inline void 622*49ed6e97SBjoern A. Zeeb consume_skb(struct sk_buff *skb) 623*49ed6e97SBjoern A. Zeeb { 624*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 625*49ed6e97SBjoern A. Zeeb SKB_TODO(); 626*49ed6e97SBjoern A. Zeeb } 627*49ed6e97SBjoern A. Zeeb 628*49ed6e97SBjoern A. Zeeb static inline uint16_t 629*49ed6e97SBjoern A. Zeeb skb_checksum(struct sk_buff *skb, int offs, size_t len, int x) 630*49ed6e97SBjoern A. Zeeb { 631*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 632*49ed6e97SBjoern A. Zeeb SKB_TODO(); 633*49ed6e97SBjoern A. Zeeb return (0xffff); 634*49ed6e97SBjoern A. Zeeb } 635*49ed6e97SBjoern A. Zeeb 636*49ed6e97SBjoern A. Zeeb static inline int 637*49ed6e97SBjoern A. Zeeb skb_checksum_start_offset(struct sk_buff *skb) 638*49ed6e97SBjoern A. Zeeb { 639*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 640*49ed6e97SBjoern A. Zeeb SKB_TODO(); 641*49ed6e97SBjoern A. Zeeb return (-1); 642*49ed6e97SBjoern A. Zeeb } 643*49ed6e97SBjoern A. Zeeb 644*49ed6e97SBjoern A. Zeeb static inline dma_addr_t 645*49ed6e97SBjoern A. Zeeb skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x, 646*49ed6e97SBjoern A. Zeeb size_t fragsz, enum dma_data_direction dir) 647*49ed6e97SBjoern A. Zeeb { 648*49ed6e97SBjoern A. Zeeb SKB_TRACE2(frag, dev); 649*49ed6e97SBjoern A. Zeeb SKB_TODO(); 650*49ed6e97SBjoern A. Zeeb return (-1); 651*49ed6e97SBjoern A. Zeeb } 652*49ed6e97SBjoern A. Zeeb 653*49ed6e97SBjoern A. Zeeb static inline size_t 654*49ed6e97SBjoern A. Zeeb skb_frag_size(const skb_frag_t *frag) 655*49ed6e97SBjoern A. Zeeb { 656*49ed6e97SBjoern A. Zeeb SKB_TRACE(frag); 657*49ed6e97SBjoern A. Zeeb SKB_TODO(); 658*49ed6e97SBjoern A. Zeeb return (-1); 659*49ed6e97SBjoern A. Zeeb } 660*49ed6e97SBjoern A. Zeeb 661*49ed6e97SBjoern A. Zeeb static inline bool 662*49ed6e97SBjoern A. Zeeb skb_is_nonlinear(struct sk_buff *skb) 663*49ed6e97SBjoern A. Zeeb { 664*49ed6e97SBjoern A. 
Zeeb SKB_TRACE(skb); 665*49ed6e97SBjoern A. Zeeb return ((skb->data_len > 0) ? true : false); 666*49ed6e97SBjoern A. Zeeb } 667*49ed6e97SBjoern A. Zeeb 668*49ed6e97SBjoern A. Zeeb #define skb_walk_frags(_skb, _frag) \ 669*49ed6e97SBjoern A. Zeeb for ((_frag) = (_skb); false; (_frag)++) 670*49ed6e97SBjoern A. Zeeb 671*49ed6e97SBjoern A. Zeeb static inline void 672*49ed6e97SBjoern A. Zeeb skb_checksum_help(struct sk_buff *skb) 673*49ed6e97SBjoern A. Zeeb { 674*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 675*49ed6e97SBjoern A. Zeeb SKB_TODO(); 676*49ed6e97SBjoern A. Zeeb } 677*49ed6e97SBjoern A. Zeeb 678*49ed6e97SBjoern A. Zeeb static inline bool 679*49ed6e97SBjoern A. Zeeb skb_ensure_writable(struct sk_buff *skb, size_t off) 680*49ed6e97SBjoern A. Zeeb { 681*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 682*49ed6e97SBjoern A. Zeeb SKB_TODO(); 683*49ed6e97SBjoern A. Zeeb return (false); 684*49ed6e97SBjoern A. Zeeb } 685*49ed6e97SBjoern A. Zeeb 686*49ed6e97SBjoern A. Zeeb static inline void * 687*49ed6e97SBjoern A. Zeeb skb_frag_address(const skb_frag_t *frag) 688*49ed6e97SBjoern A. Zeeb { 689*49ed6e97SBjoern A. Zeeb SKB_TRACE(frag); 690*49ed6e97SBjoern A. Zeeb SKB_TODO(); 691*49ed6e97SBjoern A. Zeeb return (NULL); 692*49ed6e97SBjoern A. Zeeb } 693*49ed6e97SBjoern A. Zeeb 694*49ed6e97SBjoern A. Zeeb static inline struct sk_buff * 695*49ed6e97SBjoern A. Zeeb skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags) 696*49ed6e97SBjoern A. Zeeb { 697*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 698*49ed6e97SBjoern A. Zeeb SKB_TODO(); 699*49ed6e97SBjoern A. Zeeb return (NULL); 700*49ed6e97SBjoern A. Zeeb } 701*49ed6e97SBjoern A. Zeeb 702*49ed6e97SBjoern A. Zeeb static inline bool 703*49ed6e97SBjoern A. Zeeb skb_is_gso(struct sk_buff *skb) 704*49ed6e97SBjoern A. Zeeb { 705*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 706*49ed6e97SBjoern A. Zeeb SKB_TODO(); 707*49ed6e97SBjoern A. Zeeb return (false); 708*49ed6e97SBjoern A. Zeeb } 709*49ed6e97SBjoern A. Zeeb 710*49ed6e97SBjoern A. 
Zeeb static inline void 711*49ed6e97SBjoern A. Zeeb skb_mark_not_on_list(struct sk_buff *skb) 712*49ed6e97SBjoern A. Zeeb { 713*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 714*49ed6e97SBjoern A. Zeeb SKB_TODO(); 715*49ed6e97SBjoern A. Zeeb } 716*49ed6e97SBjoern A. Zeeb 717*49ed6e97SBjoern A. Zeeb static inline void 718*49ed6e97SBjoern A. Zeeb skb_queue_splice_init(struct sk_buff_head *qa, struct sk_buff_head *qb) 719*49ed6e97SBjoern A. Zeeb { 720*49ed6e97SBjoern A. Zeeb SKB_TRACE2(qa, qb); 721*49ed6e97SBjoern A. Zeeb SKB_TODO(); 722*49ed6e97SBjoern A. Zeeb } 723*49ed6e97SBjoern A. Zeeb 724*49ed6e97SBjoern A. Zeeb static inline void 725*49ed6e97SBjoern A. Zeeb skb_reset_transport_header(struct sk_buff *skb) 726*49ed6e97SBjoern A. Zeeb { 727*49ed6e97SBjoern A. Zeeb 728*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 729*49ed6e97SBjoern A. Zeeb skb->l4hdroff = skb->data - skb->head; 730*49ed6e97SBjoern A. Zeeb } 731*49ed6e97SBjoern A. Zeeb 732*49ed6e97SBjoern A. Zeeb static inline uint8_t * 733*49ed6e97SBjoern A. Zeeb skb_transport_header(struct sk_buff *skb) 734*49ed6e97SBjoern A. Zeeb { 735*49ed6e97SBjoern A. Zeeb 736*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 737*49ed6e97SBjoern A. Zeeb return (skb->head + skb->l4hdroff); 738*49ed6e97SBjoern A. Zeeb } 739*49ed6e97SBjoern A. Zeeb 740*49ed6e97SBjoern A. Zeeb static inline uint8_t * 741*49ed6e97SBjoern A. Zeeb skb_network_header(struct sk_buff *skb) 742*49ed6e97SBjoern A. Zeeb { 743*49ed6e97SBjoern A. Zeeb 744*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 745*49ed6e97SBjoern A. Zeeb return (skb->head + skb->l3hdroff); 746*49ed6e97SBjoern A. Zeeb } 747*49ed6e97SBjoern A. Zeeb 748*49ed6e97SBjoern A. Zeeb static inline int 749*49ed6e97SBjoern A. Zeeb __skb_linearize(struct sk_buff *skb) 750*49ed6e97SBjoern A. Zeeb { 751*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 752*49ed6e97SBjoern A. Zeeb SKB_TODO(); 753*49ed6e97SBjoern A. Zeeb return (ENXIO); 754*49ed6e97SBjoern A. Zeeb } 755*49ed6e97SBjoern A. Zeeb 756*49ed6e97SBjoern A. 
Zeeb static inline bool 757*49ed6e97SBjoern A. Zeeb pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp) 758*49ed6e97SBjoern A. Zeeb { 759*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 760*49ed6e97SBjoern A. Zeeb SKB_TODO(); 761*49ed6e97SBjoern A. Zeeb return (false); 762*49ed6e97SBjoern A. Zeeb } 763*49ed6e97SBjoern A. Zeeb 764*49ed6e97SBjoern A. Zeeb /* Not really seen this one but need it as symmetric accessor function. */ 765*49ed6e97SBjoern A. Zeeb static inline void 766*49ed6e97SBjoern A. Zeeb skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap) 767*49ed6e97SBjoern A. Zeeb { 768*49ed6e97SBjoern A. Zeeb 769*49ed6e97SBjoern A. Zeeb SKB_TRACE_FMT(skb, "qmap %u", qmap); 770*49ed6e97SBjoern A. Zeeb skb->qmap = qmap; 771*49ed6e97SBjoern A. Zeeb } 772*49ed6e97SBjoern A. Zeeb 773*49ed6e97SBjoern A. Zeeb static inline uint16_t 774*49ed6e97SBjoern A. Zeeb skb_get_queue_mapping(struct sk_buff *skb) 775*49ed6e97SBjoern A. Zeeb { 776*49ed6e97SBjoern A. Zeeb 777*49ed6e97SBjoern A. Zeeb SKB_TRACE_FMT(skb, "qmap %u", skb->qmap); 778*49ed6e97SBjoern A. Zeeb return (skb->qmap); 779*49ed6e97SBjoern A. Zeeb } 780*49ed6e97SBjoern A. Zeeb 781*49ed6e97SBjoern A. Zeeb static inline bool 782*49ed6e97SBjoern A. Zeeb skb_header_cloned(struct sk_buff *skb) 783*49ed6e97SBjoern A. Zeeb { 784*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 785*49ed6e97SBjoern A. Zeeb SKB_TODO(); 786*49ed6e97SBjoern A. Zeeb return (false); 787*49ed6e97SBjoern A. Zeeb } 788*49ed6e97SBjoern A. Zeeb 789*49ed6e97SBjoern A. Zeeb static inline uint8_t * 790*49ed6e97SBjoern A. Zeeb skb_mac_header(struct sk_buff *skb) 791*49ed6e97SBjoern A. Zeeb { 792*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 793*49ed6e97SBjoern A. Zeeb SKB_TODO(); 794*49ed6e97SBjoern A. Zeeb return (NULL); 795*49ed6e97SBjoern A. Zeeb } 796*49ed6e97SBjoern A. Zeeb 797*49ed6e97SBjoern A. Zeeb static inline void 798*49ed6e97SBjoern A. Zeeb skb_orphan(struct sk_buff *skb) 799*49ed6e97SBjoern A. Zeeb { 800*49ed6e97SBjoern A. 
Zeeb SKB_TRACE(skb); 801*49ed6e97SBjoern A. Zeeb SKB_TODO(); 802*49ed6e97SBjoern A. Zeeb } 803*49ed6e97SBjoern A. Zeeb 804*49ed6e97SBjoern A. Zeeb static inline void 805*49ed6e97SBjoern A. Zeeb skb_reset_mac_header(struct sk_buff *skb) 806*49ed6e97SBjoern A. Zeeb { 807*49ed6e97SBjoern A. Zeeb SKB_TRACE(skb); 808*49ed6e97SBjoern A. Zeeb SKB_TODO(); 809*49ed6e97SBjoern A. Zeeb } 810*49ed6e97SBjoern A. Zeeb 811*49ed6e97SBjoern A. Zeeb static inline struct sk_buff * 812*49ed6e97SBjoern A. Zeeb skb_peek(struct sk_buff_head *q) 813*49ed6e97SBjoern A. Zeeb { 814*49ed6e97SBjoern A. Zeeb SKB_TRACE(q); 815*49ed6e97SBjoern A. Zeeb SKB_TODO(); 816*49ed6e97SBjoern A. Zeeb return (NULL); 817*49ed6e97SBjoern A. Zeeb } 818*49ed6e97SBjoern A. Zeeb 819*49ed6e97SBjoern A. Zeeb static inline __sum16 820*49ed6e97SBjoern A. Zeeb csum_unfold(__sum16 sum) 821*49ed6e97SBjoern A. Zeeb { 822*49ed6e97SBjoern A. Zeeb SKB_TODO(); 823*49ed6e97SBjoern A. Zeeb return (sum); 824*49ed6e97SBjoern A. Zeeb } 825*49ed6e97SBjoern A. Zeeb 826*49ed6e97SBjoern A. Zeeb #endif /* _LINUXKPI_LINUX_SKBUFF_H */ 827