// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2010 Broadcom Corporation
 */

#ifndef	_BRCMU_UTILS_H_
#define	_BRCMU_UTILS_H_

#include <linux/skbuff.h>

/*
 * Spin at most 'us' microseconds while 'exp' is true.
 * Caller should explicitly test 'exp' when this completes
 * and take appropriate error action if 'exp' is still true.
 */
#define SPINWAIT(exp, us) { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) {\
		udelay(10); \
		countdown -= 10; \
	} \
}
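
/*
 * Example (illustrative sketch, not part of this header): poll a
 * hypothetical status register until a BUSY bit clears, then re-test
 * the condition to detect a timeout, as the comment above requires.
 * 'regs', 'STATUS' and 'STATUS_BUSY' are made-up names for the example.
 *
 *	SPINWAIT(readl(regs + STATUS) & STATUS_BUSY, 1000);
 *	if (readl(regs + STATUS) & STATUS_BUSY)
 *		return -ETIMEDOUT;
 */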

/* osl multi-precedence packet queue */
#define PKTQ_LEN_DEFAULT	128	/* Max 128 packets */
#define PKTQ_MAX_PREC		16	/* Maximum precedence levels */

#define BCME_STRLEN		64	/* Max string length for BCM errors */

/* the largest reasonable packet buffer the driver uses for Ethernet MTU, in bytes */
#define	PKTBUFSZ	2048

#ifndef setbit
#ifndef NBBY			/* the BSD family defines NBBY */
#define	NBBY	8		/* 8 bits per byte */
#endif				/* #ifndef NBBY */
#define	setbit(a, i)	(((u8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
#define	clrbit(a, i)	(((u8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
#define	isset(a, i)	(((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
#define	isclr(a, i)	((((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
#endif				/* setbit */
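
/*
 * Example (illustrative sketch): these macros treat any byte buffer as
 * a flat bitmap. A 16-bit map fits in two bytes; bit 9 lands in byte 1:
 *
 *	u8 map[2] = { 0 };
 *
 *	setbit(map, 9);		// map[1] |= 0x02
 *	if (isset(map, 9))
 *		clrbit(map, 9);	// map[1] &= ~0x02
 */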

#define	NBITS(type)	(sizeof(type) * 8)
#define NBITVAL(nbits)	(1 << (nbits))
#define MAXBITVAL(nbits)	((1 << (nbits)) - 1)
#define	NBITMASK(nbits)	MAXBITVAL(nbits)
#define MAXNBVAL(nbyte)	MAXBITVAL((nbyte) * 8)
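
/*
 * Example: NBITS(u16) == 16, NBITVAL(4) == 0x10, MAXBITVAL(4) == 0x0f;
 * i.e. MAXBITVAL(n) is the all-ones value of an n-bit field.
 */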

/* crc defines */
#define CRC16_INIT_VALUE 0xffff	/* Initial CRC16 checksum value */
#define CRC16_GOOD_VALUE 0xf0b8	/* Good final CRC16 checksum value */

/* 18 bytes: "xx:xx:xx:xx:xx:xx" plus the terminating NUL */
#define ETHER_ADDR_STR_LEN	18

struct pktq_prec {
	struct sk_buff_head skblist;
	u16 max;		/* maximum number of queued packets */
};

/* multi-priority pkt queue */
struct pktq {
	u16 num_prec;	/* number of precedences in use */
	u16 hi_prec;	/* rapid dequeue hint (>= highest non-empty prec) */
	u16 max;	/* total max packets */
	u16 len;	/* total number of packets */
	/*
	 * q array must be last since # of elements can be either
	 * PKTQ_MAX_PREC or 1
	 */
	struct pktq_prec q[PKTQ_MAX_PREC];
};

/* operations on a specific precedence in packet queue */

static inline int pktq_plen(struct pktq *pq, int prec)
{
	return pq->q[prec].skblist.qlen;
}

static inline int pktq_pavail(struct pktq *pq, int prec)
{
	return pq->q[prec].max - pq->q[prec].skblist.qlen;
}

static inline bool pktq_pfull(struct pktq *pq, int prec)
{
	return pq->q[prec].skblist.qlen >= pq->q[prec].max;
}

static inline bool pktq_pempty(struct pktq *pq, int prec)
{
	return skb_queue_empty(&pq->q[prec].skblist);
}

static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
{
	return skb_peek(&pq->q[prec].skblist);
}

static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
{
	return skb_peek_tail(&pq->q[prec].skblist);
}

struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
				     struct sk_buff *p);
struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
				      bool (*match_fn)(struct sk_buff *p,
						       void *arg),
				      void *arg);
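
/*
 * Example (illustrative sketch): brcmu_pktq_pdeq_match() dequeues the
 * first packet at 'prec' accepted by match_fn(). A hypothetical
 * callback selecting packets by skb->priority:
 *
 *	static bool match_prio(struct sk_buff *p, void *arg)
 *	{
 *		return p->priority == *(u32 *)arg;
 *	}
 *
 *	u32 prio = 5;
 *	struct sk_buff *skb = brcmu_pktq_pdeq_match(pq, prec,
 *						    match_prio, &prio);
 */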

/* packet primitives */
struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
void brcmu_pkt_buf_free_skb(struct sk_buff *skb);

/*
 * Empty the queue at a particular precedence level.
 * The callback fn(pkt, arg) returns true if pkt belongs to the interface.
 */
void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
		       bool (*fn)(struct sk_buff *, void *), void *arg);

/* operations on a set of precedences in packet queue */

int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);

/* operations on packet queue as a whole */

static inline int pktq_len(struct pktq *pq)
{
	return (int)pq->len;
}

static inline int pktq_max(struct pktq *pq)
{
	return (int)pq->max;
}

static inline int pktq_avail(struct pktq *pq)
{
	return (int)(pq->max - pq->len);
}

static inline bool pktq_full(struct pktq *pq)
{
	return pq->len >= pq->max;
}

static inline bool pktq_empty(struct pktq *pq)
{
	return pq->len == 0;
}

void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
/* prec_out may be NULL if caller is not interested in return value */
struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
void brcmu_pktq_flush(struct pktq *pq, bool dir,
		      bool (*fn)(struct sk_buff *, void *), void *arg);
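
/*
 * Example (illustrative sketch): typical queue lifetime. Initialize
 * with brcmu_pktq_init(), enqueue with brcmu_pktq_penq(), drain the
 * highest non-empty precedence via the bitmap dequeue, and flush
 * everything on teardown. 'NPREC' and 'skb' are made-up names.
 *
 *	struct pktq q;
 *	int prec;
 *
 *	brcmu_pktq_init(&q, NPREC, PKTQ_LEN_DEFAULT);
 *	if (!pktq_full(&q))
 *		brcmu_pktq_penq(&q, 0, skb);
 *	skb = brcmu_pktq_mdeq(&q, MAXBITVAL(NPREC), &prec);
 *	brcmu_pktq_flush(&q, true, NULL, NULL);
 */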

/* externs */
/* ip address */
struct ipv4_addr;

/*
 * bitfield macros using masking and shift
 *
 * remark: the mask parameter should be a shifted mask, i.e. already
 * positioned over the field bits within the full-width word.
 */
static inline void brcmu_maskset32(u32 *var, u32 mask, u8 shift, u32 value)
{
	value = (value << shift) & mask;
	*var = (*var & ~mask) | value;
}
static inline u32 brcmu_maskget32(u32 var, u32 mask, u8 shift)
{
	return (var & mask) >> shift;
}
static inline void brcmu_maskset16(u16 *var, u16 mask, u8 shift, u16 value)
{
	value = (value << shift) & mask;
	*var = (*var & ~mask) | value;
}
static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
{
	return (var & mask) >> shift;
}
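
/*
 * Example (illustrative sketch): access a 4-bit field at bits 11:8.
 * The mask is pre-shifted (0x0f00), matching the remark above.
 *
 *	u32 reg = 0x12345678;
 *
 *	brcmu_maskset32(&reg, 0x0f00, 8, 0xa);		// reg == 0x12345a78
 *	u32 field = brcmu_maskget32(reg, 0x0f00, 8);	// field == 0xa
 */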

/* externs */
/* format/print */
#ifdef DEBUG
void brcmu_prpkt(const char *msg, struct sk_buff *p0);
#else
#define brcmu_prpkt(a, b)
#endif				/* DEBUG */

#ifdef DEBUG
__printf(3, 4)
void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
#else
__printf(3, 4)
static inline
void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
{
}
#endif

#define BRCMU_BOARDREV_LEN	8
#define BRCMU_DOTREV_LEN	16

char *brcmu_boardrev_str(u32 brev, char *buf);
char *brcmu_dotrev_str(u32 dotrev, char *buf);
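
/*
 * Example (illustrative sketch): the _LEN constants size the caller's
 * buffer for these string helpers; 'rev' is a made-up name.
 *
 *	char brev[BRCMU_BOARDREV_LEN];
 *
 *	brcmu_boardrev_str(rev, brev);
 *	pr_debug("board rev: %s\n", brev);
 */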

#endif				/* _BRCMU_UTILS_H_ */