/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_HDR_H
#define RXE_HDR_H

/* Information extracted from a packet carried in an sk_buff is stored in
 * the control block (skb->cb) of received packets, so this struct must fit
 * in the cb array, i.e. be at most 48 bytes.
 */
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns packet */
	struct rxe_qp		*qp;		/* qp that owns packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* points to bth */
	u32			mask;		/* useful info about pkt */
	u32			psn;		/* bth psn of packet */
	u16			pkey_index;	/* partition of pkt */
	u16			paylen;		/* length of bth - icrc */
	u8			port_num;	/* port pkt received on */
	u8			opcode;		/* bth opcode of packet */
};

/* These macro-style helpers should be used only for a received skb */
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
	return (void *)skb->cb;
}

static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
	return container_of((void *)pkt, struct sk_buff, cb);
}
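
/*
 * Illustrative sketch (not part of the original interface): how a receive
 * path might use SKB_TO_PKT() to stash per-packet state in the skb control
 * block. The function name and the particular values filled in here are
 * hypothetical; it is shown only to make the cb usage above concrete.
 */
#if 0
static void example_fill_pkt_info(struct rxe_dev *rxe, struct sk_buff *skb,
				  u8 *bth, u16 paylen)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	pkt->rxe = rxe;		/* device that owns the packet */
	pkt->port_num = 1;	/* port the packet was received on */
	pkt->hdr = bth;		/* pkt->hdr always points at the BTH */
	pkt->paylen = paylen;	/* length from the BTH through the ICRC */
}
#endif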

/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only, since
 * rxe does not currently support the RD transport. Most of this could be
 * moved into the IB core; ib_pack.h covers part of it but is incomplete.
 *
 * Header-specific routines insert/extract values to/from headers.
 * The routines named __hhh_(set_)fff() take a pointer to an hhh header
 * and get (set) the fff field. The routines named hhh_(set_)fff() take a
 * packet info struct and locate the header and field based on the opcode
 * in the packet. Conversion to/from network byte order from CPU order is
 * also done; see the illustrative example below.
 */
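
/*
 * Illustrative usage of the two flavors (not part of the original header;
 * the variables shown are hypothetical):
 *
 *	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 *	u64 va;
 *
 *	va = reth_va(pkt);		locates the RETH via pkt->opcode
 *	va = __reth_va(reth_ptr);	caller already points at a RETH
 *	reth_set_va(pkt, va);		value is stored in network byte order
 */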

#define RXE_ICRC_SIZE		(4)
#define RXE_MAX_HDR_LENGTH	(80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8			opcode;
	u8			flags;
	__be16			pkey;
	__be32			qpn;
	__be32			apsn;
};

#define BTH_TVER		(0)
#define BTH_DEF_PKEY		(0xffff)

#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)

static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}

static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			       (~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
			(~BTH_PSN_MASK & apsn));
}

static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr, psn);
}

static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr);

	bth->opcode = opcode;
	bth->flags = (pad << 4) & BTH_PAD_MASK;
	if (se)
		bth->flags |= BTH_SE_MASK;
	if (mig)
		bth->flags |= BTH_MIG_MASK;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	psn &= BTH_PSN_MASK;
	if (ack_req)
		psn |= BTH_ACK_MASK;
	bth->apsn = cpu_to_be32(psn);
}

/******************************************************************************
 * Reliable Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_rdeth {
	__be32			een;
};

#define RDETH_EEN_MASK		(0x00ffffff)

static inline u32 __rdeth_een(void *arg)
{
	struct rxe_rdeth *rdeth = arg;

	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
{
	return __rdeth_een(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}

/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_deth {
	__be32			qkey;
	__be32			sqp;
};

#define GSI_QKEY		(0x80010000)
#define DETH_SQP_MASK		(0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}

/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/
struct rxe_reth {
	__be64			va;
	__be32			rkey;
	__be32			len;
};

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}

/******************************************************************************
 * FLUSH Extended Transport Header
 ******************************************************************************/

struct rxe_feth {
	__be32 bits;
};

#define FETH_PLT_MASK		(0x0000000f) /* bits 3-0 */
#define FETH_SEL_MASK		(0x00000030) /* bits 5-4 */
#define FETH_SEL_SHIFT		(4U)

static inline u32 __feth_plt(void *arg)
{
	struct rxe_feth *feth = arg;

	return be32_to_cpu(feth->bits) & FETH_PLT_MASK;
}

static inline u32 __feth_sel(void *arg)
{
	struct rxe_feth *feth = arg;

	return (be32_to_cpu(feth->bits) & FETH_SEL_MASK) >> FETH_SEL_SHIFT;
}

static inline u32 feth_plt(struct rxe_pkt_info *pkt)
{
	return __feth_plt(pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
}

static inline u32 feth_sel(struct rxe_pkt_info *pkt)
{
	return __feth_sel(pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
}

static inline void feth_init(struct rxe_pkt_info *pkt, u8 type, u8 level)
{
	struct rxe_feth *feth = (struct rxe_feth *)
		    (pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
	u32 bits = ((level << FETH_SEL_SHIFT) & FETH_SEL_MASK) |
		   (type & FETH_PLT_MASK);

	feth->bits = cpu_to_be32(bits);
}

/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
struct rxe_atmeth {
	__be64			va;
	__be32			rkey;
	__be64			swap_add;
	__be64			comp;
} __packed;

static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}

/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/
struct rxe_aeth {
	__be32			smsn;
};

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
};

static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
			 (~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
			 (~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}

/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/
struct rxe_atmack {
	__be64			orig;
};

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}

/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/
struct rxe_immdt {
	__be32			imm;
};

static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}

/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/
struct rxe_ieth {
	__be32			rkey;
};

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}

enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
	RXE_FETH_BYTES		= sizeof(struct rxe_feth),
};

static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}

#endif /* RXE_HDR_H */