/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004, 2011 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>

/*
 * Parameters to routines below should be in network byte order, and values
 * are returned in network byte order.
 */

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
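
/*
 * Usage sketch (illustrative, not part of the original header): per the
 * comment above, the __be32/__be64 accessors below take and return values
 * in network byte order, so callers convert at the boundary.  The QPN
 * value here is a hypothetical placeholder.
 *
 *	struct cm_req_msg req = {};
 *	u32 qpn = 0x1234;		// host byte order
 *
 *	cm_req_set_local_qpn(&req, cpu_to_be32(qpn));
 *	qpn = be32_to_cpu(cm_req_get_local_qpn(&req));
 */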

struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, extended transport type:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];

} __attribute__ ((packed));
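
/*
 * Note on naming (descriptive comment added for clarity): the offsetNN
 * members here and in the structures below appear to be named for the
 * byte offset of the field within the CM message payload that follows
 * the MAD header, and each one packs several bit-fields as described in
 * the comment above it.  The inline helpers below do the masking and
 * shifting so callers never touch the packed bytes directly.
 */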

static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(req_msg->offset32) &
					  0x000000FF));
}

static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(req_msg->offset40) &
					  0xFFFFFF07));
}

static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	case 3:
		switch (req_msg->offset51 & 0x7) {
		case 1: return IB_QPT_XRC_TGT;
		default: return 0;
		}
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						  req_msg->offset40) &
						   0xFFFFFFF9) | 0x2);
		break;
	case IB_QPT_XRC_INI:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						 req_msg->offset40) &
						   0xFFFFFFF9) | 0x6);
		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						 req_msg->offset40) &
						  0xFFFFFFF9);
	}
}
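
/*
 * Illustrative sketch (not part of the original header) of building up a
 * REQ with the accessors above; the QPN, resource, and timeout values are
 * hypothetical placeholders.
 *
 *	struct cm_req_msg req = {};
 *
 *	cm_req_set_local_qpn(&req, cpu_to_be32(0x1234));
 *	cm_req_set_resp_res(&req, 4);
 *	cm_req_set_init_depth(&req, 4);
 *	cm_req_set_remote_resp_timeout(&req, 20);
 *	cm_req_set_qp_type(&req, IB_QPT_RC);
 */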

static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					 (be32_to_cpu(req_msg->offset40) &
					  0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				     (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}

static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				  (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}

/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};
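
/*
 * These values populate the two-bit "message MRAed" and "message REJected"
 * fields of the MRA and REJ messages below, identifying which CM message
 * is being acknowledged or rejected.
 */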

struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}
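
/*
 * Illustrative sketch (not part of the original header): acknowledging a
 * received REQ with an MRA; the service timeout value is a hypothetical
 * placeholder.
 *
 *	struct cm_mra_msg mra = {};
 *
 *	cm_mra_set_msg_mraed(&mra, CM_MSG_RESPONSE_REQ);
 *	cm_mra_set_service_timeout(&mra, 24);
 */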

struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24, rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:4 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
}

static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
{
	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
			    (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
}

static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
	return (qp_type == IB_QPT_XRC_INI) ?
		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
}

static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					    u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}
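
/*
 * Illustrative sketch (not part of the original header) of filling in a
 * REP with the accessors above; the QPN, PSN, and count values are
 * hypothetical placeholders.
 *
 *	struct cm_rep_msg rep = {};
 *
 *	cm_rep_set_local_qpn(&rep, cpu_to_be32(0x1234));
 *	cm_rep_set_starting_psn(&rep, cpu_to_be32(0x100));
 *	cm_rep_set_target_ack_delay(&rep, 15);
 *	cm_rep_set_rnr_retry_count(&rep, 7);
 *	cm_rep_set_flow_ctrl(&rep, 1);
 */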

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
				 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					 (be32_to_cpu(lap_msg->offset56) &
					  0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			     (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}

struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	__be16 rsvd;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __attribute__ ((packed));

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(sidr_rep_msg->offset8) &
					 0x000000FF));
}

#endif /* CM_MSGS_H */