/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnxr_def.h"
#include "rdma_common.h"
#include "qlnxr_cm.h"

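/*
 * Advance the software GSI consumer index of a work queue, wrapping
 * around at the ring size (max_wr).
 */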
void
qlnxr_inc_sw_gsi_cons(struct qlnxr_qp_hwq_info *info)
{
	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}

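/*
 * Remember the QP and CQs that make up the GSI QP so the LL2
 * completion callbacks below can reach them through the device
 * structure.
 */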
void
qlnxr_store_gsi_qp_cq(struct qlnxr_dev *dev,
		struct qlnxr_qp *qp,
		struct ib_qp_init_attr *attrs)
{
	QL_DPRINT12(dev->ha, "enter\n");

	dev->gsi_qp_created = 1;
	dev->gsi_sqcq = get_qlnxr_cq((attrs->send_cq));
	dev->gsi_rqcq = get_qlnxr_cq((attrs->recv_cq));
	dev->gsi_qp = qp;

	QL_DPRINT12(dev->ha, "exit\n");

	return;
}

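/*
 * LL2 TX completion callback: free the DMA buffer that held the UD
 * header and the packet descriptor, advance the SQ GSI consumer
 * index, and notify the send CQ's completion handler, if any.
 */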
void
qlnxr_ll2_complete_tx_packet(void *cxt,
		uint8_t connection_handle,
		void *cookie,
		dma_addr_t first_frag_addr,
		bool b_last_fragment,
		bool b_last_packet)
{
	struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
	struct ecore_roce_ll2_packet *pkt = cookie;
	struct qlnxr_cq *cq = dev->gsi_sqcq;
	struct qlnxr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	QL_DPRINT12(dev->ha, "enter\n");

	qlnx_dma_free_coherent(&dev->ha->cdev, pkt->header.vaddr,
			pkt->header.baddr, pkt->header.len);
	kfree(pkt);

	spin_lock_irqsave(&qp->q_lock, flags);

	qlnxr_inc_sw_gsi_cons(&qp->sq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);

	QL_DPRINT12(dev->ha, "exit\n");

	return;
}

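/*
 * LL2 RX completion callback: record the received buffer's status,
 * VLAN, data length and source MAC in the matching software RQ
 * entry, advance the RQ GSI consumer index, and notify the recv CQ's
 * completion handler, if any. Only QP id 1 (the GSI QP) is expected
 * here.
 */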
void
qlnxr_ll2_complete_rx_packet(void *cxt,
		struct ecore_ll2_comp_rx_data *data)
{
	struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
	struct qlnxr_cq *cq = dev->gsi_rqcq;
	struct qlnxr_qp *qp = NULL;
	unsigned long flags;
	uint32_t qp_num = 0;

	QL_DPRINT12(dev->ha, "enter\n");

	if (data->u.data_length_error) {
		/* TODO: add statistic */
	}

	if (data->cookie == NULL) {
		QL_DPRINT12(dev->ha, "cookie is NULL, bad sign\n");
	}

	qp_num = (0xFF << 16) | data->qp_id;

	if (data->qp_id == 1) {
		qp = dev->gsi_qp;
	} else {
		/* TODO: This will be needed for UD QP support */
		/* For RoCEv1 this is invalid */
		QL_DPRINT12(dev->ha, "invalid QP\n");
		return;
	}
	/* note: currently only one recv sg is supported */
	QL_DPRINT12(dev->ha, "MAD received, rx_buf_addr: %x\n",
		data->rx_buf_addr);

	spin_lock_irqsave(&qp->q_lock, flags);

	qp->rqe_wr_id[qp->rq.gsi_cons].rc =
		data->u.data_length_error ? -EINVAL : 0;
	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
	/* note: length stands for data length i.e. GRH is excluded */
	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
		data->length.data_length;
	*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
		ntohl(data->opaque_data_0);
	*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
		ntohs((u16)data->opaque_data_1);

	qlnxr_inc_sw_gsi_cons(&qp->rq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);

	QL_DPRINT12(dev->ha, "exit\n");

	return;
}

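/*
 * LL2 RX release callback: nothing to do here; the receive buffers
 * posted via qlnxr_gsi_post_recv() are owned and reclaimed by the
 * consumer.
 */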
void qlnxr_ll2_release_rx_packet(void *cxt,
		u8 connection_handle,
		void *cookie,
		dma_addr_t rx_buf_addr,
		bool b_last_packet)
{
	/* Do nothing... */
}

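/*
 * Tear down the FW CQ(s) that were created for the GSI QP. The send
 * and recv CQs may be the same object, in which case it is destroyed
 * only once.
 */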
static void
qlnxr_destroy_gsi_cq(struct qlnxr_dev *dev,
		struct ib_qp_init_attr *attrs)
{
	struct ecore_rdma_destroy_cq_in_params iparams;
	struct ecore_rdma_destroy_cq_out_params oparams;
	struct qlnxr_cq *cq;

	QL_DPRINT12(dev->ha, "enter\n");

	cq = get_qlnxr_cq((attrs->send_cq));
	iparams.icid = cq->icid;
	ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	ecore_chain_free(&dev->ha->cdev, &cq->pbl);

	cq = get_qlnxr_cq((attrs->recv_cq));
	/* if a dedicated recv_cq was used, delete it too */
	if (iparams.icid != cq->icid) {
		iparams.icid = cq->icid;
		ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		ecore_chain_free(&dev->ha->cdev, &cq->pbl);
	}

	QL_DPRINT12(dev->ha, "exit\n");

	return;
}

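/*
 * Validate that the requested GSI QP capabilities fit within the
 * driver's QLNXR_GSI_MAX_* limits.
 */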
static inline int
qlnxr_check_gsi_qp_attrs(struct qlnxr_dev *dev,
		struct ib_qp_init_attr *attrs)
{
	QL_DPRINT12(dev->ha, "enter\n");

	if (attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE) {
		QL_DPRINT11(dev->ha,
			"(attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE)\n");
		return -EINVAL;
	}

	if (attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR) {
		QL_DPRINT11(dev->ha,
			"(attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR)\n");
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR) {
		QL_DPRINT11(dev->ha,
			"(attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR)\n");
		return -EINVAL;
	}

	QL_DPRINT12(dev->ha, "exit\n");

	return 0;
}

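/*
 * Post a GSI packet to the LL2 TX queue: one BD for the UD header
 * followed by one BD per payload fragment. Only RoCEv1 is supported
 * on this path. If posting the header fails, the packet resources
 * are released here; once the header has been posted, any
 * partially-posted fragments must wait for TX completion to be
 * freed.
 */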
static int
qlnxr_ll2_post_tx(struct qlnxr_dev *dev, struct ecore_roce_ll2_packet *pkt)
{
	enum ecore_ll2_roce_flavor_type roce_flavor;
	struct ecore_ll2_tx_pkt_info ll2_tx_pkt;
	struct ecore_ll2_stats stats = {0};
	int rc;
	int i;

	QL_DPRINT12(dev->ha, "enter\n");

	memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));

	if (pkt->roce_mode != ROCE_V1) {
		QL_DPRINT11(dev->ha, "roce_mode != ROCE_V1\n");
		return (-1);
	}

	roce_flavor = (pkt->roce_mode == ROCE_V1) ?
		ECORE_LL2_ROCE : ECORE_LL2_RROCE;

	ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
	ll2_tx_pkt.vlan = 0; /* ??? */
	ll2_tx_pkt.tx_dest = ECORE_LL2_TX_DEST_NW;
	ll2_tx_pkt.ecore_roce_flavor = roce_flavor;
	ll2_tx_pkt.first_frag = pkt->header.baddr;
	ll2_tx_pkt.first_frag_len = pkt->header.len;
	ll2_tx_pkt.cookie = pkt;
	ll2_tx_pkt.enable_ip_cksum = 1; /* Only for RoCEv2:IPv4 */

	/* tx header */
	rc = ecore_ll2_prepare_tx_packet(dev->rdma_ctx,
			dev->gsi_ll2_handle,
			&ll2_tx_pkt,
			1);
	if (rc) {
		QL_DPRINT11(dev->ha, "ecore_ll2_prepare_tx_packet failed\n");

		/* TX failed while posting header - release resources */
		qlnx_dma_free_coherent(&dev->ha->cdev,
			pkt->header.vaddr,
			pkt->header.baddr,
			pkt->header.len);

		kfree(pkt);

		return rc;
	}

	/* tx payload */
	for (i = 0; i < pkt->n_seg; i++) {
		rc = ecore_ll2_set_fragment_of_tx_packet(dev->rdma_ctx,
						       dev->gsi_ll2_handle,
						       pkt->payload[i].baddr,
						       pkt->payload[i].len);
		if (rc) {
			/* if failed not much to do here, partial packet has
			 * been posted we can't free memory, will need to wait
			 * for completion
			 */
			QL_DPRINT11(dev->ha,
				"ecore_ll2_set_fragment_of_tx_packet failed\n");
			return rc;
		}
	}

	rc = ecore_ll2_get_stats(dev->rdma_ctx, dev->gsi_ll2_handle, &stats);
	if (rc) {
		QL_DPRINT11(dev->ha, "failed to obtain ll2 stats\n");
	}

	QL_DPRINT12(dev->ha, "exit\n");

	return 0;
}

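/*
 * Stop the GSI LL2 connection: remove the LL2 MAC filter, terminate
 * and release the LL2 connection, and invalidate the handle.
 */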
int
qlnxr_ll2_stop(struct qlnxr_dev *dev)
{
	int rc;

	QL_DPRINT12(dev->ha, "enter\n");

	if (dev->gsi_ll2_handle == 0xFF)
		return 0;

	/* remove LL2 MAC address filter */
	rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx,
			  dev->gsi_ll2_mac_address, NULL);

	rc = ecore_ll2_terminate_connection(dev->rdma_ctx,
			dev->gsi_ll2_handle);

	ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	dev->gsi_ll2_handle = 0xFF;

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return rc;
}

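/*
 * Start the GSI LL2 connection: register the RX/TX completion and
 * release callbacks, acquire and establish an LL2 connection of type
 * ROCE, and install the primary MAC as the LL2 MAC filter.
 */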
int qlnxr_ll2_start(struct qlnxr_dev *dev,
		   struct ib_qp_init_attr *attrs,
		   struct qlnxr_qp *qp)
{
	struct ecore_ll2_acquire_data data;
	struct ecore_ll2_cbs cbs;
	int rc;

	QL_DPRINT12(dev->ha, "enter\n");

	/* configure and start LL2 */
	cbs.rx_comp_cb = qlnxr_ll2_complete_rx_packet;
	cbs.tx_comp_cb = qlnxr_ll2_complete_tx_packet;
	cbs.rx_release_cb = qlnxr_ll2_release_rx_packet;
	cbs.tx_release_cb = qlnxr_ll2_complete_tx_packet;
	cbs.cookie = dev;
	dev->gsi_ll2_handle = 0xFF;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = ECORE_LL2_TYPE_ROCE;
	data.input.mtu = dev->ha->ifp->if_mtu;
	data.input.rx_num_desc = 8 * 1024;
	data.input.rx_drop_ttl0_flg = 1;
	data.input.rx_vlan_removal_en = 0;
	data.input.tx_num_desc = 8 * 1024;
	data.input.tx_tc = 0;
	data.input.tx_dest = ECORE_LL2_TX_DEST_NW;
	data.input.ai_err_packet_too_big = ECORE_LL2_DROP_PACKET;
	data.input.ai_err_no_buf = ECORE_LL2_DROP_PACKET;
	data.input.gsi_enable = 1;
	data.p_connection_handle = &dev->gsi_ll2_handle;
	data.cbs = &cbs;

	rc = ecore_ll2_acquire_connection(dev->rdma_ctx, &data);
	if (rc) {
		QL_DPRINT11(dev->ha,
			"ecore_ll2_acquire_connection failed: %d\n",
			rc);
		return rc;
	}

	QL_DPRINT11(dev->ha,
		"ll2 connection acquired successfully\n");
	rc = ecore_ll2_establish_connection(dev->rdma_ctx,
		dev->gsi_ll2_handle);
	if (rc) {
		QL_DPRINT11(dev->ha,
			"ecore_ll2_establish_connection failed: %d\n", rc);
		goto err1;
	}

	QL_DPRINT11(dev->ha,
		"ll2 connection established successfully\n");
	rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx, NULL,
			dev->ha->primary_mac);
	if (rc) {
		QL_DPRINT11(dev->ha,
			"qlnx_rdma_ll2_set_mac_filter failed: %d\n", rc);
		goto err2;
	}

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return 0;

err2:
	ecore_ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
	ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return rc;
}

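/*
 * Create the GSI QP (QP 1): validate the attributes, start the LL2
 * connection, allocate the software WR-id rings, and take the GSI
 * CQs over from the FW, since they are handled by the driver.
 */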
struct ib_qp*
qlnxr_create_gsi_qp(struct qlnxr_dev *dev,
		 struct ib_qp_init_attr *attrs,
		 struct qlnxr_qp *qp)
{
	int rc;

	QL_DPRINT12(dev->ha, "enter\n");

	rc = qlnxr_check_gsi_qp_attrs(dev, attrs);
	if (rc) {
		QL_DPRINT11(dev->ha, "qlnxr_check_gsi_qp_attrs failed\n");
		return ERR_PTR(rc);
	}

	rc = qlnxr_ll2_start(dev, attrs, qp);
	if (rc) {
		QL_DPRINT11(dev->ha, "qlnxr_ll2_start failed\n");
		return ERR_PTR(rc);
	}

	/* create QP */
	qp->ibqp.qp_num = 1;
	qp->rq.max_wr = attrs->cap.max_recv_wr;
	qp->sq.max_wr = attrs->cap.max_send_wr;

	qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id) {
		QL_DPRINT11(dev->ha, "(!qp->rqe_wr_id)\n");
		goto err;
	}

	qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id) {
		QL_DPRINT11(dev->ha, "(!qp->wqe_wr_id)\n");
		goto err;
	}

	qlnxr_store_gsi_qp_cq(dev, qp, attrs);
	memcpy(dev->gsi_ll2_mac_address, dev->ha->primary_mac, ETH_ALEN);

	/* the GSI CQ is handled by the driver so remove it from the FW */
	qlnxr_destroy_gsi_cq(dev, attrs);
	dev->gsi_rqcq->cq_type = QLNXR_CQ_TYPE_GSI;

	QL_DPRINT12(dev->ha, "exit &qp->ibqp = %p\n", &qp->ibqp);

	return &qp->ibqp;
err:
	kfree(qp->rqe_wr_id);

	rc = qlnxr_ll2_stop(dev);

	QL_DPRINT12(dev->ha, "exit with error\n");

	return ERR_PTR(-ENOMEM);
}

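/*
 * Destroy the GSI QP by stopping its underlying LL2 connection.
 */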
int
qlnxr_destroy_gsi_qp(struct qlnxr_dev *dev)
{
	int rc = 0;

	QL_DPRINT12(dev->ha, "enter\n");

	rc = qlnxr_ll2_stop(dev);

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return (rc);
}

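/*
 * Extract the VLAN id embedded in bytes 11-12 of the destination
 * GID. Returns true and sets *vlan_id when a valid (< 0x1000) VLAN
 * id is present, false otherwise.
 */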
static inline bool
qlnxr_get_vlan_id_gsi(struct ib_ah_attr *ah_attr, u16 *vlan_id)
{
	u16 tmp_vlan_id;
	union ib_gid *dgid = &ah_attr->grh.dgid;

	tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
	if (tmp_vlan_id < 0x1000) {
		*vlan_id = tmp_vlan_id;
		return true;
	} else {
		*vlan_id = 0;
		return false;
	}
}

#define QLNXR_MAX_UD_HEADER_SIZE	(100)
#define QLNXR_GSI_QPN		(1)
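/*
 * Build the UD header (L2 + optional VLAN + GRH + BTH + DETH) for a
 * GSI send WR and report the RoCE mode to use. Only RoCEv1 headers
 * are built on this path.
 */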
static inline int
qlnxr_gsi_build_header(struct qlnxr_dev *dev,
		struct qlnxr_qp *qp,
		struct ib_send_wr *swr,
		struct ib_ud_header *udh,
		int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct ib_ah_attr *ah_attr = &get_qlnxr_ah((ud_wr(swr)->ah))->attr;
	struct ib_global_route *grh = &ah_attr->grh;
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;

#if __FreeBSD_version >= 1102000
	int rc = 0;
	int ip_ver = 0;
	bool has_udp = false;
#endif /* #if __FreeBSD_version >= 1102000 */

#if !DEFINE_IB_AH_ATTR_WITH_DMAC
	u8 mac[ETH_ALEN];
#endif
	int i;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	has_vlan = qlnxr_get_vlan_id_gsi(ah_attr, &vlan_id);
	ether_type = ETH_P_ROCE;
	*roce_mode = ROCE_V1;
	if (grh->sgid_index < QLNXR_MAX_SGID)
		sgid = dev->sgid_tbl[grh->sgid_index];
	else
		sgid = dev->sgid_tbl[0];

#if __FreeBSD_version >= 1102000

	rc = ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
			has_vlan, has_grh_ipv6, ip_ver, has_udp,
			0 /* immediate */, udh);
	if (rc) {
		QL_DPRINT11(dev->ha, "gsi post send: failed to init header\n");
		return rc;
	}

#else
	ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
			  has_vlan, has_grh_ipv6, 0 /* immediate */, udh);

#endif /* #if __FreeBSD_version >= 1102000 */

	/* ENET + VLAN headers */
#if DEFINE_IB_AH_ATTR_WITH_DMAC
	memcpy(udh->eth.dmac_h, ah_attr->dmac, ETH_ALEN);
#else
	qlnxr_get_dmac(dev, ah_attr, mac);
	memcpy(udh->eth.dmac_h, mac, ETH_ALEN);
#endif
	memcpy(udh->eth.smac_h, dev->ha->primary_mac, ETH_ALEN);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	for (int j = 0; j < ETH_ALEN; j++) {
		QL_DPRINT12(dev->ha, "destination mac: %x\n",
				udh->eth.dmac_h[j]);
	}
	for (int j = 0; j < ETH_ALEN; j++) {
		QL_DPRINT12(dev->ha, "source mac: %x\n",
				udh->eth.smac_h[j]);
	}

	QL_DPRINT12(dev->ha, "QP: %p, opcode: %d, wq: %lx, roce: %x, hops: %d, "
			"imm: %d, vlan: %d, AH: %p\n",
			qp, swr->opcode, swr->wr_id, *roce_mode, grh->hop_limit,
			0, has_vlan, get_qlnxr_ah((ud_wr(swr)->ah)));

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
		QL_DPRINT12(dev->ha, "header: tc: %x, flow_label : %x, "
			"hop_limit: %x \n", udh->grh.traffic_class,
			udh->grh.flow_label, udh->grh.hop_limit);
		for (i = 0; i < 16; i++) {
			QL_DPRINT12(dev->ha, "udh dgid = %x\n",
				udh->grh.destination_gid.raw[i]);
		}
		for (i = 0; i < 16; i++) {
			QL_DPRINT12(dev->ha, "udh sgid = %x\n",
				udh->grh.source_gid.raw[i]);
		}
		udh->grh.next_header = 0x1b;
	}
#ifdef DEFINE_IB_UD_HEADER_INIT_UDP_PRESENT
	/* This is for RoCEv2 */
	else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(grh->flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}
#endif

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QLNXR_ROCE_PKEY_DEFAULT; /* TODO: ib_get_cached_pkey?! */
	udh->bth.destination_qpn = OSAL_CPU_TO_BE32(ud_wr(swr)->remote_qpn);
	udh->bth.psn = OSAL_CPU_TO_BE32((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = OSAL_CPU_TO_BE32(0x80010000); /* qp->qkey */ /* TODO: what is?! */
	udh->deth.source_qpn = OSAL_CPU_TO_BE32(QLNXR_GSI_QPN);

	QL_DPRINT12(dev->ha, "exit\n");
	return 0;
}

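/*
 * Build an LL2 packet for a GSI send WR: pack the UD header into a
 * DMA-coherent buffer, choose between loopback and network TX based
 * on the destination MAC, and attach the WR's SGEs as payload
 * fragments.
 */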
static inline int
qlnxr_gsi_build_packet(struct qlnxr_dev *dev,
	struct qlnxr_qp *qp, struct ib_send_wr *swr,
	struct ecore_roce_ll2_packet **p_packet)
{
	u8 ud_header_buffer[QLNXR_MAX_UD_HEADER_SIZE];
	struct ecore_roce_ll2_packet *packet;
	int roce_mode, header_size;
	struct ib_ud_header udh;
	int i, rc;

	QL_DPRINT12(dev->ha, "enter\n");

	*p_packet = NULL;

	rc = qlnxr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
	if (rc) {
		QL_DPRINT11(dev->ha,
			"qlnxr_gsi_build_header failed rc = %d\n", rc);
		return rc;
	}

	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
	if (!packet) {
		QL_DPRINT11(dev->ha, "packet == NULL\n");
		return -ENOMEM;
	}

	packet->header.vaddr = qlnx_dma_alloc_coherent(&dev->ha->cdev,
					&packet->header.baddr,
					header_size);
	if (!packet->header.vaddr) {
		QL_DPRINT11(dev->ha, "packet->header.vaddr == NULL\n");
		kfree(packet);
		return -ENOMEM;
	}

	if (memcmp(udh.eth.smac_h, udh.eth.dmac_h, ETH_ALEN))
		packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_NW;
	else
		packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_LB;

	packet->roce_mode = roce_mode;
	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
	packet->header.len = header_size;
	packet->n_seg = swr->num_sge;
	qp->wqe_wr_id[qp->sq.prod].bytes_len = IB_GRH_BYTES; /* RDMA_GRH_BYTES */
	for (i = 0; i < packet->n_seg; i++) {
		packet->payload[i].baddr = swr->sg_list[i].addr;
		packet->payload[i].len = swr->sg_list[i].length;
		qp->wqe_wr_id[qp->sq.prod].bytes_len +=
			packet->payload[i].len;
		QL_DPRINT11(dev->ha, "baddr: %p, len: %d\n",
				packet->payload[i].baddr,
				packet->payload[i].len);
	}

	*p_packet = packet;

	QL_DPRINT12(dev->ha, "exit, packet->n_seg: %d\n", packet->n_seg);
	return 0;
}

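/*
 * Post a send WR on the GSI QP. The QP must be in RTS; only a single
 * IB_WR_SEND WR with at most RDMA_MAX_SGE_PER_SQ_WQE SGEs is
 * accepted per call, and any chained WR is reported via *bad_wr.
 */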
int
qlnxr_gsi_post_send(struct ib_qp *ibqp,
		struct ib_send_wr *wr,
		struct ib_send_wr **bad_wr)
{
	struct ecore_roce_ll2_packet *pkt = NULL;
	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
	struct qlnxr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	QL_DPRINT12(dev->ha, "enter\n");

	if (qp->state != ECORE_ROCE_QP_STATE_RTS) {
		QL_DPRINT11(dev->ha,
			"(qp->state != ECORE_ROCE_QP_STATE_RTS)\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		QL_DPRINT11(dev->ha,
			"(wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE)\n");
		rc = -EINVAL;
		goto err;
	}

	if (wr->opcode != IB_WR_SEND) {
		QL_DPRINT11(dev->ha, "(wr->opcode != IB_WR_SEND)\n");
		rc = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qlnxr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		QL_DPRINT11(dev->ha, "qlnxr_gsi_build_packet failed\n");
		goto err;
	}

	rc = qlnxr_ll2_post_tx(dev, pkt);
	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qp->wqe_wr_id[qp->sq.prod].signaled =
			!!(wr->send_flags & IB_SEND_SIGNALED);
		qp->wqe_wr_id[qp->sq.prod].opcode = IB_WC_SEND;
		qlnxr_inc_sw_prod(&qp->sq);
		QL_DPRINT11(dev->ha, "packet sent over gsi qp\n");
	} else {
		QL_DPRINT11(dev->ha, "qlnxr_ll2_post_tx failed\n");
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next != NULL) {
		*bad_wr = wr->next;
		rc = -EINVAL;
	}

	QL_DPRINT12(dev->ha, "exit\n");
	return rc;

err:
	*bad_wr = wr;
	QL_DPRINT12(dev->ha, "exit error\n");
	return rc;
}

#define	QLNXR_LL2_RX_BUFFER_SIZE	(4 * 1024)
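/*
 * Post receive WRs on the GSI QP: hand each WR's first SGE to LL2 as
 * an RX buffer (only one recv SGE is supported) and record the WR in
 * the software RQ ring for completion reporting.
 */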
int
qlnxr_gsi_post_recv(struct ib_qp *ibqp,
		struct ib_recv_wr *wr,
		struct ib_recv_wr **bad_wr)
{
	struct qlnxr_dev *dev = get_qlnxr_dev((ibqp->device));
	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	QL_DPRINT12(dev->ha, "enter, wr: %p\n", wr);

	if ((qp->state != ECORE_ROCE_QP_STATE_RTR) &&
	    (qp->state != ECORE_ROCE_QP_STATE_RTS)) {
		*bad_wr = wr;
		QL_DPRINT11(dev->ha, "exit 0\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		if (wr->num_sge > QLNXR_GSI_MAX_RECV_SGE) {
			QL_DPRINT11(dev->ha, "exit 1\n");
			goto err;
		}

		rc = ecore_ll2_post_rx_buffer(dev->rdma_ctx,
				dev->gsi_ll2_handle,
				wr->sg_list[0].addr,
				wr->sg_list[0].length,
				0 /* cookie */,
				1 /* notify_fw */);
		if (rc) {
			QL_DPRINT11(dev->ha, "exit 2\n");
			goto err;
		}

		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
			sizeof(qp->rqe_wr_id[qp->rq.prod]));
		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

		qlnxr_inc_sw_prod(&qp->rq);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return rc;
err:
	spin_unlock_irqrestore(&qp->q_lock, flags);
	*bad_wr = wr;

	QL_DPRINT12(dev->ha, "exit with -ENOMEM\n");
	return -ENOMEM;
}

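/*
 * Poll the GSI CQ: drain completed RQ entries first, then SQ
 * entries, translating the software ring state into ib_wc entries.
 */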
int
qlnxr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
	struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
	struct qlnxr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	int i = 0;

	QL_DPRINT12(dev->ha, "enter\n");

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
			       IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv sg is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;

#if __FreeBSD_version >= 1100000
		memcpy(&wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac, ETH_ALEN);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;

		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
		}
#endif
		qlnxr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qlnxr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	QL_DPRINT12(dev->ha, "exit i = %d\n", i);
	return i;
}