/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02:    .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
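
/*
 * For example, in the RNR NAK path of qib_ruc_loopback() below, the
 * receiver's 5-bit minimum RNR timer code indexes this table and the
 * result feeds the retry timer:
 *
 *	usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer])
 */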

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
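	/*
	 * Note the split layout of the SGE state: the first valid SGE
	 * lives in ss->sge and the rest in ss->sg_list[], hence the
	 * "j ? &ss->sg_list[j - 1] : &ss->sge" indexing below.
	 */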
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		qib_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
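	/*
	 * This pairs with the write barrier the posting side issues
	 * before advancing wq->head, so the WQE contents are visible
	 * once the new head value is seen.
	 */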
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_qp *qp;
	struct qib_swqe *wqe;
	struct qib_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= QIB_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->wr.wr.atomic.remote_addr,
					  wqe->wr.wr.atomic.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
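		/*
		 * Both atomics must return the previous value at the
		 * target: atomic64_add_return() yields the new value,
		 * so the addend is subtracted back out, while cmpxchg()
		 * already returns the old contents.
		 */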
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		qib_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
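	/*
	 * Copy the payload a contiguous chunk at a time: each pass is
	 * clamped to both the current MR segment (sge->length) and the
	 * current SGE (sge->sge_length), then the state advances to the
	 * next SGE or to the next segment of a multi-segment region.
	 */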
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				qib_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		qib_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= QIB_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~QIB_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
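	/* -x & 3 is the pad needed to round x up to a 4-byte multiple. */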
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}

/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	unsigned long flags;

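	/*
	 * Masking off the low lmc bits (the path bits) of the remote
	 * DLID leaves the base LID; if that equals this port's LID the
	 * destination is local, so RC/UC traffic is handled entirely
	 * in software by qib_ruc_loopback().
	 */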
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		qib_put_mr(sge->mr);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

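	/* Advance s_last; drag any index still parked on the old slot along. */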
	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}