xref: /linux/drivers/infiniband/sw/rxe/rxe_resp.c (revision 98838d95075a5295f3478ceba18bcccf472e30f4)
1 /*
2  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
3  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *	- Redistributions of source code must retain the above
16  *	  copyright notice, this list of conditions and the following
17  *	  disclaimer.
18  *
19  *	- Redistributions in binary form must reproduce the above
20  *	  copyright notice, this list of conditions and the following
21  *	  disclaimer in the documentation and/or other materials
22  *	  provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/skbuff.h>
35 
36 #include "rxe.h"
37 #include "rxe_loc.h"
38 #include "rxe_queue.h"
39 
40 enum resp_states {
41 	RESPST_NONE,
42 	RESPST_GET_REQ,
43 	RESPST_CHK_PSN,
44 	RESPST_CHK_OP_SEQ,
45 	RESPST_CHK_OP_VALID,
46 	RESPST_CHK_RESOURCE,
47 	RESPST_CHK_LENGTH,
48 	RESPST_CHK_RKEY,
49 	RESPST_EXECUTE,
50 	RESPST_READ_REPLY,
51 	RESPST_COMPLETE,
52 	RESPST_ACKNOWLEDGE,
53 	RESPST_CLEANUP,
54 	RESPST_DUPLICATE_REQUEST,
55 	RESPST_ERR_MALFORMED_WQE,
56 	RESPST_ERR_UNSUPPORTED_OPCODE,
57 	RESPST_ERR_MISALIGNED_ATOMIC,
58 	RESPST_ERR_PSN_OUT_OF_SEQ,
59 	RESPST_ERR_MISSING_OPCODE_FIRST,
60 	RESPST_ERR_MISSING_OPCODE_LAST_C,
61 	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
62 	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
63 	RESPST_ERR_RNR,
64 	RESPST_ERR_RKEY_VIOLATION,
65 	RESPST_ERR_LENGTH,
66 	RESPST_ERR_CQ_OVERFLOW,
67 	RESPST_ERROR,
68 	RESPST_RESET,
69 	RESPST_DONE,
70 	RESPST_EXIT,
71 };
72 
73 static char *resp_state_name[] = {
74 	[RESPST_NONE]				= "NONE",
75 	[RESPST_GET_REQ]			= "GET_REQ",
76 	[RESPST_CHK_PSN]			= "CHK_PSN",
77 	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
78 	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
79 	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
80 	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
81 	[RESPST_CHK_RKEY]			= "CHK_RKEY",
82 	[RESPST_EXECUTE]			= "EXECUTE",
83 	[RESPST_READ_REPLY]			= "READ_REPLY",
84 	[RESPST_COMPLETE]			= "COMPLETE",
85 	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
86 	[RESPST_CLEANUP]			= "CLEANUP",
87 	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
88 	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
89 	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
90 	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
91 	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
92 	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
93 	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
94 	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
95 	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
96 	[RESPST_ERR_RNR]			= "ERR_RNR",
97 	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
98 	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
99 	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
100 	[RESPST_ERROR]				= "ERROR",
101 	[RESPST_RESET]				= "RESET",
102 	[RESPST_DONE]				= "DONE",
103 	[RESPST_EXIT]				= "EXIT",
104 };
105 
106 /* rxe_recv calls here to add a request packet to the input queue */
107 void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
108 			struct sk_buff *skb)
109 {
110 	int must_sched;
111 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
112 
113 	skb_queue_tail(&qp->req_pkts, skb);
114 
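	/* Defer to the responder tasklet for RDMA READ requests or when
	 * other packets are already queued; small single-packet requests
	 * may be handled directly in the caller's context.
	 */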
115 	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
116 			(skb_queue_len(&qp->req_pkts) > 1);
117 
118 	rxe_run_task(&qp->resp.task, must_sched);
119 }
120 
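/* Fetch the next request packet to work on.  When the QP is in the error
 * state the packet queue is drained one skb at a time and then the recv
 * work requests are flushed; otherwise the head packet is only peeked so
 * that cleanup() can dequeue and free it later.  If qp->resp.res is set,
 * a multi-packet read reply is still in progress and is resumed first.
 */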
121 static inline enum resp_states get_req(struct rxe_qp *qp,
122 				       struct rxe_pkt_info **pkt_p)
123 {
124 	struct sk_buff *skb;
125 
126 	if (qp->resp.state == QP_STATE_ERROR) {
127 		skb = skb_dequeue(&qp->req_pkts);
128 		if (skb) {
129 			/* drain request packet queue */
130 			rxe_drop_ref(qp);
131 			kfree_skb(skb);
132 			return RESPST_GET_REQ;
133 		}
134 
135 		/* go drain recv wr queue */
136 		return RESPST_CHK_RESOURCE;
137 	}
138 
139 	skb = skb_peek(&qp->req_pkts);
140 	if (!skb)
141 		return RESPST_EXIT;
142 
143 	*pkt_p = SKB_TO_PKT(skb);
144 
145 	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
146 }
147 
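/* Check the PSN of the request against the PSN the responder expects.
 * For RC a PSN ahead of the expected one is NAKed once (sent_psn_nak
 * suppresses repeats) and an earlier PSN is handled as a duplicate.
 * For UC an out-of-sequence packet causes the current message to be
 * dropped until the first packet of a new message arrives.
 */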
148 static enum resp_states check_psn(struct rxe_qp *qp,
149 				  struct rxe_pkt_info *pkt)
150 {
151 	int diff = psn_compare(pkt->psn, qp->resp.psn);
152 
153 	switch (qp_type(qp)) {
154 	case IB_QPT_RC:
155 		if (diff > 0) {
156 			if (qp->resp.sent_psn_nak)
157 				return RESPST_CLEANUP;
158 
159 			qp->resp.sent_psn_nak = 1;
160 			return RESPST_ERR_PSN_OUT_OF_SEQ;
161 
162 		} else if (diff < 0) {
163 			return RESPST_DUPLICATE_REQUEST;
164 		}
165 
166 		if (qp->resp.sent_psn_nak)
167 			qp->resp.sent_psn_nak = 0;
168 
169 		break;
170 
171 	case IB_QPT_UC:
172 		if (qp->resp.drop_msg || diff != 0) {
173 			if (pkt->mask & RXE_START_MASK) {
174 				qp->resp.drop_msg = 0;
175 				return RESPST_CHK_OP_SEQ;
176 			}
177 
178 			qp->resp.drop_msg = 1;
179 			return RESPST_CLEANUP;
180 		}
181 		break;
182 	default:
183 		break;
184 	}
185 
186 	return RESPST_CHK_OP_SEQ;
187 }
188 
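/* Check that the opcode of this packet is a legal successor of the last
 * opcode received on this QP, i.e. that the FIRST/MIDDLE packets of a
 * multi-packet message are followed by MIDDLE/LAST packets of the same
 * operation.  RC violations are NAKed as class C errors; UC violations
 * drop the message or take the class D1/E path.
 */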
189 static enum resp_states check_op_seq(struct rxe_qp *qp,
190 				     struct rxe_pkt_info *pkt)
191 {
192 	switch (qp_type(qp)) {
193 	case IB_QPT_RC:
194 		switch (qp->resp.opcode) {
195 		case IB_OPCODE_RC_SEND_FIRST:
196 		case IB_OPCODE_RC_SEND_MIDDLE:
197 			switch (pkt->opcode) {
198 			case IB_OPCODE_RC_SEND_MIDDLE:
199 			case IB_OPCODE_RC_SEND_LAST:
200 			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
201 			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
202 				return RESPST_CHK_OP_VALID;
203 			default:
204 				return RESPST_ERR_MISSING_OPCODE_LAST_C;
205 			}
206 
207 		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
208 		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
209 			switch (pkt->opcode) {
210 			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
211 			case IB_OPCODE_RC_RDMA_WRITE_LAST:
212 			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
213 				return RESPST_CHK_OP_VALID;
214 			default:
215 				return RESPST_ERR_MISSING_OPCODE_LAST_C;
216 			}
217 
218 		default:
219 			switch (pkt->opcode) {
220 			case IB_OPCODE_RC_SEND_MIDDLE:
221 			case IB_OPCODE_RC_SEND_LAST:
222 			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
223 			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
224 			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
225 			case IB_OPCODE_RC_RDMA_WRITE_LAST:
226 			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
227 				return RESPST_ERR_MISSING_OPCODE_FIRST;
228 			default:
229 				return RESPST_CHK_OP_VALID;
230 			}
231 		}
232 		break;
233 
234 	case IB_QPT_UC:
235 		switch (qp->resp.opcode) {
236 		case IB_OPCODE_UC_SEND_FIRST:
237 		case IB_OPCODE_UC_SEND_MIDDLE:
238 			switch (pkt->opcode) {
239 			case IB_OPCODE_UC_SEND_MIDDLE:
240 			case IB_OPCODE_UC_SEND_LAST:
241 			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
242 				return RESPST_CHK_OP_VALID;
243 			default:
244 				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
245 			}
246 
247 		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
248 		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
249 			switch (pkt->opcode) {
250 			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
251 			case IB_OPCODE_UC_RDMA_WRITE_LAST:
252 			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
253 				return RESPST_CHK_OP_VALID;
254 			default:
255 				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
256 			}
257 
258 		default:
259 			switch (pkt->opcode) {
260 			case IB_OPCODE_UC_SEND_MIDDLE:
261 			case IB_OPCODE_UC_SEND_LAST:
262 			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
263 			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
264 			case IB_OPCODE_UC_RDMA_WRITE_LAST:
265 			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
266 				qp->resp.drop_msg = 1;
267 				return RESPST_CLEANUP;
268 			default:
269 				return RESPST_CHK_OP_VALID;
270 			}
271 		}
272 		break;
273 
274 	default:
275 		return RESPST_CHK_OP_VALID;
276 	}
277 }
278 
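/* Check that the QP's access flags allow the requested operation
 * (remote read, write or atomic).  RC violations are reported as
 * unsupported opcodes; UC simply drops the message.
 */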
279 static enum resp_states check_op_valid(struct rxe_qp *qp,
280 				       struct rxe_pkt_info *pkt)
281 {
282 	switch (qp_type(qp)) {
283 	case IB_QPT_RC:
284 		if (((pkt->mask & RXE_READ_MASK) &&
285 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
286 		    ((pkt->mask & RXE_WRITE_MASK) &&
287 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
288 		    ((pkt->mask & RXE_ATOMIC_MASK) &&
289 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
290 			return RESPST_ERR_UNSUPPORTED_OPCODE;
291 		}
292 
293 		break;
294 
295 	case IB_QPT_UC:
296 		if ((pkt->mask & RXE_WRITE_MASK) &&
297 		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
298 			qp->resp.drop_msg = 1;
299 			return RESPST_CLEANUP;
300 		}
301 
302 		break;
303 
304 	case IB_QPT_UD:
305 	case IB_QPT_SMI:
306 	case IB_QPT_GSI:
307 		break;
308 
309 	default:
310 		WARN_ON(1);
311 		break;
312 	}
313 
314 	return RESPST_CHK_RESOURCE;
315 }
316 
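/* Take the next receive WQE from the shared receive queue.  The WQE is
 * copied into qp->resp.srq_wqe under the consumer lock because the queue
 * slot is released back to the producer straight away; an
 * IB_EVENT_SRQ_LIMIT_REACHED event is raised if an armed srq->limit is
 * crossed.
 */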
317 static enum resp_states get_srq_wqe(struct rxe_qp *qp)
318 {
319 	struct rxe_srq *srq = qp->srq;
320 	struct rxe_queue *q = srq->rq.queue;
321 	struct rxe_recv_wqe *wqe;
322 	struct ib_event ev;
323 
324 	if (srq->error)
325 		return RESPST_ERR_RNR;
326 
327 	spin_lock_bh(&srq->rq.consumer_lock);
328 
329 	wqe = queue_head(q);
330 	if (!wqe) {
331 		spin_unlock_bh(&srq->rq.consumer_lock);
332 		return RESPST_ERR_RNR;
333 	}
334 
335 	/* note kernel and user space recv wqes have same size */
336 	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));
337 
338 	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
339 	advance_consumer(q);
340 
341 	if (srq->limit && srq->ibsrq.event_handler &&
342 	    (queue_count(q) < srq->limit)) {
343 		srq->limit = 0;
344 		goto event;
345 	}
346 
347 	spin_unlock_bh(&srq->rq.consumer_lock);
348 	return RESPST_CHK_LENGTH;
349 
350 event:
351 	spin_unlock_bh(&srq->rq.consumer_lock);
352 	ev.device = qp->ibqp.device;
353 	ev.element.srq = qp->ibqp.srq;
354 	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
355 	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
356 	return RESPST_CHK_LENGTH;
357 }
358 
359 static enum resp_states check_resource(struct rxe_qp *qp,
360 				       struct rxe_pkt_info *pkt)
361 {
362 	struct rxe_srq *srq = qp->srq;
363 
364 	if (qp->resp.state == QP_STATE_ERROR) {
365 		if (qp->resp.wqe) {
366 			qp->resp.status = IB_WC_WR_FLUSH_ERR;
367 			return RESPST_COMPLETE;
368 		} else if (!srq) {
369 			qp->resp.wqe = queue_head(qp->rq.queue);
370 			if (qp->resp.wqe) {
371 				qp->resp.status = IB_WC_WR_FLUSH_ERR;
372 				return RESPST_COMPLETE;
373 			} else {
374 				return RESPST_EXIT;
375 			}
376 		} else {
377 			return RESPST_EXIT;
378 		}
379 	}
380 
381 	if (pkt->mask & RXE_READ_OR_ATOMIC) {
382 		/* it is the requester's job not to send
383 		 * too many read/atomic ops; we just
384 		 * recycle the responder resource queue
385 		 */
386 		if (likely(qp->attr.max_dest_rd_atomic > 0))
387 			return RESPST_CHK_LENGTH;
388 		else
389 			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
390 	}
391 
392 	if (pkt->mask & RXE_RWR_MASK) {
393 		if (srq)
394 			return get_srq_wqe(qp);
395 
396 		qp->resp.wqe = queue_head(qp->rq.queue);
397 		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
398 	}
399 
400 	return RESPST_CHK_LENGTH;
401 }
402 
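/* Currently performs no length validation; every QP type proceeds
 * straight to the rkey check.
 */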
403 static enum resp_states check_length(struct rxe_qp *qp,
404 				     struct rxe_pkt_info *pkt)
405 {
406 	switch (qp_type(qp)) {
407 	case IB_QPT_RC:
408 		return RESPST_CHK_RKEY;
409 
410 	case IB_QPT_UC:
411 		return RESPST_CHK_RKEY;
412 
413 	default:
414 		return RESPST_CHK_RKEY;
415 	}
416 }
417 
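/* For RDMA read/write and atomic requests look up the memory region
 * addressed by the rkey, check that the requested access and address
 * range are allowed, and for writes check that the payload length and
 * pad count are consistent with the bytes still expected (resid).
 */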
418 static enum resp_states check_rkey(struct rxe_qp *qp,
419 				   struct rxe_pkt_info *pkt)
420 {
421 	struct rxe_mem *mem;
422 	u64 va;
423 	u32 rkey;
424 	u32 resid;
425 	u32 pktlen;
426 	int mtu = qp->mtu;
427 	enum resp_states state;
428 	int access;
429 
430 	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
431 		if (pkt->mask & RXE_RETH_MASK) {
432 			qp->resp.va = reth_va(pkt);
433 			qp->resp.rkey = reth_rkey(pkt);
434 			qp->resp.resid = reth_len(pkt);
435 		}
436 		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
437 						     : IB_ACCESS_REMOTE_WRITE;
438 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
439 		qp->resp.va = atmeth_va(pkt);
440 		qp->resp.rkey = atmeth_rkey(pkt);
441 		qp->resp.resid = sizeof(u64);
442 		access = IB_ACCESS_REMOTE_ATOMIC;
443 	} else {
444 		return RESPST_EXECUTE;
445 	}
446 
447 	va	= qp->resp.va;
448 	rkey	= qp->resp.rkey;
449 	resid	= qp->resp.resid;
450 	pktlen	= payload_size(pkt);
451 
452 	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
453 	if (!mem) {
454 		state = RESPST_ERR_RKEY_VIOLATION;
455 		goto err1;
456 	}
457 
458 	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
459 		state = RESPST_ERR_RKEY_VIOLATION;
460 		goto err1;
461 	}
462 
463 	if (mem_check_range(mem, va, resid)) {
464 		state = RESPST_ERR_RKEY_VIOLATION;
465 		goto err2;
466 	}
467 
468 	if (pkt->mask & RXE_WRITE_MASK)	 {
469 		if (resid > mtu) {
470 			if (pktlen != mtu || bth_pad(pkt)) {
471 				state = RESPST_ERR_LENGTH;
472 				goto err2;
473 			}
474 
475 			resid = mtu;
476 		} else {
477 			if (pktlen != resid) {
478 				state = RESPST_ERR_LENGTH;
479 				goto err2;
480 			}
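			/* 0x3 & -resid is the number of pad bytes needed to
			 * round resid up to a 4-byte boundary, e.g. a resid
			 * of 5 needs 3 pad bytes and a resid of 8 needs none.
			 */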
481 			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
482 				/* A mismatched pad count is not strictly
483 				 * a length error, but nothing else fits.
484 				 */
485 				state = RESPST_ERR_LENGTH;
486 				goto err2;
487 			}
488 		}
489 	}
490 
491 	WARN_ON(qp->resp.mr);
492 
493 	qp->resp.mr = mem;
494 	return RESPST_EXECUTE;
495 
496 err2:
497 	rxe_drop_ref(mem);
498 err1:
499 	return state;
500 }
501 
502 static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
503 				     int data_len)
504 {
505 	int err;
506 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
507 
508 	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
509 			data_addr, data_len, to_mem_obj, NULL);
510 	if (unlikely(err))
511 		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
512 					: RESPST_ERR_MALFORMED_WQE;
513 
514 	return RESPST_NONE;
515 }
516 
517 static enum resp_states write_data_in(struct rxe_qp *qp,
518 				      struct rxe_pkt_info *pkt)
519 {
520 	enum resp_states rc = RESPST_NONE;
521 	int	err;
522 	int data_len = payload_size(pkt);
523 
524 	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
525 			   data_len, to_mem_obj, NULL);
526 	if (err) {
527 		rc = RESPST_ERR_RKEY_VIOLATION;
528 		goto out;
529 	}
530 
531 	qp->resp.va += data_len;
532 	qp->resp.resid -= data_len;
533 
534 out:
535 	return rc;
536 }
537 
538 /* Guarantee atomicity of atomic operations at the machine level. */
539 static DEFINE_SPINLOCK(atomic_ops_lock);
540 
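/* Carry out a compare & swap or fetch & add on the target memory
 * location.  The original 64-bit value is saved in qp->resp.atomic_orig
 * so it can be returned in the atomic acknowledge, and the update is
 * done under the global atomic_ops_lock so it is atomic with respect to
 * other rxe QPs.
 */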
541 static enum resp_states process_atomic(struct rxe_qp *qp,
542 				       struct rxe_pkt_info *pkt)
543 {
544 	u64 iova = atmeth_va(pkt);
545 	u64 *vaddr;
546 	enum resp_states ret;
547 	struct rxe_mem *mr = qp->resp.mr;
548 
549 	if (mr->state != RXE_MEM_STATE_VALID) {
550 		ret = RESPST_ERR_RKEY_VIOLATION;
551 		goto out;
552 	}
553 
554 	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));
555 
556 	/* check that vaddr is 8-byte aligned. */
557 	if (!vaddr || (uintptr_t)vaddr & 7) {
558 		ret = RESPST_ERR_MISALIGNED_ATOMIC;
559 		goto out;
560 	}
561 
562 	spin_lock_bh(&atomic_ops_lock);
563 
564 	qp->resp.atomic_orig = *vaddr;
565 
566 	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
567 	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
568 		if (*vaddr == atmeth_comp(pkt))
569 			*vaddr = atmeth_swap_add(pkt);
570 	} else {
571 		*vaddr += atmeth_swap_add(pkt);
572 	}
573 
574 	spin_unlock_bh(&atomic_ops_lock);
575 
576 	ret = RESPST_NONE;
577 out:
578 	return ret;
579 }
580 
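/* Build an acknowledge, read response or atomic acknowledge packet.  The
 * BTH is copied from the request packet and then patched, an AETH or
 * atomic ack header is added when the opcode calls for one, and the ICRC
 * is either written directly or the partial CRC is handed back through
 * *crcp so the caller can fold the payload into it.
 */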
581 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
582 					  struct rxe_pkt_info *pkt,
583 					  struct rxe_pkt_info *ack,
584 					  int opcode,
585 					  int payload,
586 					  u32 psn,
587 					  u8 syndrome,
588 					  u32 *crcp)
589 {
590 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
591 	struct sk_buff *skb;
592 	u32 crc = 0;
593 	u32 *p;
594 	int paylen;
595 	int pad;
596 	int err;
597 
598 	/*
599 	 * allocate packet
600 	 */
601 	pad = (-payload) & 0x3;
602 	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
603 
604 	skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack);
605 	if (!skb)
606 		return NULL;
607 
608 	ack->qp = qp;
609 	ack->opcode = opcode;
610 	ack->mask = rxe_opcode[opcode].mask;
611 	ack->offset = pkt->offset;
612 	ack->paylen = paylen;
613 
614 	/* fill in bth using the request packet headers */
615 	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);
616 
617 	bth_set_opcode(ack, opcode);
618 	bth_set_qpn(ack, qp->attr.dest_qp_num);
619 	bth_set_pad(ack, pad);
620 	bth_set_se(ack, 0);
621 	bth_set_psn(ack, psn);
622 	bth_set_ack(ack, 0);
623 	ack->psn = psn;
624 
625 	if (ack->mask & RXE_AETH_MASK) {
626 		aeth_set_syn(ack, syndrome);
627 		aeth_set_msn(ack, qp->resp.msn);
628 	}
629 
630 	if (ack->mask & RXE_ATMACK_MASK)
631 		atmack_set_orig(ack, qp->resp.atomic_orig);
632 
633 	err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc);
634 	if (err) {
635 		kfree_skb(skb);
636 		return NULL;
637 	}
638 
639 	if (crcp) {
640 		/* CRC computation will be continued by the caller */
641 		*crcp = crc;
642 	} else {
643 		p = payload_addr(ack) + payload + bth_pad(ack);
644 		*p = ~crc;
645 	}
646 
647 	return skb;
648 }
649 
650 /* RDMA read response. If res is not NULL, then we have a current RDMA request
651  * being processed or replayed.
652  */
653 static enum resp_states read_reply(struct rxe_qp *qp,
654 				   struct rxe_pkt_info *req_pkt)
655 {
656 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
657 	struct rxe_pkt_info ack_pkt;
658 	struct sk_buff *skb;
659 	int mtu = qp->mtu;
660 	enum resp_states state;
661 	int payload;
662 	int opcode;
663 	int err;
664 	struct resp_res *res = qp->resp.res;
665 	u32 icrc;
666 	u32 *p;
667 
668 	if (!res) {
669 		/* This is the first time we process that request. Get a
670 		 * resource
671 		 */
672 		res = &qp->resp.resources[qp->resp.res_head];
673 
674 		free_rd_atomic_resource(qp, res);
675 		rxe_advance_resp_resource(qp);
676 
677 		res->type		= RXE_READ_MASK;
678 
679 		res->read.va		= qp->resp.va;
680 		res->read.va_org	= qp->resp.va;
681 
682 		res->first_psn		= req_pkt->psn;
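		/* the read is answered with DIV_ROUND_UP(reth_len, mtu)
		 * response packets, so last_psn is first_psn plus that
		 * count minus one
		 */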
683 		res->last_psn		= req_pkt->psn +
684 					  (reth_len(req_pkt) + mtu - 1) /
685 					  mtu - 1;
686 		res->cur_psn		= req_pkt->psn;
687 
688 		res->read.resid		= qp->resp.resid;
689 		res->read.length	= qp->resp.resid;
690 		res->read.rkey		= qp->resp.rkey;
691 
692 		/* note res inherits the reference to mr from qp */
693 		res->read.mr		= qp->resp.mr;
694 		qp->resp.mr		= NULL;
695 
696 		qp->resp.res		= res;
697 		res->state		= rdatm_res_state_new;
698 	}
699 
700 	if (res->state == rdatm_res_state_new) {
701 		if (res->read.resid <= mtu)
702 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
703 		else
704 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
705 	} else {
706 		if (res->read.resid > mtu)
707 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
708 		else
709 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
710 	}
711 
712 	res->state = rdatm_res_state_next;
713 
714 	payload = min_t(int, res->read.resid, mtu);
715 
716 	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
717 				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
718 	if (!skb)
719 		return RESPST_ERR_RNR;
720 
721 	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
722 			   payload, from_mem_obj, &icrc);
723 	if (err)
724 		pr_err("Failed copying memory\n");
725 
726 	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
727 	*p = ~icrc;
728 
729 	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
730 	if (err) {
731 		pr_err("Failed sending RDMA reply.\n");
732 		kfree_skb(skb);
733 		return RESPST_ERR_RNR;
734 	}
735 
736 	res->read.va += payload;
737 	res->read.resid -= payload;
738 	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;
739 
740 	if (res->read.resid > 0) {
741 		state = RESPST_DONE;
742 	} else {
743 		qp->resp.res = NULL;
744 		qp->resp.opcode = -1;
745 		qp->resp.psn = res->cur_psn;
746 		state = RESPST_CLEANUP;
747 	}
748 
749 	return state;
750 }
751 
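/* Build the 40 byte network header (IPv4 or GRH) that is delivered to a
 * UD receive buffer ahead of the payload, copied from the IP header of
 * the incoming packet.
 */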
752 static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
753 				   struct rxe_pkt_info *pkt)
754 {
755 	struct sk_buff *skb = PKT_TO_SKB(pkt);
756 
757 	memset(hdr, 0, sizeof(*hdr));
758 	if (skb->protocol == htons(ETH_P_IP))
759 		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
760 	else if (skb->protocol == htons(ETH_P_IPV6))
761 		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
762 }
763 
764 /* Executes a new request. A retried request never reaches this function
765  * (sends and writes are discarded; reads and atomics are retried elsewhere).
766  */
767 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
768 {
769 	enum resp_states err;
770 
771 	if (pkt->mask & RXE_SEND_MASK) {
772 		if (qp_type(qp) == IB_QPT_UD ||
773 		    qp_type(qp) == IB_QPT_SMI ||
774 		    qp_type(qp) == IB_QPT_GSI) {
775 			union rdma_network_hdr hdr;
776 
777 			build_rdma_network_hdr(&hdr, pkt);
778 
779 			err = send_data_in(qp, &hdr, sizeof(hdr));
780 			if (err)
781 				return err;
782 		}
783 		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
784 		if (err)
785 			return err;
786 	} else if (pkt->mask & RXE_WRITE_MASK) {
787 		err = write_data_in(qp, pkt);
788 		if (err)
789 			return err;
790 	} else if (pkt->mask & RXE_READ_MASK) {
791 		/* For RDMA Read we can increment the msn now. See C9-148. */
792 		qp->resp.msn++;
793 		return RESPST_READ_REPLY;
794 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
795 		err = process_atomic(qp, pkt);
796 		if (err)
797 			return err;
798 	} else
799 		/* Unreachable */
800 		WARN_ON(1);
801 
802 	/* We successfully processed this new request. */
803 	qp->resp.msn++;
804 
805 	/* next expected psn, read handles this separately */
806 	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
807 
808 	qp->resp.opcode = pkt->opcode;
809 	qp->resp.status = IB_WC_SUCCESS;
810 
811 	if (pkt->mask & RXE_COMP_MASK)
812 		return RESPST_COMPLETE;
813 	else if (qp_type(qp) == IB_QPT_RC)
814 		return RESPST_ACKNOWLEDGE;
815 	else
816 		return RESPST_CLEANUP;
817 }
818 
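/* Post a receive completion for the current WQE to the receive CQ.
 * Kernel and user space consumers use different work completion layouts
 * beyond byte_len, and for kernel consumers a send-with-invalidate also
 * marks the referenced MR as free.
 */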
819 static enum resp_states do_complete(struct rxe_qp *qp,
820 				    struct rxe_pkt_info *pkt)
821 {
822 	struct rxe_cqe cqe;
823 	struct ib_wc *wc = &cqe.ibwc;
824 	struct ib_uverbs_wc *uwc = &cqe.uibwc;
825 	struct rxe_recv_wqe *wqe = qp->resp.wqe;
826 
827 	if (unlikely(!wqe))
828 		return RESPST_CLEANUP;
829 
830 	memset(&cqe, 0, sizeof(cqe));
831 
832 	wc->wr_id		= wqe->wr_id;
833 	wc->status		= qp->resp.status;
834 	wc->qp			= &qp->ibqp;
835 
836 	/* fields after status are not required for errors */
837 	if (wc->status == IB_WC_SUCCESS) {
838 		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
839 				pkt->mask & RXE_WRITE_MASK) ?
840 					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
841 		wc->vendor_err = 0;
842 		wc->byte_len = wqe->dma.length - wqe->dma.resid;
843 
844 		/* fields after byte_len are different between kernel and user
845 		 * space
846 		 */
847 		if (qp->rcq->is_user) {
848 			uwc->wc_flags = IB_WC_GRH;
849 
850 			if (pkt->mask & RXE_IMMDT_MASK) {
851 				uwc->wc_flags |= IB_WC_WITH_IMM;
852 				uwc->ex.imm_data =
853 					(__u32 __force)immdt_imm(pkt);
854 			}
855 
856 			if (pkt->mask & RXE_IETH_MASK) {
857 				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
858 				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
859 			}
860 
861 			uwc->qp_num		= qp->ibqp.qp_num;
862 
863 			if (pkt->mask & RXE_DETH_MASK)
864 				uwc->src_qp = deth_sqp(pkt);
865 
866 			uwc->port_num		= qp->attr.port_num;
867 		} else {
868 			struct sk_buff *skb = PKT_TO_SKB(pkt);
869 
870 			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
871 			if (skb->protocol == htons(ETH_P_IP))
872 				wc->network_hdr_type = RDMA_NETWORK_IPV4;
873 			else
874 				wc->network_hdr_type = RDMA_NETWORK_IPV6;
875 
876 			if (pkt->mask & RXE_IMMDT_MASK) {
877 				wc->wc_flags |= IB_WC_WITH_IMM;
878 				wc->ex.imm_data = immdt_imm(pkt);
879 			}
880 
881 			if (pkt->mask & RXE_IETH_MASK) {
882 				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
883 				struct rxe_mem *rmr;
884 
885 				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
886 				wc->ex.invalidate_rkey = ieth_rkey(pkt);
887 
888 				rmr = rxe_pool_get_index(&rxe->mr_pool,
889 							 wc->ex.invalidate_rkey >> 8);
890 				if (unlikely(!rmr)) {
891 					pr_err("Bad rkey %#x invalidation\n",
892 					       wc->ex.invalidate_rkey);
893 					return RESPST_ERROR;
894 				}
895 				rmr->state = RXE_MEM_STATE_FREE;
896 			}
897 
898 			wc->qp			= &qp->ibqp;
899 
900 			if (pkt->mask & RXE_DETH_MASK)
901 				wc->src_qp = deth_sqp(pkt);
902 
903 			wc->port_num		= qp->attr.port_num;
904 		}
905 	}
906 
907 	/* for srq we copied the wqe; for !srq it still lives in the rq, so advance the consumer past it */
908 	if (!qp->srq)
909 		advance_consumer(qp->rq.queue);
910 
911 	qp->resp.wqe = NULL;
912 
913 	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
914 		return RESPST_ERR_CQ_OVERFLOW;
915 
916 	if (qp->resp.state == QP_STATE_ERROR)
917 		return RESPST_CHK_RESOURCE;
918 
919 	if (!pkt)
920 		return RESPST_DONE;
921 	else if (qp_type(qp) == IB_QPT_RC)
922 		return RESPST_ACKNOWLEDGE;
923 	else
924 		return RESPST_CLEANUP;
925 }
926 
927 static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
928 		    u8 syndrome, u32 psn)
929 {
930 	int err = 0;
931 	struct rxe_pkt_info ack_pkt;
932 	struct sk_buff *skb;
933 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
934 
935 	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
936 				 0, psn, syndrome, NULL);
937 	if (!skb) {
938 		err = -ENOMEM;
939 		goto err1;
940 	}
941 
942 	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
943 	if (err) {
944 		pr_err_ratelimited("Failed sending ack\n");
945 		kfree_skb(skb);
946 	}
947 
948 err1:
949 	return err;
950 }
951 
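/* Send the acknowledge for an atomic operation.  A clone of the skb is
 * transmitted while the original is stashed in a responder resource so
 * that a duplicate atomic request can be answered by resending it from
 * duplicate_request().
 */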
952 static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
953 			   u8 syndrome)
954 {
955 	int rc = 0;
956 	struct rxe_pkt_info ack_pkt;
957 	struct sk_buff *skb;
958 	struct sk_buff *skb_copy;
959 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
960 	struct resp_res *res;
961 
962 	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
963 				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
964 				 syndrome, NULL);
965 	if (!skb) {
966 		rc = -ENOMEM;
967 		goto out;
968 	}
969 
970 	skb_copy = skb_clone(skb, GFP_ATOMIC);
971 	if (skb_copy)
972 		rxe_add_ref(qp); /* for the new SKB */
973 	else {
974 		pr_warn("Could not clone atomic response\n");
975 		rc = -ENOMEM;
976 		goto out;
977 	}
978 
979 	res = &qp->resp.resources[qp->resp.res_head];
980 	free_rd_atomic_resource(qp, res);
981 	rxe_advance_resp_resource(qp);
982 
983 	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
984 
985 	res->type = RXE_ATOMIC_MASK;
986 	res->atomic.skb = skb;
987 	res->first_psn = ack_pkt.psn;
988 	res->last_psn  = ack_pkt.psn;
989 	res->cur_psn   = ack_pkt.psn;
990 
991 	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
992 	if (rc) {
993 		pr_err_ratelimited("Failed sending ack\n");
994 		rxe_drop_ref(qp);
995 		kfree_skb(skb_copy);
996 	}
997 
998 out:
999 	return rc;
1000 }
1001 
1002 static enum resp_states acknowledge(struct rxe_qp *qp,
1003 				    struct rxe_pkt_info *pkt)
1004 {
1005 	if (qp_type(qp) != IB_QPT_RC)
1006 		return RESPST_CLEANUP;
1007 
1008 	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
1009 		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
1010 	else if (pkt->mask & RXE_ATOMIC_MASK)
1011 		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
1012 	else if (bth_ack(pkt))
1013 		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
1014 
1015 	return RESPST_CLEANUP;
1016 }
1017 
1018 static enum resp_states cleanup(struct rxe_qp *qp,
1019 				struct rxe_pkt_info *pkt)
1020 {
1021 	struct sk_buff *skb;
1022 
1023 	if (pkt) {
1024 		skb = skb_dequeue(&qp->req_pkts);
1025 		rxe_drop_ref(qp);
1026 		kfree_skb(skb);
1027 	}
1028 
1029 	if (qp->resp.mr) {
1030 		rxe_drop_ref(qp->resp.mr);
1031 		qp->resp.mr = NULL;
1032 	}
1033 
1034 	return RESPST_DONE;
1035 }
1036 
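/* Find the cached responder resource (read or atomic) whose PSN range
 * covers the given PSN, if any.
 */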
1037 static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
1038 {
1039 	int i;
1040 
1041 	for (i = 0; i < qp->attr.max_rd_atomic; i++) {
1042 		struct resp_res *res = &qp->resp.resources[i];
1043 
1044 		if (res->type == 0)
1045 			continue;
1046 
1047 		if (psn_compare(psn, res->first_psn) >= 0 &&
1048 		    psn_compare(psn, res->last_psn) <= 0) {
1049 			return res;
1050 		}
1051 	}
1052 
1053 	return NULL;
1054 }
1055 
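/* Handle a request whose PSN is earlier than the one currently expected.
 * Duplicate sends and writes are simply acknowledged again, duplicate
 * reads are replayed from the cached read resource, and duplicate
 * atomics are answered by retransmitting the saved atomic ack.
 */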
1056 static enum resp_states duplicate_request(struct rxe_qp *qp,
1057 					  struct rxe_pkt_info *pkt)
1058 {
1059 	enum resp_states rc;
1060 
1061 	if (pkt->mask & RXE_SEND_MASK ||
1062 	    pkt->mask & RXE_WRITE_MASK) {
1063 		/* SEND or RDMA WRITE. Ack again and clean up. C9-105. */
1064 		if (bth_ack(pkt))
1065 			send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
1066 		rc = RESPST_CLEANUP;
1067 		goto out;
1068 	} else if (pkt->mask & RXE_READ_MASK) {
1069 		struct resp_res *res;
1070 
1071 		res = find_resource(qp, pkt->psn);
1072 		if (!res) {
1073 			/* Resource not found. Class D error.  Drop the
1074 			 * request.
1075 			 */
1076 			rc = RESPST_CLEANUP;
1077 			goto out;
1078 		} else {
1079 			/* Ensure this new request is the same as the previous
1080 			 * one or a subset of it.
1081 			 */
1082 			u64 iova = reth_va(pkt);
1083 			u32 resid = reth_len(pkt);
1084 
1085 			if (iova < res->read.va_org ||
1086 			    resid > res->read.length ||
1087 			    (iova + resid) > (res->read.va_org +
1088 					      res->read.length)) {
1089 				rc = RESPST_CLEANUP;
1090 				goto out;
1091 			}
1092 
1093 			if (reth_rkey(pkt) != res->read.rkey) {
1094 				rc = RESPST_CLEANUP;
1095 				goto out;
1096 			}
1097 
1098 			res->cur_psn = pkt->psn;
1099 			res->state = (pkt->psn == res->first_psn) ?
1100 					rdatm_res_state_new :
1101 					rdatm_res_state_replay;
1102 
1103 			/* Reset the resource, except length. */
1104 			res->read.va_org = iova;
1105 			res->read.va = iova;
1106 			res->read.resid = resid;
1107 
1108 			/* Replay the RDMA read reply. */
1109 			qp->resp.res = res;
1110 			rc = RESPST_READ_REPLY;
1111 			goto out;
1112 		}
1113 	} else {
1114 		struct resp_res *res;
1115 
1116 		/* Find the operation in our list of responder resources. */
1117 		res = find_resource(qp, pkt->psn);
1118 		if (res) {
1119 			struct sk_buff *skb_copy;
1120 
1121 			skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
1122 			if (skb_copy) {
1123 				rxe_add_ref(qp); /* for the new SKB */
1124 			} else {
1125 				pr_warn("Couldn't clone atomic resp\n");
1126 				rc = RESPST_CLEANUP;
1127 				goto out;
1128 			}
1129 
1130 			/* Resend the result. */
1131 			rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
1132 					     pkt, skb_copy);
1133 			if (rc) {
1134 				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
1135 				kfree_skb(skb_copy);
1136 				rc = RESPST_CLEANUP;
1137 				goto out;
1138 			}
1139 		}
1140 
1141 		/* Resource either resent above or not found (Class D error); in both cases just clean up. */
1142 		rc = RESPST_CLEANUP;
1143 		goto out;
1144 	}
1145 out:
1146 	return rc;
1147 }
1148 
1149 /* Process a class A or C error. Both are treated the same in this implementation. */
1150 static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
1151 			      enum ib_wc_status status)
1152 {
1153 	qp->resp.aeth_syndrome	= syndrome;
1154 	qp->resp.status		= status;
1155 
1156 	/* indicate that we should go through the ERROR state */
1157 	qp->resp.goto_error	= 1;
1158 }
1159 
1160 static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
1161 {
1162 	/* UC */
1163 	if (qp->srq) {
1164 		/* Class E */
1165 		qp->resp.drop_msg = 1;
1166 		if (qp->resp.wqe) {
1167 			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
1168 			return RESPST_COMPLETE;
1169 		} else {
1170 			return RESPST_CLEANUP;
1171 		}
1172 	} else {
1173 		/* Class D1. This packet may be the start of a
1174 		 * new message and could be valid. The previous
1175 		 * message is invalid and ignored. Reset the
1176 		 * recv wr to its original state.
1177 		 */
1178 		if (qp->resp.wqe) {
1179 			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
1180 			qp->resp.wqe->dma.cur_sge = 0;
1181 			qp->resp.wqe->dma.sge_offset = 0;
1182 			qp->resp.opcode = -1;
1183 		}
1184 
1185 		if (qp->resp.mr) {
1186 			rxe_drop_ref(qp->resp.mr);
1187 			qp->resp.mr = NULL;
1188 		}
1189 
1190 		return RESPST_CLEANUP;
1191 	}
1192 }
1193 
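/* The responder state machine, run from the responder task (tasklet).
 * It steps through the resp_states above for each request packet and
 * returns 0 when a request was processed (so the caller is expected to
 * invoke it again) or -EAGAIN when there is nothing more to do.
 */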
1194 int rxe_responder(void *arg)
1195 {
1196 	struct rxe_qp *qp = (struct rxe_qp *)arg;
1197 	enum resp_states state;
1198 	struct rxe_pkt_info *pkt = NULL;
1199 	int ret = 0;
1200 
1201 	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
1202 
1203 	if (!qp->valid) {
1204 		ret = -EINVAL;
1205 		goto done;
1206 	}
1207 
1208 	switch (qp->resp.state) {
1209 	case QP_STATE_RESET:
1210 		state = RESPST_RESET;
1211 		break;
1212 
1213 	default:
1214 		state = RESPST_GET_REQ;
1215 		break;
1216 	}
1217 
1218 	while (1) {
1219 		pr_debug("qp#%d state = %s\n", qp_num(qp),
1220 			 resp_state_name[state]);
1221 		switch (state) {
1222 		case RESPST_GET_REQ:
1223 			state = get_req(qp, &pkt);
1224 			break;
1225 		case RESPST_CHK_PSN:
1226 			state = check_psn(qp, pkt);
1227 			break;
1228 		case RESPST_CHK_OP_SEQ:
1229 			state = check_op_seq(qp, pkt);
1230 			break;
1231 		case RESPST_CHK_OP_VALID:
1232 			state = check_op_valid(qp, pkt);
1233 			break;
1234 		case RESPST_CHK_RESOURCE:
1235 			state = check_resource(qp, pkt);
1236 			break;
1237 		case RESPST_CHK_LENGTH:
1238 			state = check_length(qp, pkt);
1239 			break;
1240 		case RESPST_CHK_RKEY:
1241 			state = check_rkey(qp, pkt);
1242 			break;
1243 		case RESPST_EXECUTE:
1244 			state = execute(qp, pkt);
1245 			break;
1246 		case RESPST_COMPLETE:
1247 			state = do_complete(qp, pkt);
1248 			break;
1249 		case RESPST_READ_REPLY:
1250 			state = read_reply(qp, pkt);
1251 			break;
1252 		case RESPST_ACKNOWLEDGE:
1253 			state = acknowledge(qp, pkt);
1254 			break;
1255 		case RESPST_CLEANUP:
1256 			state = cleanup(qp, pkt);
1257 			break;
1258 		case RESPST_DUPLICATE_REQUEST:
1259 			state = duplicate_request(qp, pkt);
1260 			break;
1261 		case RESPST_ERR_PSN_OUT_OF_SEQ:
1262 			/* RC only - Class B. Drop packet. */
1263 			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
1264 			state = RESPST_CLEANUP;
1265 			break;
1266 
1267 		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
1268 		case RESPST_ERR_MISSING_OPCODE_FIRST:
1269 		case RESPST_ERR_MISSING_OPCODE_LAST_C:
1270 		case RESPST_ERR_UNSUPPORTED_OPCODE:
1271 		case RESPST_ERR_MISALIGNED_ATOMIC:
1272 			/* RC only - Class C. */
1273 			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
1274 					  IB_WC_REM_INV_REQ_ERR);
1275 			state = RESPST_COMPLETE;
1276 			break;
1277 
1278 		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
1279 			state = do_class_d1e_error(qp);
1280 			break;
1281 		case RESPST_ERR_RNR:
1282 			if (qp_type(qp) == IB_QPT_RC) {
1283 				/* RC - class B */
1284 				send_ack(qp, pkt, AETH_RNR_NAK |
1285 					 (~AETH_TYPE_MASK &
1286 					 qp->attr.min_rnr_timer),
1287 					 pkt->psn);
1288 			} else {
1289 				/* UD/UC - class D */
1290 				qp->resp.drop_msg = 1;
1291 			}
1292 			state = RESPST_CLEANUP;
1293 			break;
1294 
1295 		case RESPST_ERR_RKEY_VIOLATION:
1296 			if (qp_type(qp) == IB_QPT_RC) {
1297 				/* Class C */
1298 				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
1299 						  IB_WC_REM_ACCESS_ERR);
1300 				state = RESPST_COMPLETE;
1301 			} else {
1302 				qp->resp.drop_msg = 1;
1303 				if (qp->srq) {
1304 					/* UC/SRQ Class D */
1305 					qp->resp.status = IB_WC_REM_ACCESS_ERR;
1306 					state = RESPST_COMPLETE;
1307 				} else {
1308 					/* UC/non-SRQ Class E. */
1309 					state = RESPST_CLEANUP;
1310 				}
1311 			}
1312 			break;
1313 
1314 		case RESPST_ERR_LENGTH:
1315 			if (qp_type(qp) == IB_QPT_RC) {
1316 				/* Class C */
1317 				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
1318 						  IB_WC_REM_INV_REQ_ERR);
1319 				state = RESPST_COMPLETE;
1320 			} else if (qp->srq) {
1321 				/* UC/UD - class E */
1322 				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
1323 				state = RESPST_COMPLETE;
1324 			} else {
1325 				/* UC/UD - class D */
1326 				qp->resp.drop_msg = 1;
1327 				state = RESPST_CLEANUP;
1328 			}
1329 			break;
1330 
1331 		case RESPST_ERR_MALFORMED_WQE:
1332 			/* All, Class A. */
1333 			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
1334 					  IB_WC_LOC_QP_OP_ERR);
1335 			state = RESPST_COMPLETE;
1336 			break;
1337 
1338 		case RESPST_ERR_CQ_OVERFLOW:
1339 			/* All - Class G */
1340 			state = RESPST_ERROR;
1341 			break;
1342 
1343 		case RESPST_DONE:
1344 			if (qp->resp.goto_error) {
1345 				state = RESPST_ERROR;
1346 				break;
1347 			}
1348 
1349 			goto done;
1350 
1351 		case RESPST_EXIT:
1352 			if (qp->resp.goto_error) {
1353 				state = RESPST_ERROR;
1354 				break;
1355 			}
1356 
1357 			goto exit;
1358 
1359 		case RESPST_RESET: {
1360 			struct sk_buff *skb;
1361 
1362 			while ((skb = skb_dequeue(&qp->req_pkts))) {
1363 				rxe_drop_ref(qp);
1364 				kfree_skb(skb);
1365 			}
1366 
1367 			while (!qp->srq && qp->rq.queue &&
1368 			       queue_head(qp->rq.queue))
1369 				advance_consumer(qp->rq.queue);
1370 
1371 			qp->resp.wqe = NULL;
1372 			goto exit;
1373 		}
1374 
1375 		case RESPST_ERROR:
1376 			qp->resp.goto_error = 0;
1377 			pr_warn("qp#%d moved to error state\n", qp_num(qp));
1378 			rxe_qp_error(qp);
1379 			goto exit;
1380 
1381 		default:
1382 			WARN_ON(1);
1383 		}
1384 	}
1385 
1386 exit:
1387 	ret = -EAGAIN;
1388 done:
1389 	return ret;
1390 }
1391