// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

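/* States of the responder state machine. rxe_responder() below steps
 * through these states for each inbound request packet until it reaches
 * RESPST_DONE or RESPST_EXIT.
 */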
enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE]				= "NONE",
	[RESPST_GET_REQ]			= "GET_REQ",
	[RESPST_CHK_PSN]			= "CHK_PSN",
	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
	[RESPST_CHK_RKEY]			= "CHK_RKEY",
	[RESPST_EXECUTE]			= "EXECUTE",
	[RESPST_READ_REPLY]			= "READ_REPLY",
	[RESPST_COMPLETE]			= "COMPLETE",
	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
	[RESPST_CLEANUP]			= "CLEANUP",
	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR]			= "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
	[RESPST_ERROR]				= "ERROR",
	[RESPST_RESET]				= "RESET",
	[RESPST_DONE]				= "DONE",
	[RESPST_EXIT]				= "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

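	/* Defer to the responder task (rather than running it inline) when
	 * the packet is an RDMA read request, which produces a multi-packet
	 * reply, or when other request packets are already queued.
	 */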
	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->req_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

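/* Check the packet's PSN against the PSN the responder expects next.
 * psn_compare() is positive when pkt->psn is ahead of qp->resp.psn
 * (out of sequence) and negative when it is behind (a duplicate).
 */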
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

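/* Verify that the packet's opcode is a legal successor to the previous
 * opcode seen on this QP, i.e. that the FIRST/MIDDLE/LAST packets of a
 * multi-packet message arrive in order.
 */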
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

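/* Check that the operation carried by the packet is permitted by the
 * QP's remote access flags.
 */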
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

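/* Pull the next receive WQE off the shared receive queue, copying it
 * into qp->resp.srq_wqe, and raise an IB_EVENT_SRQ_LIMIT_REACHED event
 * if consuming it brings the queue below the armed limit.
 */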
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

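/* Make sure the responder has what the operation needs: a nonzero
 * read/atomic resource budget for RDMA read and atomic requests, and a
 * receive WQE for operations that consume one.
 */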
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job not to send
		 * too many read/atomic ops; we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

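/* Placeholder: per-QP-type length validation is not implemented, so
 * every QP type falls through to the rkey check.
 */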
static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

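/* Validate the remote va/rkey/length carried by the request against a
 * memory region looked up in the QP's PD, and save a reference to the
 * MR in qp->resp.mr for the execute step.
 */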
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va	= qp->resp.va;
	rkey	= qp->resp.rkey;
	resid	= qp->resp.resid;
	pktlen	= payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not strictly be a length
				 * error, but no other status fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err:
	if (mem)
		rxe_drop_ref(mem);
	return state;
}

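/* Copy inbound send payload into the current receive WQE's
 * scatter/gather list.
 */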
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

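/* Write the payload of an RDMA WRITE packet into the target MR at the
 * current virtual address, then advance va and resid past it.
 */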
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int	err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check that vaddr is 8-byte aligned */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

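/* Build an acknowledge/read-response packet, reusing the transport
 * headers of the request packet. If crcp is non-NULL the partial ICRC
 * is returned so the caller can fold in the payload; otherwise the
 * final ICRC is written into the packet here.
 */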
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process this request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type		= RXE_READ_MASK;
		res->replay		= 0;

		res->read.va		= qp->resp.va;
		res->read.va_org	= qp->resp.va;

		res->first_psn		= req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn	= (req_pkt->psn +
					   (reth_len(req_pkt) + mtu - 1) /
					   mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn	= res->first_psn;
		}
		res->cur_psn		= req_pkt->psn;

		res->read.resid		= qp->resp.resid;
		res->read.length	= qp->resp.resid;
		res->read.rkey		= qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr		= qp->resp.mr;
		qp->resp.mr		= NULL;

		qp->resp.res		= res;
		res->state		= rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	if (bth_pad(&ack_pkt)) {
		struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
		icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
	}
	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

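/* Build the network header that UD/SMI/GSI receives report at the
 * start of the receive buffer, from the request's IPv4 or IPv6 header.
 */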
static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
				   struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	memset(hdr, 0, sizeof(*hdr));
	if (skb->protocol == htons(ETH_P_IP))
		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;

			build_rdma_network_hdr(&hdr, pkt);

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK) {
		/* We successfully processed this new request. */
		qp->resp.msn++;
		return RESPST_COMPLETE;
	} else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

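/* Retire the current receive WQE by posting a work completion, built
 * from the packet headers, to the receive CQ.
 */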
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status             = qp->resp.status;
		uwc->qp_num             = qp->ibqp.qp_num;
		uwc->wr_id              = wqe->wr_id;
	} else {
		wc->status              = qp->resp.status;
		wc->qp                  = &qp->ibqp;
		wc->wr_id               = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length : wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num		= qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num		= qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
				rxe_drop_ref(rmr);
			}

			wc->qp			= &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num		= qp->attr.port_num;
		}
	}

	/* for SRQ we copied the wqe in get_srq_wqe(); for the ordinary RQ we
	 * still hold a reference into the queue, so retire the entry now
	 */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}


static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}

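/* Send an atomic acknowledge and stash the skb in a responder resource
 * so a duplicate atomic request can be answered by retransmitting it.
 */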
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	rxe_add_ref(qp);

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
	       sizeof(skb->cb) - sizeof(ack_pkt));

	skb_get(skb);
	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn  = ack_pkt.psn;
	res->cur_psn   = ack_pkt.psn;

	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
	}
out:
	return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

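/* Find the responder resource (pending read reply or cached atomic
 * response) whose PSN range covers the given PSN.
 */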
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND or WRITE. Ack again and cleanup. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			skb_get(res->atomic.skb);
			/* Resend the result. */
			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}


/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome	= syndrome;
	qp->resp.status		= status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error	= 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored; reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (notify)
		return;

	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
		advance_consumer(qp->rq.queue);
}

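/* Entry point of the responder state machine, run from the QP's
 * response task. Returns 0 when a request was handled to completion
 * and -EAGAIN when there is nothing more to do for now.
 */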
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}