// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>

#include <rdma/ib_verbs.h>

#include "siw.h"

static int map_wc_opcode[SIW_NUM_OPCODES] = {
	[SIW_OP_WRITE] = IB_WC_RDMA_WRITE,
	[SIW_OP_SEND] = IB_WC_SEND,
	[SIW_OP_SEND_WITH_IMM] = IB_WC_SEND,
	[SIW_OP_READ] = IB_WC_RDMA_READ,
	[SIW_OP_READ_LOCAL_INV] = IB_WC_RDMA_READ,
	[SIW_OP_COMP_AND_SWAP] = IB_WC_COMP_SWAP,
	[SIW_OP_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
	[SIW_OP_INVAL_STAG] = IB_WC_LOCAL_INV,
	[SIW_OP_REG_MR] = IB_WC_REG_MR,
	[SIW_OP_RECEIVE] = IB_WC_RECV,
	[SIW_OP_READ_RESPONSE] = -1 /* not used */
};

static struct {
	enum siw_wc_status siw;
	enum ib_wc_status ib;
} map_cqe_status[SIW_NUM_WC_STATUS] = {
	{ SIW_WC_SUCCESS, IB_WC_SUCCESS },
	{ SIW_WC_LOC_LEN_ERR, IB_WC_LOC_LEN_ERR },
	{ SIW_WC_LOC_PROT_ERR, IB_WC_LOC_PROT_ERR },
	{ SIW_WC_LOC_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
	{ SIW_WC_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
	{ SIW_WC_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
	{ SIW_WC_LOC_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
	{ SIW_WC_REM_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
	{ SIW_WC_REM_INV_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
	{ SIW_WC_GENERAL_ERR, IB_WC_GENERAL_ERR }
};

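/*
 * Editorial sketch (not part of the original file): siw_reap_cqe()
 * below indexes both tables with the raw opcode/status taken from
 * the CQE, so entry order must track the enum definitions in the
 * siw user ABI (rdma/siw-abi.h). A compile-time spot check, assuming
 * SIW_WC_SUCCESS is the first enumerator and <linux/build_bug.h> is
 * reachable via "siw.h", could read:
 */
static_assert(SIW_WC_SUCCESS == 0,
	      "map_cqe_status[] must match enum siw_wc_status");
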
/*
 * Reap one CQE from the CQ. Only used by kernel clients
 * during normal CQ operation. May also be called during CQ
 * flush for a user-mapped CQE array.
 */
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
{
	struct siw_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);

	cqe = &cq->queue[cq->cq_get % cq->num_cqe];
	if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
		memset(wc, 0, sizeof(*wc));
		wc->wr_id = cqe->id;
		wc->byte_len = cqe->bytes;

		/*
		 * During CQ flush, userland CQEs may also get
		 * reaped here. Those do not hold a QP reference
		 * and do not qualify for memory extension verbs.
		 */
		if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
			if (cqe->flags & SIW_WQE_REM_INVAL) {
				wc->ex.invalidate_rkey = cqe->inval_stag;
				wc->wc_flags = IB_WC_WITH_INVALIDATE;
			}
			wc->qp = cqe->base_qp;
			wc->opcode = map_wc_opcode[cqe->opcode];
			wc->status = map_cqe_status[cqe->status].ib;
			siw_dbg_cq(cq,
				   "idx %u, type %d, flags %2x, id 0x%pK\n",
				   cq->cq_get % cq->num_cqe, cqe->opcode,
				   cqe->flags, (void *)(uintptr_t)cqe->id);
		} else {
			/*
			 * A malicious user may set an invalid opcode or
			 * status in the user-mmapped CQE array.
			 * Sanity check and correct values in that case
			 * to avoid out-of-bounds access to the global
			 * arrays for opcode and status mapping.
			 */
			u8 opcode = cqe->opcode;
			u16 status = cqe->status;

			if (opcode >= SIW_NUM_OPCODES) {
				opcode = 0;
				status = SIW_WC_GENERAL_ERR;
			} else if (status >= SIW_NUM_WC_STATUS) {
				status = SIW_WC_GENERAL_ERR;
			}
			wc->opcode = map_wc_opcode[opcode];
			wc->status = map_cqe_status[status].ib;
		}
		WRITE_ONCE(cqe->flags, 0);
		cq->cq_get++;

		spin_unlock_irqrestore(&cq->lock, flags);

		return 1;
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}

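/*
 * A minimal usage sketch (editorial, adapted from the provider's
 * poll verb in siw_verbs.c; exact names assumed): a kernel client's
 * ib_poll_cq() call resolves to a loop that reaps CQEs until either
 * @num_cqe completions were returned or the CQ runs empty.
 */
int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	int i;

	for (i = 0; i < num_cqe; i++) {
		/* siw_reap_cqe() returns 1 if a valid CQE was copied. */
		if (!siw_reap_cqe(cq, wc++))
			break;
	}
	return i;
}
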
/*
 * siw_cq_flush()
 *
 * Flush all CQ elements.
 */
void siw_cq_flush(struct siw_cq *cq)
{
	struct ib_wc wc;

	while (siw_reap_cqe(cq, &wc))
		;
}
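
/*
 * Usage note (editorial, not part of the original file): the
 * provider's CQ destroy path (siw_destroy_cq() in siw_verbs.c)
 * is expected to call siw_cq_flush() to discard any completions
 * still queued before the CQE array is unmapped and freed.
 */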