xref: /linux/drivers/infiniband/hw/erdma/erdma_eq.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

#define MAX_POLL_CHUNK_SIZE 16

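/*
 * Publish the current consumer index through the doorbell record and the
 * MMIO doorbell, with the arm bit set so the device raises an interrupt
 * for the next event.
 */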
void notify_eq(struct erdma_eq *eq)
{
	u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
		      FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);

	*eq->dbrec = db_data;
	writeq(db_data, eq->db);

	atomic64_inc(&eq->notify_num);
}

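/*
 * An EQE is valid when its owner bit differs from the wrap parity of the
 * consumer index (the bit of ci selected by the queue depth, which is a
 * power of two). Returns the EQE on a match, or NULL if the queue is empty.
 */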
void *get_next_valid_eqe(struct erdma_eq *eq)
{
	u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));

	return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
}

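/*
 * Drain up to MAX_POLL_CHUNK_SIZE entries from the asynchronous EQ:
 * CQ errors are dispatched to the CQ's event handler as IB_EVENT_CQ_ERR,
 * all other events to the QP's handler as IB_EVENT_QP_FATAL. The AEQ is
 * re-armed on exit.
 */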
void erdma_aeq_event_handler(struct erdma_dev *dev)
{
	struct erdma_aeqe *aeqe;
	u32 cqn, qpn;
	struct erdma_qp *qp;
	struct erdma_cq *cq;
	struct ib_event event;
	u32 poll_cnt = 0;

	memset(&event, 0, sizeof(event));

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		aeqe = get_next_valid_eqe(&dev->aeq);
		if (!aeqe)
			break;

		/* Read the AEQE payload only after the owner bit check. */
		dma_rmb();

		dev->aeq.ci++;
		atomic64_inc(&dev->aeq.event_num);
		poll_cnt++;

		if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
			      le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
			cqn = le32_to_cpu(aeqe->event_data0);
			cq = find_cq_by_cqn(dev, cqn);
			if (!cq)
				continue;

			event.device = cq->ibcq.device;
			event.element.cq = &cq->ibcq;
			event.event = IB_EVENT_CQ_ERR;
			if (cq->ibcq.event_handler)
				cq->ibcq.event_handler(&event,
						       cq->ibcq.cq_context);
		} else {
			qpn = le32_to_cpu(aeqe->event_data0);
			qp = find_qp_by_qpn(dev, qpn);
			if (!qp)
				continue;

			event.device = qp->ibqp.device;
			event.element.qp = &qp->ibqp;
			event.event = IB_EVENT_QP_FATAL;
			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&event,
						       qp->ibqp.qp_context);
		}
	}

	notify_eq(&dev->aeq);
}

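/*
 * Allocate the DMA-coherent queue buffer and the doorbell record, and
 * reset the consumer index and counters shared by the AEQ and CEQs.
 */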
int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
{
	u32 buf_size = depth << EQE_SHIFT;

	eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
				      &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec)
		goto err_free_qbuf;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);
	eq->ci = 0;
	eq->depth = depth;

	return 0;

err_free_qbuf:
	dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
			  eq->qbuf_dma_addr);

	return -ENOMEM;
}

void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
{
	dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
			  eq->qbuf_dma_addr);
}

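/*
 * Set up the AEQ and hand its buffer address, depth, and doorbell record
 * address to the device through the AEQ configuration registers.
 */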
int erdma_aeq_init(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;
	int ret;

	ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;

	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);

	return 0;
}

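/*
 * Poll up to MAX_POLL_CHUNK_SIZE CEQEs, look up the CQ for each by its
 * CQN, and invoke its completion handler; kernel CQs also have their
 * command sequence number bumped. The CEQ is re-armed on exit.
 */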
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
{
	struct erdma_dev *dev = ceq_cb->dev;
	struct erdma_cq *cq;
	u32 poll_cnt = 0;
	u64 *ceqe;
	int cqn;

	if (!ceq_cb->ready)
		return;

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		ceqe = get_next_valid_eqe(&ceq_cb->eq);
		if (!ceqe)
			break;

		/* Read the CEQE payload only after the owner bit check. */
		dma_rmb();
		ceq_cb->eq.ci++;
		poll_cnt++;
		cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));

		cq = find_cq_by_cqn(dev, cqn);
		if (!cq)
			continue;

		if (rdma_is_kernel_res(&cq->ibcq.res))
			cq->kern_cq.cmdsn++;

		if (cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	}

	notify_eq(&ceq_cb->eq);
}

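/*
 * The hard IRQ handler only schedules the tasklet; the actual CEQE
 * processing runs in softirq context via erdma_intr_ceq_task().
 */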
static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
{
	struct erdma_eq_cb *ceq_cb = data;

	tasklet_schedule(&ceq_cb->tasklet);

	return IRQ_HANDLED;
}

static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}

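/*
 * Request the MSI-X vector for a CEQ (vector 0 serves the CMDQ EQ, so
 * CEQ n uses vector n + 1) and set an affinity hint on a CPU local to
 * the device's NUMA node.
 */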
static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
	int err;

	snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
		 pci_name(dev->pdev));
	eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);

	tasklet_init(&eqc->tasklet, erdma_intr_ceq_task, (unsigned long)eqc);

	cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
			&eqc->irq.affinity_hint_mask);

	err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
			  eqc->irq.name, eqc);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
		return err;
	}

	irq_set_affinity_hint(eqc->irq.msix_vector,
			      &eqc->irq.affinity_hint_mask);

	return 0;
}

static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];

	irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
	free_irq(eqc->irq.msix_vector, eqc);
}

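/*
 * Post a CREATE_EQ command for a CEQ. The depth is passed as log2, and
 * the doorbell record address is split into low/high halves.
 */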
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
	struct erdma_cmdq_create_eq_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CREATE_EQ);
	req.eqn = eqn;
	req.depth = ilog2(eq->depth);
	req.qbuf_addr = eq->qbuf_dma_addr;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	/* Vector index is the same as EQN. */
	req.vector_idx = eqn;
	req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
	req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}

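/*
 * Bring up one CEQ: allocate its queue, point eq->db at the per-CEQ
 * doorbell slot in the function BAR, and create the queue in hardware.
 */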
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	int ret;

	ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
		 (ceqn + 1) * ERDMA_DB_SIZE;
	dev->ceqs[ceqn].dev = dev;
	dev->ceqs[ceqn].ready = true;

	/* CEQs are indexed from 1; EQN 0 is reserved for the CMDQ EQ. */
	ret = create_eq_cmd(dev, ceqn + 1, eq);
	if (ret) {
		erdma_eq_destroy(dev, eq);
		dev->ceqs[ceqn].ready = false;
	}

	return ret;
}

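/*
 * Tear down one CEQ. If the DESTROY_EQ command fails, the queue memory
 * is not freed, as the device may still reference it.
 */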
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	struct erdma_cmdq_destroy_eq_req req;
	int err;

	dev->ceqs[ceqn].ready = false;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_DESTROY_EQ);
	/* CEQs are indexed from 1; EQN 0 is reserved for the CMDQ EQ. */
	req.eqn = ceqn + 1;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	req.vector_idx = ceqn + 1;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
	if (err)
		return;

	erdma_eq_destroy(dev, eq);
}

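/*
 * Create one CEQ for each of the irq_num - 1 completion vectors (vector 0
 * serves the CMDQ EQ) and hook up its interrupt; on failure, unwind every
 * CEQ brought up so far.
 */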
int erdma_ceqs_init(struct erdma_dev *dev)
{
	u32 i, j;
	int err;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		err = erdma_ceq_init_one(dev, i);
		if (err)
			goto out_err;

		err = erdma_set_ceq_irq(dev, i);
		if (err) {
			erdma_ceq_uninit_one(dev, i);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (j = 0; j < i; j++) {
		erdma_free_ceq_irq(dev, j);
		erdma_ceq_uninit_one(dev, j);
	}

	return err;
}

void erdma_ceqs_uninit(struct erdma_dev *dev)
{
	u32 i;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		erdma_free_ceq_irq(dev, i);
		erdma_ceq_uninit_one(dev, i);
	}
}