xref: /linux/drivers/infiniband/hw/erdma/erdma_eq.c (revision af8e51644a70f612974a6e767fa7d896d3c23f88)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

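/* Process at most this many EQEs per handler invocation. */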
#define MAX_POLL_CHUNK_SIZE 16

void notify_eq(struct erdma_eq *eq)
{
	u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
		      FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);

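	/* Publish the new CI to the doorbell record, then ring the doorbell. */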
	*eq->dbrec = db_data;
	writeq(db_data, eq->db);

	atomic64_inc(&eq->notify_num);
}

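/*
 * An EQE is valid when its owner bit differs from the wrap parity of the
 * consumer index; the parity flips each time the queue wraps.
 */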
void *get_next_valid_eqe(struct erdma_eq *eq)
{
	u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));

	return (owner ^ !!(eq->ci & eq->depth)) ? eqe : NULL;
}

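/*
 * Poll the async EQ and dispatch CQ error and QP fatal events to the
 * registered ib_event handlers.
 */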
void erdma_aeq_event_handler(struct erdma_dev *dev)
{
	struct erdma_aeqe *aeqe;
	u32 cqn, qpn;
	struct erdma_qp *qp;
	struct erdma_cq *cq;
	struct ib_event event;
	u32 poll_cnt = 0;

	memset(&event, 0, sizeof(event));

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		aeqe = get_next_valid_eqe(&dev->aeq);
		if (!aeqe)
			break;

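		/* Make sure the EQE payload is read only after the owner check. */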
		dma_rmb();

		dev->aeq.ci++;
		atomic64_inc(&dev->aeq.event_num);
		poll_cnt++;

		if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
			      le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
			cqn = le32_to_cpu(aeqe->event_data0);
			cq = find_cq_by_cqn(dev, cqn);
			if (!cq)
				continue;

			event.device = cq->ibcq.device;
			event.element.cq = &cq->ibcq;
			event.event = IB_EVENT_CQ_ERR;
			if (cq->ibcq.event_handler)
				cq->ibcq.event_handler(&event,
						       cq->ibcq.cq_context);
		} else {
			qpn = le32_to_cpu(aeqe->event_data0);
			qp = find_qp_by_qpn(dev, qpn);
			if (!qp)
				continue;

			event.device = qp->ibqp.device;
			event.element.qp = &qp->ibqp;
			event.event = IB_EVENT_QP_FATAL;
			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&event,
						       qp->ibqp.qp_context);
		}
	}

	notify_eq(&dev->aeq);
}

int erdma_aeq_init(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;

	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;

	eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
				      &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);

	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec)
		goto err_out;

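	/* Program the AEQ buffer address, depth and doorbell record address. */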
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);

	return 0;

err_out:
	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
			  eq->qbuf_dma_addr);

	return -ENOMEM;
}

void erdma_aeq_destroy(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;

	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
			  eq->qbuf_dma_addr);

	dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
}

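/*
 * Poll a completion EQ and invoke the completion handler of each CQ
 * referenced by the polled CEQEs.
 */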
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
{
	struct erdma_dev *dev = ceq_cb->dev;
	struct erdma_cq *cq;
	u32 poll_cnt = 0;
	u64 *ceqe;
	int cqn;

	if (!ceq_cb->ready)
		return;

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		ceqe = get_next_valid_eqe(&ceq_cb->eq);
		if (!ceqe)
			break;

		dma_rmb();
		ceq_cb->eq.ci++;
		poll_cnt++;
		cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));

		cq = find_cq_by_cqn(dev, cqn);
		if (!cq)
			continue;

		if (rdma_is_kernel_res(&cq->ibcq.res))
			cq->kern_cq.cmdsn++;

		if (cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	}

	notify_eq(&ceq_cb->eq);
}

static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
{
	struct erdma_eq_cb *ceq_cb = data;

	tasklet_schedule(&ceq_cb->tasklet);

	return IRQ_HANDLED;
}

static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}

static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
	int err;

	snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
		 pci_name(dev->pdev));
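	/* CEQ n is driven by MSI-X vector n + 1; vector 0 serves the AEQ/cmdq. */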
	eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);

	tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
		     (unsigned long)&dev->ceqs[ceqn]);

	cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
			&eqc->irq.affinity_hint_mask);

	err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
			  eqc->irq.name, eqc);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
		return err;
	}

	irq_set_affinity_hint(eqc->irq.msix_vector,
			      &eqc->irq.affinity_hint_mask);

	return 0;
}

static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];

	irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
	free_irq(eqc->irq.msix_vector, eqc);
}

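/*
 * Ask the device to create an event queue backed by the given queue buffer
 * and doorbell record.
 */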
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
	struct erdma_cmdq_create_eq_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CREATE_EQ);
	req.eqn = eqn;
	req.depth = ilog2(eq->depth);
	req.qbuf_addr = eq->qbuf_dma_addr;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	/* Vector index is the same as EQN. */
	req.vector_idx = eqn;
	req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
	req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}

static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	int ret;

	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
	eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
				      &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);

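	/* The doorbell of CEQ n lives at slot n + 1 of the CEQ doorbell region. */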
	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
		 (ceqn + 1) * ERDMA_DB_SIZE;

	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec) {
		dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
				  eq->qbuf, eq->qbuf_dma_addr);
		return -ENOMEM;
	}

	eq->ci = 0;
	dev->ceqs[ceqn].dev = dev;

	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	ret = create_eq_cmd(dev, ceqn + 1, eq);
	dev->ceqs[ceqn].ready = !ret;

	return ret;
}

static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	struct erdma_cmdq_destroy_eq_req req;
	int err;

	dev->ceqs[ceqn].ready = false;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_DESTROY_EQ);
	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	req.eqn = ceqn + 1;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	req.vector_idx = ceqn + 1;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
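	/* Keep the queue memory if the hardware did not acknowledge the destroy. */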
	if (err)
		return;

	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
			  eq->qbuf_dma_addr);
	dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
}

int erdma_ceqs_init(struct erdma_dev *dev)
{
	u32 i, j;
	int err;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		err = erdma_ceq_init_one(dev, i);
		if (err)
			goto out_err;

		err = erdma_set_ceq_irq(dev, i);
		if (err) {
			erdma_ceq_uninit_one(dev, i);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (j = 0; j < i; j++) {
		erdma_free_ceq_irq(dev, j);
		erdma_ceq_uninit_one(dev, j);
	}

	return err;
}

void erdma_ceqs_uninit(struct erdma_dev *dev)
{
	u32 i;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		erdma_free_ceq_irq(dev, i);
		erdma_ceq_uninit_one(dev, i);
	}
}