// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include <linux/delay.h>

#include "hinic3_csr.h"
#include "hinic3_eqs.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"

#define AEQ_CTRL_0_INTR_IDX_MASK      GENMASK(9, 0)
#define AEQ_CTRL_0_DMA_ATTR_MASK      GENMASK(17, 12)
#define AEQ_CTRL_0_PCI_INTF_IDX_MASK  GENMASK(22, 20)
#define AEQ_CTRL_0_INTR_MODE_MASK     BIT(31)
#define AEQ_CTRL_0_SET(val, member)  \
	FIELD_PREP(AEQ_CTRL_0_##member##_MASK, val)

#define AEQ_CTRL_1_LEN_MASK           GENMASK(20, 0)
#define AEQ_CTRL_1_ELEM_SIZE_MASK     GENMASK(25, 24)
#define AEQ_CTRL_1_PAGE_SIZE_MASK     GENMASK(31, 28)
#define AEQ_CTRL_1_SET(val, member)  \
	FIELD_PREP(AEQ_CTRL_1_##member##_MASK, val)

#define CEQ_CTRL_0_INTR_IDX_MASK      GENMASK(9, 0)
#define CEQ_CTRL_0_DMA_ATTR_MASK      GENMASK(17, 12)
#define CEQ_CTRL_0_LIMIT_KICK_MASK    GENMASK(23, 20)
#define CEQ_CTRL_0_PCI_INTF_IDX_MASK  GENMASK(25, 24)
#define CEQ_CTRL_0_PAGE_SIZE_MASK     GENMASK(30, 27)
#define CEQ_CTRL_0_INTR_MODE_MASK     BIT(31)
#define CEQ_CTRL_0_SET(val, member)  \
	FIELD_PREP(CEQ_CTRL_0_##member##_MASK, val)

#define CEQ_CTRL_1_LEN_MASK           GENMASK(19, 0)
#define CEQ_CTRL_1_SET(val, member)  \
	FIELD_PREP(CEQ_CTRL_1_##member##_MASK, val)

#define CEQE_TYPE_MASK                GENMASK(25, 23)
#define CEQE_TYPE(type)  \
	FIELD_GET(CEQE_TYPE_MASK, le32_to_cpu(type))

#define CEQE_DATA_MASK                GENMASK(25, 0)
#define CEQE_DATA(data)               ((data) & cpu_to_le32(CEQE_DATA_MASK))

#define EQ_ELEM_DESC_TYPE_MASK        GENMASK(6, 0)
#define EQ_ELEM_DESC_SRC_MASK         BIT(7)
#define EQ_ELEM_DESC_SIZE_MASK        GENMASK(15, 8)
#define EQ_ELEM_DESC_WRAPPED_MASK     BIT(31)
#define EQ_ELEM_DESC_GET(val, member)  \
	FIELD_GET(EQ_ELEM_DESC_##member##_MASK, le32_to_cpu(val))

#define EQ_CI_SIMPLE_INDIR_CI_MASK       GENMASK(20, 0)
#define EQ_CI_SIMPLE_INDIR_ARMED_MASK    BIT(21)
#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK  GENMASK(31, 30)
#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK  GENMASK(31, 24)
#define EQ_CI_SIMPLE_INDIR_SET(val, member)  \
	FIELD_PREP(EQ_CI_SIMPLE_INDIR_##member##_MASK, val)

#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq)  \
	(((eq)->type == HINIC3_AEQ) ?  \
	 HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR :  \
	 HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR)

#define EQ_PROD_IDX_REG_ADDR(eq)  \
	(((eq)->type == HINIC3_AEQ) ?  \
	 HINIC3_CSR_AEQ_PROD_IDX_ADDR : HINIC3_CSR_CEQ_PROD_IDX_ADDR)

#define EQ_HI_PHYS_ADDR_REG(type, pg_num)  \
	(((type) == HINIC3_AEQ) ?  \
	       HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) :  \
	       HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num))

#define EQ_LO_PHYS_ADDR_REG(type, pg_num)  \
	(((type) == HINIC3_AEQ) ?  \
	       HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) :  \
	       HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num))

#define EQ_MSIX_RESEND_TIMER_CLEAR  1

#define HINIC3_EQ_MAX_PAGES(eq)  \
	((eq)->type == HINIC3_AEQ ?  \
	 HINIC3_AEQ_MAX_PAGES : HINIC3_CEQ_MAX_PAGES)

#define HINIC3_TASK_PROCESS_EQE_LIMIT  1024
#define HINIC3_EQ_UPDATE_CI_STEP       64
#define HINIC3_EQS_WQ_NAME             "hinic3_eqs"

#define HINIC3_EQ_VALID_SHIFT          31
#define HINIC3_EQ_WRAPPED(eq)  \
	((eq)->wrapped << HINIC3_EQ_VALID_SHIFT)

#define HINIC3_EQ_WRAPPED_SHIFT        20
#define HINIC3_EQ_CONS_IDX(eq)  \
	((eq)->cons_idx | ((eq)->wrapped << HINIC3_EQ_WRAPPED_SHIFT))

static const struct hinic3_aeq_elem *get_curr_aeq_elem(const struct hinic3_eq *eq)
{
	return get_q_element(&eq->qpages, eq->cons_idx, NULL);
}

static const __be32 *get_curr_ceq_elem(const struct hinic3_eq *eq)
{
	return get_q_element(&eq->qpages, eq->cons_idx, NULL);
}

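/* Register a handler that is invoked from the AEQ workqueue when an event
 * of type @event is received.
 */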
int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
			   enum hinic3_aeq_type event,
			   hinic3_aeq_event_cb hwe_cb)
{
	struct hinic3_aeqs *aeqs;

	aeqs = hwdev->aeqs;
	aeqs->aeq_cb[event] = hwe_cb;
	spin_lock_init(&aeqs->aeq_lock);

	return 0;
}

void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
			      enum hinic3_aeq_type event)
{
	struct hinic3_aeqs *aeqs;

	aeqs = hwdev->aeqs;

	spin_lock_bh(&aeqs->aeq_lock);
	aeqs->aeq_cb[event] = NULL;
	spin_unlock_bh(&aeqs->aeq_lock);
}

int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
			   enum hinic3_ceq_event event,
			   hinic3_ceq_event_cb callback)
{
	struct hinic3_ceqs *ceqs;

	ceqs = hwdev->ceqs;
	ceqs->ceq_cb[event] = callback;
	spin_lock_init(&ceqs->ceq_lock);

	return 0;
}

void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
			      enum hinic3_ceq_event event)
{
	struct hinic3_ceqs *ceqs;

	ceqs = hwdev->ceqs;

	spin_lock_bh(&ceqs->ceq_lock);
	ceqs->ceq_cb[event] = NULL;
	spin_unlock_bh(&ceqs->ceq_lock);
}

/* Set consumer index in the hw. */
static void set_eq_cons_idx(struct hinic3_eq *eq, u32 arm_state)
{
	u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq);
	u32 eq_wrap_ci, val;

	eq_wrap_ci = HINIC3_EQ_CONS_IDX(eq);
	val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED);
	if (eq->type == HINIC3_AEQ) {
		val = val |
			EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
			EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX);
	} else {
		val = val |
			EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
			EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX);
	}

	hinic3_hwif_write_reg(eq->hwdev->hwif, addr, val);
}

static struct hinic3_ceqs *ceq_to_ceqs(const struct hinic3_eq *eq)
{
	return container_of(eq, struct hinic3_ceqs, ceq[eq->q_id]);
}

static void ceq_event_handler(struct hinic3_ceqs *ceqs, __le32 ceqe)
{
	enum hinic3_ceq_event event = CEQE_TYPE(ceqe);
	struct hinic3_hwdev *hwdev = ceqs->hwdev;
	__le32 ceqe_data = CEQE_DATA(ceqe);

	if (event >= HINIC3_MAX_CEQ_EVENTS) {
		dev_warn(hwdev->dev, "Ceq unknown event:%d, ceqe data: 0x%x\n",
			 event, ceqe_data);
		return;
	}

	spin_lock_bh(&ceqs->ceq_lock);
	if (ceqs->ceq_cb[event])
		ceqs->ceq_cb[event](hwdev, ceqe_data);

	spin_unlock_bh(&ceqs->ceq_lock);
}

static struct hinic3_aeqs *aeq_to_aeqs(const struct hinic3_eq *eq)
{
	return container_of(eq, struct hinic3_aeqs, aeq[eq->q_id]);
}

static void aeq_event_handler(struct hinic3_aeqs *aeqs, __le32 aeqe,
			      const struct hinic3_aeq_elem *aeqe_pos)
{
	struct hinic3_hwdev *hwdev = aeqs->hwdev;
	u8 data[HINIC3_AEQE_DATA_SIZE], size;
	enum hinic3_aeq_type event;
	hinic3_aeq_event_cb hwe_cb;

	if (EQ_ELEM_DESC_GET(aeqe, SRC))
		return;

	event = EQ_ELEM_DESC_GET(aeqe, TYPE);
	if (event >= HINIC3_MAX_AEQ_EVENTS) {
		dev_warn(hwdev->dev, "Aeq unknown event:%d\n", event);
		return;
	}

	memcpy(data, aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE);
	swab32_array((u32 *)data, HINIC3_AEQE_DATA_SIZE / sizeof(u32));
	size = EQ_ELEM_DESC_GET(aeqe, SIZE);

	spin_lock_bh(&aeqs->aeq_lock);
	hwe_cb = aeqs->aeq_cb[event];
	if (hwe_cb)
		hwe_cb(aeqs->hwdev, data, size);
	spin_unlock_bh(&aeqs->aeq_lock);
}

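/* Drain pending AEQ elements, up to HINIC3_TASK_PROCESS_EQE_LIMIT per call.
 * Returns 0 when the queue is empty or -EAGAIN if the limit was reached and
 * processing should be rescheduled.
 */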
static int aeq_irq_handler(struct hinic3_eq *eq)
{
	const struct hinic3_aeq_elem *aeqe_pos;
	struct hinic3_aeqs *aeqs;
	u32 i, eqe_cnt = 0;
	__le32 aeqe;

	aeqs = aeq_to_aeqs(eq);
	for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
		aeqe_pos = get_curr_aeq_elem(eq);
		aeqe = (__force __le32)swab32((__force __u32)aeqe_pos->desc);
		/* HW updates wrapped bit, when it adds eq element event */
		if (EQ_ELEM_DESC_GET(aeqe, WRAPPED) == eq->wrapped)
			return 0;

		/* Prevent speculative reads from element */
		dma_rmb();
		aeq_event_handler(aeqs, aeqe, aeqe_pos);
		eq->cons_idx++;
		if (eq->cons_idx == eq->eq_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}

		if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
			eqe_cnt = 0;
			set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
		}
	}

	return -EAGAIN;
}

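/* Drain pending CEQ elements, up to HINIC3_TASK_PROCESS_EQE_LIMIT per call.
 * Returns 0 when the queue is empty or -EAGAIN if the limit was reached.
 */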
static int ceq_irq_handler(struct hinic3_eq *eq)
{
	struct hinic3_ceqs *ceqs;
	u32 eqe_cnt = 0;
	__be32 ceqe_raw;
	__le32 ceqe;
	u32 i;

	ceqs = ceq_to_ceqs(eq);
	for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
		ceqe_raw = *get_curr_ceq_elem(eq);
		ceqe = (__force __le32)swab32((__force __u32)ceqe_raw);

		/* HW updates wrapped bit, when it adds eq element event */
		if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
			return 0;

		ceq_event_handler(ceqs, ceqe);
		eq->cons_idx++;
		if (eq->cons_idx == eq->eq_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}

		if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
			eqe_cnt = 0;
			set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
		}
	}

	return -EAGAIN;
}

static void reschedule_aeq_handler(struct hinic3_eq *eq)
{
	struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq);

	queue_work(aeqs->workq, &eq->aeq_work);
}

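/* Process pending elements of an AEQ or CEQ and update the HW consumer
 * index, re-arming the interrupt only when the queue was fully drained.
 */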
static int eq_irq_handler(struct hinic3_eq *eq)
{
	int err;

	if (eq->type == HINIC3_AEQ)
		err = aeq_irq_handler(eq);
	else
		err = ceq_irq_handler(eq);

	set_eq_cons_idx(eq, err ? HINIC3_EQ_NOT_ARMED :
			HINIC3_EQ_ARMED);

	return err;
}

static void aeq_irq_work(struct work_struct *work)
{
	struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work);
	int err;

	err = eq_irq_handler(eq);
	if (err)
		reschedule_aeq_handler(eq);
}

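/* AEQ hard interrupt: clear the MSI-X resend timer and defer element
 * processing to the aeqs workqueue.
 */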
static irqreturn_t aeq_interrupt(int irq, void *data)
{
	struct workqueue_struct *workq;
	struct hinic3_eq *aeq = data;
	struct hinic3_hwdev *hwdev;
	struct hinic3_aeqs *aeqs;

	aeqs = aeq_to_aeqs(aeq);
	hwdev = aeq->hwdev;

	/* clear resend timer cnt register */
	workq = aeqs->workq;
	hinic3_msix_intr_clear_resend_bit(hwdev, aeq->msix_entry_idx,
					  EQ_MSIX_RESEND_TIMER_CLEAR);
	queue_work(workq, &aeq->aeq_work);

	return IRQ_HANDLED;
}

static irqreturn_t ceq_interrupt(int irq, void *data)
{
	struct hinic3_eq *ceq = data;
	int err;

	/* clear resend timer counters */
	hinic3_msix_intr_clear_resend_bit(ceq->hwdev, ceq->msix_entry_idx,
					  EQ_MSIX_RESEND_TIMER_CLEAR);
	err = eq_irq_handler(ceq);
	if (err)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

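/* Program the control registers of a CEQ through the management CPU mailbox
 * (COMM_CMD_SET_CEQ_CTRL_REG).
 */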
static int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id,
				   u32 ctrl0, u32 ctrl1)
{
	struct comm_cmd_set_ceq_ctrl_reg ceq_ctrl = {};
	struct mgmt_msg_params msg_params = {};
	int err;

	ceq_ctrl.func_id = hinic3_global_func_id(hwdev);
	ceq_ctrl.q_id = q_id;
	ceq_ctrl.ctrl0 = ctrl0;
	ceq_ctrl.ctrl1 = ctrl1;

	mgmt_msg_params_init_default(&msg_params, &ceq_ctrl, sizeof(ceq_ctrl));

	err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
				       COMM_CMD_SET_CEQ_CTRL_REG, &msg_params);
	if (err || ceq_ctrl.head.status) {
		dev_err(hwdev->dev, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x\n",
			q_id, err, ceq_ctrl.head.status);
		return -EFAULT;
	}

	return 0;
}

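/* Configure the interrupt and geometry attributes of an EQ: AEQs are
 * programmed directly through CSRs, CEQs through the management CPU.
 */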
static int set_eq_ctrls(struct hinic3_eq *eq)
{
	struct hinic3_hwif *hwif = eq->hwdev->hwif;
	struct hinic3_queue_pages *qpages;
	u8 pci_intf_idx, elem_size;
	u32 mask, ctrl0, ctrl1;
	u32 page_size_val;
	int err;

	qpages = &eq->qpages;
	page_size_val = ilog2(qpages->page_size / HINIC3_MIN_PAGE_SIZE);
	pci_intf_idx = hwif->attr.pci_intf_idx;

	if (eq->type == HINIC3_AEQ) {
		/* set ctrl0 using read-modify-write */
		mask = AEQ_CTRL_0_INTR_IDX_MASK |
		       AEQ_CTRL_0_DMA_ATTR_MASK |
		       AEQ_CTRL_0_PCI_INTF_IDX_MASK |
		       AEQ_CTRL_0_INTR_MODE_MASK;
		ctrl0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR);
		ctrl0 = (ctrl0 & ~mask) |
			AEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
			AEQ_CTRL_0_SET(0, DMA_ATTR) |
			AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
			AEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
		hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR, ctrl0);

		/* HW expects log2(number of 32 byte units). */
		elem_size = qpages->elem_size_shift - 5;
		ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
			AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
			AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
		hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_1_ADDR, ctrl1);
	} else {
		ctrl0 = CEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
			CEQ_CTRL_0_SET(0, DMA_ATTR) |
			CEQ_CTRL_0_SET(0, LIMIT_KICK) |
			CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
			CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) |
			CEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);

		ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN);

		/* set ceq ctrl reg through mgmt cpu */
		err = hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0,
					      ctrl1);
		if (err)
			return err;
	}

	return 0;
}

static void ceq_elements_init(struct hinic3_eq *eq, u32 init_val)
{
	__be32 *ceqe;
	u32 i;

	for (i = 0; i < eq->eq_len; i++) {
		ceqe = get_q_element(&eq->qpages, i, NULL);
		*ceqe = cpu_to_be32(init_val);
	}

	wmb();    /* Clear ceq elements bit */
}

static void aeq_elements_init(struct hinic3_eq *eq, u32 init_val)
{
	struct hinic3_aeq_elem *aeqe;
	u32 i;

	for (i = 0; i < eq->eq_len; i++) {
		aeqe = get_q_element(&eq->qpages, i, NULL);
		aeqe->desc = cpu_to_be32(init_val);
	}

	wmb();    /* Clear aeq elements bit */
}

static void eq_elements_init(struct hinic3_eq *eq, u32 init_val)
{
	if (eq->type == HINIC3_AEQ)
		aeq_elements_init(eq, init_val);
	else
		ceq_elements_init(eq, init_val);
}

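/* Allocate the queue pages of an EQ, program their DMA addresses into the
 * per-page address registers and initialize every element to the current
 * wrapped value.
 */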
static int alloc_eq_pages(struct hinic3_eq *eq)
{
	struct hinic3_hwif *hwif = eq->hwdev->hwif;
	struct hinic3_queue_pages *qpages;
	dma_addr_t page_paddr;
	u32 reg, init_val;
	u16 pg_idx;
	int err;

	qpages = &eq->qpages;
	err = hinic3_queue_pages_alloc(eq->hwdev, qpages, HINIC3_MIN_PAGE_SIZE);
	if (err)
		return err;

	for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
		page_paddr = qpages->pages[pg_idx].align_paddr;
		reg = EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx);
		hinic3_hwif_write_reg(hwif, reg, upper_32_bits(page_paddr));
		reg = EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx);
		hinic3_hwif_write_reg(hwif, reg, lower_32_bits(page_paddr));
	}

	init_val = HINIC3_EQ_WRAPPED(eq);
	eq_elements_init(eq, init_val);

	return 0;
}

static void eq_calc_page_size_and_num(struct hinic3_eq *eq, u32 elem_size)
{
	u32 max_pages, min_page_size, page_size, total_size;

	/* No need for complicated arithmetic. All values must be power of 2.
	 * Multiplications give power of 2 and divisions give power of 2 without
	 * remainder.
	 */
	max_pages = HINIC3_EQ_MAX_PAGES(eq);
	min_page_size = HINIC3_MIN_PAGE_SIZE;
	total_size = eq->eq_len * elem_size;

	if (total_size <= max_pages * min_page_size)
		page_size = min_page_size;
	else
		page_size = total_size / max_pages;

	hinic3_queue_pages_init(&eq->qpages, eq->eq_len, page_size, elem_size);
}

static int request_eq_irq(struct hinic3_eq *eq)
{
	int err;

	if (eq->type == HINIC3_AEQ) {
		INIT_WORK(&eq->aeq_work, aeq_irq_work);
		snprintf(eq->irq_name, sizeof(eq->irq_name),
			 "hinic3_aeq%u@pci:%s", eq->q_id,
			 pci_name(eq->hwdev->pdev));
		err = request_irq(eq->irq_id, aeq_interrupt, 0,
				  eq->irq_name, eq);
	} else {
		snprintf(eq->irq_name, sizeof(eq->irq_name),
			 "hinic3_ceq%u@pci:%s", eq->q_id,
			 pci_name(eq->hwdev->pdev));
		err = request_threaded_irq(eq->irq_id, NULL, ceq_interrupt,
					   IRQF_ONESHOT, eq->irq_name, eq);
	}

	return err;
}

static void reset_eq(struct hinic3_eq *eq)
{
	/* clear eq_len to force eqe drop in hardware */
	if (eq->type == HINIC3_AEQ)
		hinic3_hwif_write_reg(eq->hwdev->hwif,
				      HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
	else
		hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);

	hinic3_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
}

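/* Initialize a single event queue: reset HW state, allocate and map the
 * queue pages, program the control registers and request its IRQ. The MSI-X
 * vector is left disabled until all EQs are ready.
 */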
static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id,
		   u32 q_len, enum hinic3_eq_type type,
		   struct msix_entry *msix_entry)
{
	u32 elem_size;
	int err;

	eq->hwdev = hwdev;
	eq->q_id = q_id;
	eq->type = type;
	eq->eq_len = q_len;

	/* Indirect access should set q_id first */
	hinic3_hwif_write_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
			      eq->q_id);

	reset_eq(eq);

	eq->cons_idx = 0;
	eq->wrapped = 0;

	elem_size = (type == HINIC3_AEQ) ? HINIC3_AEQE_SIZE : HINIC3_CEQE_SIZE;
	eq_calc_page_size_and_num(eq, elem_size);

	err = alloc_eq_pages(eq);
	if (err) {
		dev_err(hwdev->dev, "Failed to allocate pages for eq\n");
		return err;
	}

	eq->msix_entry_idx = msix_entry->entry;
	eq->irq_id = msix_entry->vector;

	err = set_eq_ctrls(eq);
	if (err) {
		dev_err(hwdev->dev, "Failed to set ctrls for eq\n");
		goto err_free_queue_pages;
	}

	set_eq_cons_idx(eq, HINIC3_EQ_ARMED);

	err = request_eq_irq(eq);
	if (err) {
		dev_err(hwdev->dev,
			"Failed to request irq for the eq, err: %d\n", err);
		goto err_free_queue_pages;
	}

	hinic3_set_msix_state(hwdev, eq->msix_entry_idx, HINIC3_MSIX_DISABLE);

	return 0;

err_free_queue_pages:
	hinic3_queue_pages_free(hwdev, &eq->qpages);

	return err;
}

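/* Tear down a single event queue: disable its MSI-X vector, free the IRQ,
 * stop HW access to host memory and release the queue pages.
 */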
static void remove_eq(struct hinic3_eq *eq)
{
	hinic3_set_msix_state(eq->hwdev, eq->msix_entry_idx,
			      HINIC3_MSIX_DISABLE);
	free_irq(eq->irq_id, eq);
	/* Indirect access should set q_id first */
	hinic3_hwif_write_reg(eq->hwdev->hwif,
			      HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
			      eq->q_id);

	if (eq->type == HINIC3_AEQ) {
		disable_work_sync(&eq->aeq_work);
		/* clear eq_len to avoid hw access host memory */
		hinic3_hwif_write_reg(eq->hwdev->hwif,
				      HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
	} else {
		hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
	}

	/* update consumer index to avoid invalid interrupt */
	eq->cons_idx = hinic3_hwif_read_reg(eq->hwdev->hwif,
					    EQ_PROD_IDX_REG_ADDR(eq));
	set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
	hinic3_queue_pages_free(eq->hwdev, &eq->qpages);
}

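/* Allocate and initialize all AEQs; MSI-X vectors are enabled only after
 * every queue has been set up successfully.
 */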
int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
		     struct msix_entry *msix_entries)
{
	struct hinic3_aeqs *aeqs;
	u16 q_id;
	int err;

	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
	if (!aeqs)
		return -ENOMEM;

	hwdev->aeqs = aeqs;
	aeqs->hwdev = hwdev;
	aeqs->num_aeqs = num_aeqs;
	aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM,
				      HINIC3_MAX_AEQS);
	if (!aeqs->workq) {
		dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n");
		err = -ENOMEM;
		goto err_free_aeqs;
	}

	for (q_id = 0; q_id < num_aeqs; q_id++) {
		err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
			      HINIC3_DEFAULT_AEQ_LEN, HINIC3_AEQ,
			      &msix_entries[q_id]);
		if (err) {
			dev_err(hwdev->dev, "Failed to init aeq %u\n",
				q_id);
			goto err_remove_eqs;
		}
	}
	for (q_id = 0; q_id < num_aeqs; q_id++)
		hinic3_set_msix_state(hwdev, aeqs->aeq[q_id].msix_entry_idx,
				      HINIC3_MSIX_ENABLE);

	return 0;

err_remove_eqs:
	while (q_id > 0) {
		q_id--;
		remove_eq(&aeqs->aeq[q_id]);
	}

	destroy_workqueue(aeqs->workq);

err_free_aeqs:
	kfree(aeqs);

	return err;
}

void hinic3_aeqs_free(struct hinic3_hwdev *hwdev)
{
	struct hinic3_aeqs *aeqs = hwdev->aeqs;
	enum hinic3_aeq_type aeq_event;
	struct hinic3_eq *eq;
	u16 q_id;

	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
		eq = aeqs->aeq + q_id;
		remove_eq(eq);
		hinic3_free_irq(hwdev, eq->irq_id);
	}

	for (aeq_event = 0; aeq_event < HINIC3_MAX_AEQ_EVENTS; aeq_event++)
		hinic3_aeq_unregister_cb(hwdev, aeq_event);

	destroy_workqueue(aeqs->workq);

	kfree(aeqs);
}

int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
		     struct msix_entry *msix_entries)
{
	struct hinic3_ceqs *ceqs;
	u16 q_id;
	int err;

	ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
	if (!ceqs)
		return -ENOMEM;

	hwdev->ceqs = ceqs;
	ceqs->hwdev = hwdev;
	ceqs->num_ceqs = num_ceqs;

	for (q_id = 0; q_id < num_ceqs; q_id++) {
		err = init_eq(&ceqs->ceq[q_id], hwdev, q_id,
			      HINIC3_DEFAULT_CEQ_LEN, HINIC3_CEQ,
			      &msix_entries[q_id]);
		if (err) {
			dev_err(hwdev->dev, "Failed to init ceq %u\n",
				q_id);
			goto err_free_ceqs;
		}
	}
	for (q_id = 0; q_id < num_ceqs; q_id++)
		hinic3_set_msix_state(hwdev, ceqs->ceq[q_id].msix_entry_idx,
				      HINIC3_MSIX_ENABLE);

	return 0;

err_free_ceqs:
	while (q_id > 0) {
		q_id--;
		remove_eq(&ceqs->ceq[q_id]);
	}

	kfree(ceqs);

	return err;
}

void hinic3_ceqs_free(struct hinic3_hwdev *hwdev)
{
	struct hinic3_ceqs *ceqs = hwdev->ceqs;
	enum hinic3_ceq_event ceq_event;
	struct hinic3_eq *eq;
	u16 q_id;

	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
		eq = ceqs->ceq + q_id;
		remove_eq(eq);
		hinic3_free_irq(hwdev, eq->irq_id);
	}

	for (ceq_event = 0; ceq_event < HINIC3_MAX_CEQ_EVENTS; ceq_event++)
		hinic3_ceq_unregister_cb(hwdev, ceq_event);

	kfree(ceqs);
}