xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c (revision ec2e0fb07d789976c601bec19ecced7a501c3705)
1a4511307SFan Gong // SPDX-License-Identifier: GPL-2.0
2a4511307SFan Gong // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3a4511307SFan Gong 
4a4511307SFan Gong #include <linux/delay.h>
5a4511307SFan Gong 
6a4511307SFan Gong #include "hinic3_csr.h"
7a4511307SFan Gong #include "hinic3_eqs.h"
8a4511307SFan Gong #include "hinic3_hwdev.h"
9a4511307SFan Gong #include "hinic3_hwif.h"
10a4511307SFan Gong #include "hinic3_mbox.h"
11a4511307SFan Gong 
/* AEQ control register 0: interrupt index, DMA attribute, PCI interface
 * and interrupt mode fields.
 */
#define AEQ_CTRL_0_INTR_IDX_MASK      GENMASK(9, 0)
#define AEQ_CTRL_0_DMA_ATTR_MASK      GENMASK(17, 12)
#define AEQ_CTRL_0_PCI_INTF_IDX_MASK  GENMASK(22, 20)
#define AEQ_CTRL_0_INTR_MODE_MASK     BIT(31)
#define AEQ_CTRL_0_SET(val, member)  \
	FIELD_PREP(AEQ_CTRL_0_##member##_MASK, val)

/* AEQ control register 1: queue length, element size and page size. */
#define AEQ_CTRL_1_LEN_MASK           GENMASK(20, 0)
#define AEQ_CTRL_1_ELEM_SIZE_MASK     GENMASK(25, 24)
#define AEQ_CTRL_1_PAGE_SIZE_MASK     GENMASK(31, 28)
#define AEQ_CTRL_1_SET(val, member)  \
	FIELD_PREP(AEQ_CTRL_1_##member##_MASK, val)

/* CEQ control register 0 field layout (set via mgmt CPU, see
 * hinic3_set_ceq_ctrl_reg()).
 */
#define CEQ_CTRL_0_INTR_IDX_MASK      GENMASK(9, 0)
#define CEQ_CTRL_0_DMA_ATTR_MASK      GENMASK(17, 12)
#define CEQ_CTRL_0_LIMIT_KICK_MASK    GENMASK(23, 20)
#define CEQ_CTRL_0_PCI_INTF_IDX_MASK  GENMASK(25, 24)
#define CEQ_CTRL_0_PAGE_SIZE_MASK     GENMASK(30, 27)
#define CEQ_CTRL_0_INTR_MODE_MASK     BIT(31)
#define CEQ_CTRL_0_SET(val, member)  \
	FIELD_PREP(CEQ_CTRL_0_##member##_MASK, val)

/* CEQ control register 1: queue length only. */
#define CEQ_CTRL_1_LEN_MASK           GENMASK(19, 0)
#define CEQ_CTRL_1_SET(val, member)  \
	FIELD_PREP(CEQ_CTRL_1_##member##_MASK, val)

/* A CEQ element is a single 32-bit word: event type in bits 25:23,
 * event payload in the low 26 bits (CEQE_DATA keeps it little-endian).
 */
#define CEQE_TYPE_MASK                GENMASK(25, 23)
#define CEQE_TYPE(type)  \
	FIELD_GET(CEQE_TYPE_MASK, le32_to_cpu(type))

#define CEQE_DATA_MASK                GENMASK(25, 0)
#define CEQE_DATA(data)               ((data) & cpu_to_le32(CEQE_DATA_MASK))

/* Common EQ element descriptor fields (first word of an element). */
#define EQ_ELEM_DESC_TYPE_MASK        GENMASK(6, 0)
#define EQ_ELEM_DESC_SRC_MASK         BIT(7)
#define EQ_ELEM_DESC_SIZE_MASK        GENMASK(15, 8)
#define EQ_ELEM_DESC_WRAPPED_MASK     BIT(31)
#define EQ_ELEM_DESC_GET(val, member)  \
	FIELD_GET(EQ_ELEM_DESC_##member##_MASK, le32_to_cpu(val))

/* Simple-indirect consumer index register: CI value, arm bit and the
 * queue selector (AEQ and CEQ use different index fields).
 */
#define EQ_CI_SIMPLE_INDIR_CI_MASK       GENMASK(20, 0)
#define EQ_CI_SIMPLE_INDIR_ARMED_MASK    BIT(21)
#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK  GENMASK(31, 30)
#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK  GENMASK(31, 24)
#define EQ_CI_SIMPLE_INDIR_SET(val, member)  \
	FIELD_PREP(EQ_CI_SIMPLE_INDIR_##member##_MASK, val)

/* Per-type register address selectors. */
#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq)  \
	(((eq)->type == HINIC3_AEQ) ?  \
	 HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR :  \
	 HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR)

#define EQ_PROD_IDX_REG_ADDR(eq)  \
	(((eq)->type == HINIC3_AEQ) ?  \
	 HINIC3_CSR_AEQ_PROD_IDX_ADDR : HINIC3_CSR_CEQ_PROD_IDX_ADDR)

#define EQ_HI_PHYS_ADDR_REG(type, pg_num)  \
	(((type) == HINIC3_AEQ) ?  \
	       HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) :  \
	       HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num))

#define EQ_LO_PHYS_ADDR_REG(type, pg_num)  \
	(((type) == HINIC3_AEQ) ?  \
	       HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) :  \
	       HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num))

/* Value written to clear the MSI-X resend timer in the interrupt path. */
#define EQ_MSIX_RESEND_TIMER_CLEAR  1

#define HINIC3_EQ_MAX_PAGES(eq)  \
	((eq)->type == HINIC3_AEQ ?  \
	 HINIC3_AEQ_MAX_PAGES : HINIC3_CEQ_MAX_PAGES)

/* Limit of elements handled per IRQ/work invocation before yielding. */
#define HINIC3_TASK_PROCESS_EQE_LIMIT  1024
/* Consumer index is flushed to HW every this many processed elements. */
#define HINIC3_EQ_UPDATE_CI_STEP       64
#define HINIC3_EQS_WQ_NAME             "hinic3_eqs"

/* The wrapped (valid) flag toggles each time the queue wraps; HW and SW
 * compare it to tell new elements from stale ones.
 */
#define HINIC3_EQ_VALID_SHIFT          31
#define HINIC3_EQ_WRAPPED(eq)  \
	((eq)->wrapped << HINIC3_EQ_VALID_SHIFT)

#define HINIC3_EQ_WRAPPED_SHIFT        20
#define HINIC3_EQ_CONS_IDX(eq)  \
	((eq)->cons_idx | ((eq)->wrapped << HINIC3_EQ_WRAPPED_SHIFT))
95a4511307SFan Gong 
/* Return the AEQ element at the queue's current consumer index. */
static const struct hinic3_aeq_elem *get_curr_aeq_elem(const struct hinic3_eq *eq)
{
	return get_q_element(&eq->qpages, eq->cons_idx, NULL);
}
100a4511307SFan Gong 
/* Return the CEQ element (one big-endian 32-bit word) at the current
 * consumer index.
 */
static const __be32 *get_curr_ceq_elem(const struct hinic3_eq *eq)
{
	return get_q_element(&eq->qpages, eq->cons_idx, NULL);
}
105*c4bbfd9bSFan Gong 
/* Register @hwe_cb as the handler for AEQ event type @event.
 *
 * NOTE(review): spin_lock_init() runs on every registration, so the lock
 * is re-initialized each time a callback is added — looks safe only if
 * registration happens before events can be delivered; confirm callers.
 *
 * Return: always 0.
 */
int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
			   enum hinic3_aeq_type event,
			   hinic3_aeq_event_cb hwe_cb)
{
	struct hinic3_aeqs *aeqs;

	aeqs = hwdev->aeqs;
	aeqs->aeq_cb[event] = hwe_cb;
	spin_lock_init(&aeqs->aeq_lock);

	return 0;
}
118a4511307SFan Gong 
119a4511307SFan Gong void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
120a4511307SFan Gong 			      enum hinic3_aeq_type event)
121a4511307SFan Gong {
122a4511307SFan Gong 	struct hinic3_aeqs *aeqs;
123a4511307SFan Gong 
124a4511307SFan Gong 	aeqs = hwdev->aeqs;
125a4511307SFan Gong 
126a4511307SFan Gong 	spin_lock_bh(&aeqs->aeq_lock);
127a4511307SFan Gong 	aeqs->aeq_cb[event] = NULL;
128a4511307SFan Gong 	spin_unlock_bh(&aeqs->aeq_lock);
129a4511307SFan Gong }
130a4511307SFan Gong 
/* Register @callback as the handler for CEQ event type @event.
 *
 * NOTE(review): like the AEQ variant, spin_lock_init() runs on every
 * registration — confirm registration only happens before CEQ events
 * can be delivered.
 *
 * Return: always 0.
 */
int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
			   enum hinic3_ceq_event event,
			   hinic3_ceq_event_cb callback)
{
	struct hinic3_ceqs *ceqs;

	ceqs = hwdev->ceqs;
	ceqs->ceq_cb[event] = callback;
	spin_lock_init(&ceqs->ceq_lock);

	return 0;
}
143*c4bbfd9bSFan Gong 
144*c4bbfd9bSFan Gong void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
145*c4bbfd9bSFan Gong 			      enum hinic3_ceq_event event)
146*c4bbfd9bSFan Gong {
147*c4bbfd9bSFan Gong 	struct hinic3_ceqs *ceqs;
148*c4bbfd9bSFan Gong 
149*c4bbfd9bSFan Gong 	ceqs = hwdev->ceqs;
150*c4bbfd9bSFan Gong 
151*c4bbfd9bSFan Gong 	spin_lock_bh(&ceqs->ceq_lock);
152*c4bbfd9bSFan Gong 	ceqs->ceq_cb[event] = NULL;
153*c4bbfd9bSFan Gong 	spin_unlock_bh(&ceqs->ceq_lock);
154*c4bbfd9bSFan Gong }
155*c4bbfd9bSFan Gong 
156a4511307SFan Gong /* Set consumer index in the hw. */
157a4511307SFan Gong static void set_eq_cons_idx(struct hinic3_eq *eq, u32 arm_state)
158a4511307SFan Gong {
159*c4bbfd9bSFan Gong 	u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq);
160a4511307SFan Gong 	u32 eq_wrap_ci, val;
161a4511307SFan Gong 
162a4511307SFan Gong 	eq_wrap_ci = HINIC3_EQ_CONS_IDX(eq);
163*c4bbfd9bSFan Gong 	val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED);
164*c4bbfd9bSFan Gong 	if (eq->type == HINIC3_AEQ) {
165*c4bbfd9bSFan Gong 		val = val |
166a4511307SFan Gong 			EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
167a4511307SFan Gong 			EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX);
168*c4bbfd9bSFan Gong 	} else {
169*c4bbfd9bSFan Gong 		val = val |
170*c4bbfd9bSFan Gong 			EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
171*c4bbfd9bSFan Gong 			EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX);
172*c4bbfd9bSFan Gong 	}
173a4511307SFan Gong 
174a4511307SFan Gong 	hinic3_hwif_write_reg(eq->hwdev->hwif, addr, val);
175a4511307SFan Gong }
176a4511307SFan Gong 
/* Recover the containing hinic3_ceqs from one of its embedded CEQs;
 * relies on eq->q_id being the eq's index within ceqs->ceq[].
 */
static struct hinic3_ceqs *ceq_to_ceqs(const struct hinic3_eq *eq)
{
	return container_of(eq, struct hinic3_ceqs, ceq[eq->q_id]);
}
181*c4bbfd9bSFan Gong 
182*c4bbfd9bSFan Gong static void ceq_event_handler(struct hinic3_ceqs *ceqs, __le32 ceqe)
183*c4bbfd9bSFan Gong {
184*c4bbfd9bSFan Gong 	enum hinic3_ceq_event event = CEQE_TYPE(ceqe);
185*c4bbfd9bSFan Gong 	struct hinic3_hwdev *hwdev = ceqs->hwdev;
186*c4bbfd9bSFan Gong 	__le32 ceqe_data = CEQE_DATA(ceqe);
187*c4bbfd9bSFan Gong 
188*c4bbfd9bSFan Gong 	if (event >= HINIC3_MAX_CEQ_EVENTS) {
189*c4bbfd9bSFan Gong 		dev_warn(hwdev->dev, "Ceq unknown event:%d, ceqe data: 0x%x\n",
190*c4bbfd9bSFan Gong 			 event, ceqe_data);
191*c4bbfd9bSFan Gong 		return;
192*c4bbfd9bSFan Gong 	}
193*c4bbfd9bSFan Gong 
194*c4bbfd9bSFan Gong 	spin_lock_bh(&ceqs->ceq_lock);
195*c4bbfd9bSFan Gong 	if (ceqs->ceq_cb[event])
196*c4bbfd9bSFan Gong 		ceqs->ceq_cb[event](hwdev, ceqe_data);
197*c4bbfd9bSFan Gong 
198*c4bbfd9bSFan Gong 	spin_unlock_bh(&ceqs->ceq_lock);
199*c4bbfd9bSFan Gong }
200*c4bbfd9bSFan Gong 
/* Recover the containing hinic3_aeqs from one of its embedded AEQs;
 * relies on eq->q_id being the eq's index within aeqs->aeq[].
 */
static struct hinic3_aeqs *aeq_to_aeqs(const struct hinic3_eq *eq)
{
	return container_of(eq, struct hinic3_aeqs, aeq[eq->q_id]);
}
205a4511307SFan Gong 
/* Decode one AEQ element and invoke the registered event callback.
 * @aeqe:     element descriptor already byte-swapped to CPU order.
 * @aeqe_pos: the raw in-memory element holding the event payload.
 */
static void aeq_event_handler(struct hinic3_aeqs *aeqs, __le32 aeqe,
			      const struct hinic3_aeq_elem *aeqe_pos)
{
	struct hinic3_hwdev *hwdev = aeqs->hwdev;
	u8 data[HINIC3_AEQE_DATA_SIZE], size;
	enum hinic3_aeq_type event;
	hinic3_aeq_event_cb hwe_cb;

	/* Elements flagged with SRC are not handled here. */
	if (EQ_ELEM_DESC_GET(aeqe, SRC))
		return;

	event = EQ_ELEM_DESC_GET(aeqe, TYPE);
	if (event >= HINIC3_MAX_AEQ_EVENTS) {
		dev_warn(hwdev->dev, "Aeq unknown event:%d\n", event);
		return;
	}

	/* Payload is stored big-endian by HW; swap each 32-bit word. */
	memcpy(data, aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE);
	swab32_array((u32 *)data, HINIC3_AEQE_DATA_SIZE / sizeof(u32));
	size = EQ_ELEM_DESC_GET(aeqe, SIZE);

	/* Serialize with hinic3_aeq_unregister_cb(). */
	spin_lock_bh(&aeqs->aeq_lock);
	hwe_cb = aeqs->aeq_cb[event];
	if (hwe_cb)
		hwe_cb(aeqs->hwdev, data, size);
	spin_unlock_bh(&aeqs->aeq_lock);
}
233a4511307SFan Gong 
/* Drain pending AEQ elements, dispatching each to aeq_event_handler().
 *
 * Return: 0 when the queue was drained, -EAGAIN when the per-invocation
 * element limit was hit and the caller should reschedule.
 */
static int aeq_irq_handler(struct hinic3_eq *eq)
{
	const struct hinic3_aeq_elem *aeqe_pos;
	struct hinic3_aeqs *aeqs;
	u32 i, eqe_cnt = 0;
	__le32 aeqe;

	aeqs = aeq_to_aeqs(eq);
	for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
		aeqe_pos = get_curr_aeq_elem(eq);
		/* Descriptor is big-endian in memory; swab to CPU order. */
		aeqe = (__force __le32)swab32((__force __u32)aeqe_pos->desc);
		/* HW updates wrapped bit, when it adds eq element event */
		if (EQ_ELEM_DESC_GET(aeqe, WRAPPED) == eq->wrapped)
			return 0;

		/* Prevent speculative reads from element */
		dma_rmb();
		aeq_event_handler(aeqs, aeqe, aeqe_pos);
		eq->cons_idx++;
		/* Wrap the consumer index and flip the valid flag. */
		if (eq->cons_idx == eq->eq_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}

		/* Periodically publish CI so HW can reuse entries. */
		if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
			eqe_cnt = 0;
			set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
		}
	}

	return -EAGAIN;
}
266a4511307SFan Gong 
/* Drain pending CEQ elements, dispatching each to ceq_event_handler().
 * Unlike the AEQ path, no extra read barrier is used after the wrapped
 * check: a CEQ element is a single 32-bit word, so the payload is read
 * atomically with the valid bit.
 *
 * Return: 0 when the queue was drained, -EAGAIN when the per-invocation
 * element limit was hit and the caller should retry.
 */
static int ceq_irq_handler(struct hinic3_eq *eq)
{
	struct hinic3_ceqs *ceqs;
	u32 eqe_cnt = 0;
	__be32 ceqe_raw;
	__le32 ceqe;
	u32 i;

	ceqs = ceq_to_ceqs(eq);
	for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
		ceqe_raw = *get_curr_ceq_elem(eq);
		/* Element is big-endian in memory; swab to CPU order. */
		ceqe = (__force __le32)swab32((__force __u32)ceqe_raw);

		/* HW updates wrapped bit, when it adds eq element event */
		if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
			return 0;

		ceq_event_handler(ceqs, ceqe);
		eq->cons_idx++;
		/* Wrap the consumer index and flip the valid flag. */
		if (eq->cons_idx == eq->eq_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}

		/* Periodically publish CI so HW can reuse entries. */
		if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
			eqe_cnt = 0;
			set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
		}
	}

	return -EAGAIN;
}
299*c4bbfd9bSFan Gong 
/* Requeue the AEQ work item so remaining elements are processed later. */
static void reschedule_aeq_handler(struct hinic3_eq *eq)
{
	struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq);

	queue_work(aeqs->workq, &eq->aeq_work);
}
306a4511307SFan Gong 
307a4511307SFan Gong static int eq_irq_handler(struct hinic3_eq *eq)
308a4511307SFan Gong {
309a4511307SFan Gong 	int err;
310a4511307SFan Gong 
311*c4bbfd9bSFan Gong 	if (eq->type == HINIC3_AEQ)
312a4511307SFan Gong 		err = aeq_irq_handler(eq);
313*c4bbfd9bSFan Gong 	else
314*c4bbfd9bSFan Gong 		err = ceq_irq_handler(eq);
315a4511307SFan Gong 
316a4511307SFan Gong 	set_eq_cons_idx(eq, err ? HINIC3_EQ_NOT_ARMED :
317a4511307SFan Gong 			HINIC3_EQ_ARMED);
318a4511307SFan Gong 
319a4511307SFan Gong 	return err;
320a4511307SFan Gong }
321a4511307SFan Gong 
/* Workqueue function for AEQ processing; reschedules itself when the
 * drain limit was hit before the queue emptied.
 */
static void aeq_irq_work(struct work_struct *work)
{
	struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work);
	int err;

	err = eq_irq_handler(eq);
	if (err)
		reschedule_aeq_handler(eq);
}
331a4511307SFan Gong 
/* AEQ hard-IRQ handler: clear the MSI-X resend timer and defer the
 * actual element processing to the aeqs workqueue.
 */
static irqreturn_t aeq_interrupt(int irq, void *data)
{
	struct workqueue_struct *workq;
	struct hinic3_eq *aeq = data;
	struct hinic3_hwdev *hwdev;
	struct hinic3_aeqs *aeqs;

	aeqs = aeq_to_aeqs(aeq);
	hwdev = aeq->hwdev;

	/* clear resend timer cnt register */
	workq = aeqs->workq;
	hinic3_msix_intr_clear_resend_bit(hwdev, aeq->msix_entry_idx,
					  EQ_MSIX_RESEND_TIMER_CLEAR);
	queue_work(workq, &aeq->aeq_work);

	return IRQ_HANDLED;
}
350a4511307SFan Gong 
/* CEQ threaded-IRQ handler: drain the queue in thread context
 * (registered via request_threaded_irq() in request_eq_irq()).
 * Returns IRQ_NONE when the drain limit was hit so the IRQ core may
 * re-deliver the interrupt.
 */
static irqreturn_t ceq_interrupt(int irq, void *data)
{
	struct hinic3_eq *ceq = data;
	int err;

	/* clear resend timer counters */
	hinic3_msix_intr_clear_resend_bit(ceq->hwdev, ceq->msix_entry_idx,
					  EQ_MSIX_RESEND_TIMER_CLEAR);
	err = eq_irq_handler(ceq);
	if (err)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
365*c4bbfd9bSFan Gong 
/* Program CEQ control registers 0/1 for queue @q_id by sending a
 * mailbox command to the management CPU (CEQ registers are not directly
 * writable by the driver, unlike the AEQ ones).
 *
 * Return: 0 on success, -EFAULT on mailbox or firmware status failure.
 */
static int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id,
				   u32 ctrl0, u32 ctrl1)
{
	struct comm_cmd_set_ceq_ctrl_reg ceq_ctrl = {};
	struct mgmt_msg_params msg_params = {};
	int err;

	ceq_ctrl.func_id = hinic3_global_func_id(hwdev);
	ceq_ctrl.q_id = q_id;
	ceq_ctrl.ctrl0 = ctrl0;
	ceq_ctrl.ctrl1 = ctrl1;

	mgmt_msg_params_init_default(&msg_params, &ceq_ctrl, sizeof(ceq_ctrl));

	err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
				       COMM_CMD_SET_CEQ_CTRL_REG, &msg_params);
	/* Both transport errors and non-zero firmware status are failures. */
	if (err || ceq_ctrl.head.status) {
		dev_err(hwdev->dev, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x\n",
			q_id, err, ceq_ctrl.head.status);
		return -EFAULT;
	}

	return 0;
}
390*c4bbfd9bSFan Gong 
/* Program the EQ control registers: interrupt routing, element size,
 * queue length and page size. AEQ registers are written directly by the
 * driver; CEQ registers must be set through the management CPU.
 *
 * Return: 0 on success, negative errno from the CEQ mailbox path.
 */
static int set_eq_ctrls(struct hinic3_eq *eq)
{
	struct hinic3_hwif *hwif = eq->hwdev->hwif;
	struct hinic3_queue_pages *qpages;
	u8 pci_intf_idx, elem_size;
	u32 mask, ctrl0, ctrl1;
	u32 page_size_val;
	int err;

	qpages = &eq->qpages;
	/* HW encodes page size as log2 of multiples of the minimum page. */
	page_size_val = ilog2(qpages->page_size / HINIC3_MIN_PAGE_SIZE);
	pci_intf_idx = hwif->attr.pci_intf_idx;

	if (eq->type == HINIC3_AEQ) {
		/* set ctrl0 using read-modify-write */
		mask = AEQ_CTRL_0_INTR_IDX_MASK |
		       AEQ_CTRL_0_DMA_ATTR_MASK |
		       AEQ_CTRL_0_PCI_INTF_IDX_MASK |
		       AEQ_CTRL_0_INTR_MODE_MASK;
		ctrl0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR);
		ctrl0 = (ctrl0 & ~mask) |
			AEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
			AEQ_CTRL_0_SET(0, DMA_ATTR) |
			AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
			AEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
		hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR, ctrl0);

		/* HW expects log2(number of 32 byte units). */
		elem_size = qpages->elem_size_shift - 5;
		ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
			AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
			AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
		hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_1_ADDR, ctrl1);
	} else {
		ctrl0 = CEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
			CEQ_CTRL_0_SET(0, DMA_ATTR) |
			CEQ_CTRL_0_SET(0, LIMIT_KICK) |
			CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
			CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) |
			CEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);

		ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN);

		/* set ceq ctrl reg through mgmt cpu */
		err = hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0,
					      ctrl1);
		if (err)
			return err;
	}

	return 0;
}
443a4511307SFan Gong 
/* Initialize every CEQ element to @init_val (only the wrapped/valid bit
 * is meaningful at init) so stale entries are not mistaken for events.
 */
static void ceq_elements_init(struct hinic3_eq *eq, u32 init_val)
{
	__be32 *ceqe;
	u32 i;

	for (i = 0; i < eq->eq_len; i++) {
		ceqe = get_q_element(&eq->qpages, i, NULL);
		*ceqe = cpu_to_be32(init_val);
	}

	wmb();    /* Clear ceq elements bit */
}
456*c4bbfd9bSFan Gong 
/* Initialize every AEQ element descriptor to @init_val (only the
 * wrapped/valid bit is meaningful at init) so stale entries are not
 * mistaken for events.
 */
static void aeq_elements_init(struct hinic3_eq *eq, u32 init_val)
{
	struct hinic3_aeq_elem *aeqe;
	u32 i;

	for (i = 0; i < eq->eq_len; i++) {
		aeqe = get_q_element(&eq->qpages, i, NULL);
		aeqe->desc = cpu_to_be32(init_val);
	}

	wmb();    /* Clear aeq elements bit */
}
469a4511307SFan Gong 
470a4511307SFan Gong static void eq_elements_init(struct hinic3_eq *eq, u32 init_val)
471a4511307SFan Gong {
472*c4bbfd9bSFan Gong 	if (eq->type == HINIC3_AEQ)
473a4511307SFan Gong 		aeq_elements_init(eq, init_val);
474*c4bbfd9bSFan Gong 	else
475*c4bbfd9bSFan Gong 		ceq_elements_init(eq, init_val);
476a4511307SFan Gong }
477a4511307SFan Gong 
/* Allocate the DMA pages backing @eq, publish their physical addresses
 * to the per-page HW registers, and initialize all elements with the
 * current wrapped/valid flag.
 *
 * Return: 0 on success, negative errno on allocation failure.
 */
static int alloc_eq_pages(struct hinic3_eq *eq)
{
	struct hinic3_hwif *hwif = eq->hwdev->hwif;
	struct hinic3_queue_pages *qpages;
	dma_addr_t page_paddr;
	u32 reg, init_val;
	u16 pg_idx;
	int err;

	qpages = &eq->qpages;
	err = hinic3_queue_pages_alloc(eq->hwdev, qpages, HINIC3_MIN_PAGE_SIZE);
	if (err)
		return err;

	/* Tell HW where each queue page lives (split hi/lo 32 bits). */
	for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
		page_paddr = qpages->pages[pg_idx].align_paddr;
		reg = EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx);
		hinic3_hwif_write_reg(hwif, reg, upper_32_bits(page_paddr));
		reg = EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx);
		hinic3_hwif_write_reg(hwif, reg, lower_32_bits(page_paddr));
	}

	init_val = HINIC3_EQ_WRAPPED(eq);
	eq_elements_init(eq, init_val);

	return 0;
}
505a4511307SFan Gong 
/* Choose the smallest page size that fits eq_len elements within the
 * HW's per-type page-count limit, and initialize eq->qpages with it.
 */
static void eq_calc_page_size_and_num(struct hinic3_eq *eq, u32 elem_size)
{
	u32 max_pages, min_page_size, page_size, total_size;

	/* No need for complicated arithmetic. All values must be power of 2.
	 * Multiplications give power of 2 and divisions give power of 2 without
	 * remainder.
	 */
	max_pages = HINIC3_EQ_MAX_PAGES(eq);
	min_page_size = HINIC3_MIN_PAGE_SIZE;
	total_size = eq->eq_len * elem_size;

	if (total_size <= max_pages * min_page_size)
		page_size = min_page_size;
	else
		page_size = total_size / max_pages;

	hinic3_queue_pages_init(&eq->qpages, eq->eq_len, page_size, elem_size);
}
525a4511307SFan Gong 
/* Hook up the EQ's MSI-X vector. AEQs process events from a workqueue
 * (hard IRQ only queues work); CEQs use a threaded IRQ so the drain
 * loop runs in thread context.
 *
 * Return: 0 on success, negative errno from request_*irq().
 */
static int request_eq_irq(struct hinic3_eq *eq)
{
	int err;

	if (eq->type == HINIC3_AEQ) {
		INIT_WORK(&eq->aeq_work, aeq_irq_work);
		snprintf(eq->irq_name, sizeof(eq->irq_name),
			 "hinic3_aeq%u@pci:%s", eq->q_id,
			 pci_name(eq->hwdev->pdev));
		err = request_irq(eq->irq_id, aeq_interrupt, 0,
				  eq->irq_name, eq);
	} else {
		snprintf(eq->irq_name, sizeof(eq->irq_name),
			 "hinic3_ceq%u@pci:%s", eq->q_id,
			 pci_name(eq->hwdev->pdev));
		/* No hard handler: IRQF_ONESHOT keeps the line masked until
		 * ceq_interrupt() finishes in thread context.
		 */
		err = request_threaded_irq(eq->irq_id, NULL, ceq_interrupt,
					   IRQF_ONESHOT, eq->irq_name, eq);
	}

	return err;
}
547a4511307SFan Gong 
/* Quiesce the EQ in hardware before (re)initialization. */
static void reset_eq(struct hinic3_eq *eq)
{
	/* clear eq_len to force eqe drop in hardware */
	if (eq->type == HINIC3_AEQ)
		hinic3_hwif_write_reg(eq->hwdev->hwif,
				      HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
	else
		/* CEQ length register is only reachable via the mgmt CPU. */
		hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);

	hinic3_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
}
559a4511307SFan Gong 
/* Fully initialize one event queue: reset HW state, allocate and map
 * the queue pages, program control registers, arm the CI and request
 * the IRQ (left masked; callers enable MSI-X when ready).
 *
 * @q_id/@q_len/@type: queue identity, element count and AEQ/CEQ type.
 * @msix_entry: supplies both the MSI-X entry index and the IRQ number.
 *
 * Return: 0 on success, negative errno; on failure all acquired
 * resources are released.
 */
static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id,
		   u32 q_len, enum hinic3_eq_type type,
		   struct msix_entry *msix_entry)
{
	u32 elem_size;
	int err;

	eq->hwdev = hwdev;
	eq->q_id = q_id;
	eq->type = type;
	eq->eq_len = q_len;

	/* Indirect access should set q_id first */
	hinic3_hwif_write_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
			      eq->q_id);

	reset_eq(eq);

	eq->cons_idx = 0;
	eq->wrapped = 0;

	elem_size = (type == HINIC3_AEQ) ? HINIC3_AEQE_SIZE : HINIC3_CEQE_SIZE;
	eq_calc_page_size_and_num(eq, elem_size);

	err = alloc_eq_pages(eq);
	if (err) {
		dev_err(hwdev->dev, "Failed to allocate pages for eq\n");
		return err;
	}

	eq->msix_entry_idx = msix_entry->entry;
	eq->irq_id = msix_entry->vector;

	err = set_eq_ctrls(eq);
	if (err) {
		dev_err(hwdev->dev, "Failed to set ctrls for eq\n");
		goto err_free_queue_pages;
	}

	set_eq_cons_idx(eq, HINIC3_EQ_ARMED);

	err = request_eq_irq(eq);
	if (err) {
		dev_err(hwdev->dev,
			"Failed to request irq for the eq, err: %d\n", err);
		goto err_free_queue_pages;
	}

	/* Keep the vector masked until the caller enables it explicitly. */
	hinic3_set_msix_state(hwdev, eq->msix_entry_idx, HINIC3_MSIX_DISABLE);

	return 0;

err_free_queue_pages:
	hinic3_queue_pages_free(hwdev, &eq->qpages);

	return err;
}
617a4511307SFan Gong 
/* Tear down one event queue: mask and free its IRQ, quiesce it in HW,
 * sync the consumer index with the producer to avoid spurious
 * interrupts, and free the queue pages.
 */
static void remove_eq(struct hinic3_eq *eq)
{
	hinic3_set_msix_state(eq->hwdev, eq->msix_entry_idx,
			      HINIC3_MSIX_DISABLE);
	free_irq(eq->irq_id, eq);
	/* Indirect access should set q_id first */
	hinic3_hwif_write_reg(eq->hwdev->hwif,
			      HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
			      eq->q_id);

	if (eq->type == HINIC3_AEQ) {
		/* No more IRQs can queue work; drop any pending work item. */
		disable_work_sync(&eq->aeq_work);
		/* clear eq_len to avoid hw access host memory */
		hinic3_hwif_write_reg(eq->hwdev->hwif,
				      HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
	} else {
		hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
	}

	/* update consumer index to avoid invalid interrupt */
	eq->cons_idx = hinic3_hwif_read_reg(eq->hwdev->hwif,
					    EQ_PROD_IDX_REG_ADDR(eq));
	set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
	hinic3_queue_pages_free(eq->hwdev, &eq->qpages);
}
643a4511307SFan Gong 
/* Allocate and initialize all AEQs for @hwdev: create the shared
 * workqueue, init each queue with its MSI-X entry, then enable the
 * vectors only after every queue is ready.
 *
 * Return: 0 on success, negative errno; partial initialization is
 * fully rolled back on failure.
 */
int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
		     struct msix_entry *msix_entries)
{
	struct hinic3_aeqs *aeqs;
	u16 q_id;
	int err;

	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
	if (!aeqs)
		return -ENOMEM;

	hwdev->aeqs = aeqs;
	aeqs->hwdev = hwdev;
	aeqs->num_aeqs = num_aeqs;
	aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM,
				      HINIC3_MAX_AEQS);
	if (!aeqs->workq) {
		dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n");
		err = -ENOMEM;
		goto err_free_aeqs;
	}

	for (q_id = 0; q_id < num_aeqs; q_id++) {
		err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
			      HINIC3_DEFAULT_AEQ_LEN, HINIC3_AEQ,
			      &msix_entries[q_id]);
		if (err) {
			dev_err(hwdev->dev, "Failed to init aeq %u\n",
				q_id);
			goto err_remove_eqs;
		}
	}
	/* Unmask vectors only once all queues are fully set up. */
	for (q_id = 0; q_id < num_aeqs; q_id++)
		hinic3_set_msix_state(hwdev, aeqs->aeq[q_id].msix_entry_idx,
				      HINIC3_MSIX_ENABLE);

	return 0;

err_remove_eqs:
	/* Unwind only the queues that were successfully initialized. */
	while (q_id > 0) {
		q_id--;
		remove_eq(&aeqs->aeq[q_id]);
	}

	destroy_workqueue(aeqs->workq);

err_free_aeqs:
	kfree(aeqs);

	return err;
}
695a4511307SFan Gong 
696a4511307SFan Gong void hinic3_aeqs_free(struct hinic3_hwdev *hwdev)
697a4511307SFan Gong {
698a4511307SFan Gong 	struct hinic3_aeqs *aeqs = hwdev->aeqs;
699a4511307SFan Gong 	enum hinic3_aeq_type aeq_event;
700a4511307SFan Gong 	struct hinic3_eq *eq;
701a4511307SFan Gong 	u16 q_id;
702a4511307SFan Gong 
703a4511307SFan Gong 	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
704a4511307SFan Gong 		eq = aeqs->aeq + q_id;
705a4511307SFan Gong 		remove_eq(eq);
706a4511307SFan Gong 		hinic3_free_irq(hwdev, eq->irq_id);
707a4511307SFan Gong 	}
708a4511307SFan Gong 
709a4511307SFan Gong 	for (aeq_event = 0; aeq_event < HINIC3_MAX_AEQ_EVENTS; aeq_event++)
710a4511307SFan Gong 		hinic3_aeq_unregister_cb(hwdev, aeq_event);
711a4511307SFan Gong 
712a4511307SFan Gong 	destroy_workqueue(aeqs->workq);
713a4511307SFan Gong 
714a4511307SFan Gong 	kfree(aeqs);
715a4511307SFan Gong }
716*c4bbfd9bSFan Gong 
717*c4bbfd9bSFan Gong int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
718*c4bbfd9bSFan Gong 		     struct msix_entry *msix_entries)
719*c4bbfd9bSFan Gong {
720*c4bbfd9bSFan Gong 	struct hinic3_ceqs *ceqs;
721*c4bbfd9bSFan Gong 	u16 q_id;
722*c4bbfd9bSFan Gong 	int err;
723*c4bbfd9bSFan Gong 
724*c4bbfd9bSFan Gong 	ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
725*c4bbfd9bSFan Gong 	if (!ceqs)
726*c4bbfd9bSFan Gong 		return -ENOMEM;
727*c4bbfd9bSFan Gong 
728*c4bbfd9bSFan Gong 	hwdev->ceqs = ceqs;
729*c4bbfd9bSFan Gong 	ceqs->hwdev = hwdev;
730*c4bbfd9bSFan Gong 	ceqs->num_ceqs = num_ceqs;
731*c4bbfd9bSFan Gong 
732*c4bbfd9bSFan Gong 	for (q_id = 0; q_id < num_ceqs; q_id++) {
733*c4bbfd9bSFan Gong 		err = init_eq(&ceqs->ceq[q_id], hwdev, q_id,
734*c4bbfd9bSFan Gong 			      HINIC3_DEFAULT_CEQ_LEN, HINIC3_CEQ,
735*c4bbfd9bSFan Gong 			      &msix_entries[q_id]);
736*c4bbfd9bSFan Gong 		if (err) {
737*c4bbfd9bSFan Gong 			dev_err(hwdev->dev, "Failed to init ceq %u\n",
738*c4bbfd9bSFan Gong 				q_id);
739*c4bbfd9bSFan Gong 			goto err_free_ceqs;
740*c4bbfd9bSFan Gong 		}
741*c4bbfd9bSFan Gong 	}
742*c4bbfd9bSFan Gong 	for (q_id = 0; q_id < num_ceqs; q_id++)
743*c4bbfd9bSFan Gong 		hinic3_set_msix_state(hwdev, ceqs->ceq[q_id].msix_entry_idx,
744*c4bbfd9bSFan Gong 				      HINIC3_MSIX_ENABLE);
745*c4bbfd9bSFan Gong 
746*c4bbfd9bSFan Gong 	return 0;
747*c4bbfd9bSFan Gong 
748*c4bbfd9bSFan Gong err_free_ceqs:
749*c4bbfd9bSFan Gong 	while (q_id > 0) {
750*c4bbfd9bSFan Gong 		q_id--;
751*c4bbfd9bSFan Gong 		remove_eq(&ceqs->ceq[q_id]);
752*c4bbfd9bSFan Gong 	}
753*c4bbfd9bSFan Gong 
754*c4bbfd9bSFan Gong 	kfree(ceqs);
755*c4bbfd9bSFan Gong 
756*c4bbfd9bSFan Gong 	return err;
757*c4bbfd9bSFan Gong }
758*c4bbfd9bSFan Gong 
759*c4bbfd9bSFan Gong void hinic3_ceqs_free(struct hinic3_hwdev *hwdev)
760*c4bbfd9bSFan Gong {
761*c4bbfd9bSFan Gong 	struct hinic3_ceqs *ceqs = hwdev->ceqs;
762*c4bbfd9bSFan Gong 	enum hinic3_ceq_event ceq_event;
763*c4bbfd9bSFan Gong 	struct hinic3_eq *eq;
764*c4bbfd9bSFan Gong 	u16 q_id;
765*c4bbfd9bSFan Gong 
766*c4bbfd9bSFan Gong 	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
767*c4bbfd9bSFan Gong 		eq = ceqs->ceq + q_id;
768*c4bbfd9bSFan Gong 		remove_eq(eq);
769*c4bbfd9bSFan Gong 		hinic3_free_irq(hwdev, eq->irq_id);
770*c4bbfd9bSFan Gong 	}
771*c4bbfd9bSFan Gong 
772*c4bbfd9bSFan Gong 	for (ceq_event = 0; ceq_event < HINIC3_MAX_CEQ_EVENTS; ceq_event++)
773*c4bbfd9bSFan Gong 		hinic3_ceq_unregister_cb(hwdev, ceq_event);
774*c4bbfd9bSFan Gong 
775*c4bbfd9bSFan Gong 	kfree(ceqs);
776*c4bbfd9bSFan Gong }
777