xref: /freebsd/sys/dev/qlnx/qlnxe/ecore_int.c (revision 5944f899a2519c6321bac3c17cc076418643a088)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 /*
28  * File : ecore_int.c
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 
34 #include "bcm_osal.h"
35 #include "ecore.h"
36 #include "ecore_spq.h"
37 #include "reg_addr.h"
38 #include "ecore_gtt_reg_addr.h"
39 #include "ecore_init_ops.h"
40 #include "ecore_rt_defs.h"
41 #include "ecore_int.h"
42 #include "reg_addr.h"
43 #include "ecore_hw.h"
44 #include "ecore_sriov.h"
45 #include "ecore_vf.h"
46 #include "ecore_hw_defs.h"
47 #include "ecore_hsi_common.h"
48 #include "ecore_mcp.h"
49 #include "ecore_dbg_fw_funcs.h"
50 
51 #ifdef DIAG
52 /* This is nasty, but diag uses drv_dbg_fw_funcs.c [the non-ecore flavor],
53  * so the functions lack the ecore prefix.
54  * If other clients ever need this [or if the amount of content there that
55  * isn't really optional grows], we'll need to re-think this.
56  */
57 enum dbg_status dbg_read_attn(struct ecore_hwfn *dev,
58 							  struct ecore_ptt *ptt,
59 							  enum block_id block,
60 							  enum dbg_attn_type attn_type,
61 							  bool clear_status,
62 							  struct dbg_attn_block_result *results);
63 
64 enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev,
65 							   struct dbg_attn_block_result *results);
66 
67 #define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \
68 	dbg_read_attn(hwfn, ptt, id, type, clear, results)
69 #define ecore_dbg_parse_attn(hwfn, results) \
70 	dbg_parse_attn(hwfn, results)
71 #endif
72 
73 struct ecore_pi_info {
74 	ecore_int_comp_cb_t comp_cb;
75 	void *cookie; /* Will be sent to the completion callback function */
76 };
77 
78 struct ecore_sb_sp_info {
79 	struct ecore_sb_info sb_info;
80 	/* per protocol index data */
81 	struct ecore_pi_info pi_info_arr[PIS_PER_SB];
82 };
83 
84 enum ecore_attention_type {
85 	ECORE_ATTN_TYPE_ATTN,
86 	ECORE_ATTN_TYPE_PARITY,
87 };
88 
89 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
90 	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
91 
92 struct aeu_invert_reg_bit {
93 	char bit_name[30];
94 
95 #define ATTENTION_PARITY		(1 << 0)
96 
97 #define ATTENTION_LENGTH_MASK		(0x00000ff0)
98 #define ATTENTION_LENGTH_SHIFT		(4)
99 #define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
100 					 ATTENTION_LENGTH_SHIFT)
101 #define ATTENTION_SINGLE		(1 << ATTENTION_LENGTH_SHIFT)
102 #define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
103 #define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
104 					 ATTENTION_PARITY)
105 
106 /* For multi-bit sources, the printed index in the name starts at this offset */
107 #define ATTENTION_OFFSET_MASK		(0x000ff000)
108 #define ATTENTION_OFFSET_SHIFT		(12)
109 
110 #define ATTENTION_BB_MASK		(0x00700000)
111 #define ATTENTION_BB_SHIFT		(20)
112 #define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
113 #define ATTENTION_BB_DIFFERENT		(1 << 23)
114 
115 #define	ATTENTION_CLEAR_ENABLE		(1 << 28)
116 	unsigned int flags;
117 
118 	/* Callback to invoke when the attention is triggered */
119 	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
120 
121 	enum block_id block_index;
122 };
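
/* Illustrative flags encodings, as used by the aeu_descs[] table below:
 * - "SW timers #%d" uses (8 << ATTENTION_LENGTH_SHIFT) |
 *   (1 << ATTENTION_OFFSET_SHIFT): eight consecutive AEU bits whose printed
 *   index starts at 1, so ATTENTION_LENGTH(flags) == 8.
 * - ATTENTION_PAR_INT describes a two-bit source: a parity bit followed by
 *   an interrupt bit.
 */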
123 
124 struct aeu_invert_reg {
125 	struct aeu_invert_reg_bit bits[32];
126 };
127 
128 #define MAX_ATTN_GRPS		(8)
129 #define NUM_ATTN_REGS		(9)
130 
131 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
132 {
133 	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
134 
135 	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
136 		tmp);
137 	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
138 		 0xffffffff);
139 
140 	return ECORE_SUCCESS;
141 }
142 
143 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK		(0x3c000)
144 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT	(14)
145 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK		(0x03fc0)
146 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT	(6)
147 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK	(0x00020)
148 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT	(5)
149 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK	(0x0001e)
150 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT	(1)
151 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK	(0x1)
152 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT	(0)
153 #define ECORE_PSWHST_ATTENTION_VF_DISABLED		(0x1)
154 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
155 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK 	(0x1)
156 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT	(0)
157 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK	(0x1e)
158 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT	(1)
159 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x20)
160 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
161 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK	(0x3fc0)
162 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT	(6)
163 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK	(0x3c000)
164 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT	(14)
165 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK	(0x3fc0000)
166 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
167 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
168 {
169 	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_VF_DISABLED_ERROR_VALID);
170 
171 	/* Disabled VF access */
172 	if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
173 		u32 addr, data;
174 
175 		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
176 				PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
177 		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
178 				PSWHST_REG_VF_DISABLED_ERROR_DATA);
179 		DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n",
180 			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >>
181 			     ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
182 			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >>
183 			     ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
184 			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
185 			     ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
186 			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
187 			     ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
188 			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
189 			     ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
190 			addr);
191 	}
192 
193 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
194 		       PSWHST_REG_INCORRECT_ACCESS_VALID);
195 	if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
196 		u32 addr, data, length;
197 
198 		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
199 				PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
200 		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
201 				PSWHST_REG_INCORRECT_ACCESS_DATA);
202 		length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
203 				  PSWHST_REG_INCORRECT_ACCESS_LENGTH);
204 
205 		DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
206 			addr, length,
207 			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
208 			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
209 			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
210 			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
211 			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
212 			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
213 			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
214 			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
215 			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
216 			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
217 			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
218 			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
219 			data);
220 	}
221 
222 	/* TODO - We know 'some' of these are legal due to virtualization,
223 	 * but is it true for all of them?
224 	 */
225 	return ECORE_SUCCESS;
226 }
227 
228 #define ECORE_GRC_ATTENTION_VALID_BIT		(1 << 0)
229 #define ECORE_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff << 0)
230 #define ECORE_GRC_ATTENTION_RDWR_BIT		(1 << 23)
231 #define ECORE_GRC_ATTENTION_MASTER_MASK		(0xf << 24)
232 #define ECORE_GRC_ATTENTION_MASTER_SHIFT	(24)
233 #define ECORE_GRC_ATTENTION_PF_MASK		(0xf)
234 #define ECORE_GRC_ATTENTION_VF_MASK		(0xff << 4)
235 #define ECORE_GRC_ATTENTION_VF_SHIFT		(4)
236 #define ECORE_GRC_ATTENTION_PRIV_MASK		(0x3 << 14)
237 #define ECORE_GRC_ATTENTION_PRIV_SHIFT		(14)
238 #define ECORE_GRC_ATTENTION_PRIV_VF		(0)
239 static const char *grc_timeout_attn_master_to_str(u8 master)
240 {
241 	switch (master) {
242 	case 1: return "PXP";
243 	case 2: return "MCP";
244 	case 3: return "MSDM";
245 	case 4: return "PSDM";
246 	case 5: return "YSDM";
247 	case 6: return "USDM";
248 	case 7: return "TSDM";
249 	case 8: return "XSDM";
250 	case 9: return "DBU";
251 	case 10: return "DMAE";
252 	default:
253 		return "Unknown";
254 	}
255 }
256 
257 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
258 {
259 	u32 tmp, tmp2;
260 
261 	/* We've already cleared the timeout interrupt register, so we learn
262 	 * of interrupts via the validity register
263 	 */
264 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
265 		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
266 	if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
267 		goto out;
268 
269 	/* Read the GRC timeout information */
270 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
271 		       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
272 	tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
273 			GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
274 
275 	DP_INFO(p_hwfn->p_dev,
276 		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
277 		tmp2, tmp,
278 		(tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
279 		(tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
280 		grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
281 					       ECORE_GRC_ATTENTION_MASTER_SHIFT),
282 		(tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
283 		(((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
284 		  ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
285 		 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
286 		(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
287 		ECORE_GRC_ATTENTION_VF_SHIFT);
288 
289 out:
290 	/* Regardless of anything else, clear the validity bit */
291 	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
292 		 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
293 	return ECORE_SUCCESS;
294 }
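
/* Illustrative decode of a hypothetical DATA_0 value (not a real capture):
 * tmp = 0x01000004 -> RDWR bit (1 << 23) is clear, so "Read from";
 * address = (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2 = 0x10, since the
 * register holds a dword address; master = (tmp >> 24) & 0xf = 1, "PXP".
 */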
295 
296 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
297 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
298 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
299 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
300 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
301 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
302 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
303 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
304 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME	(1 << 22)
305 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
306 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
307 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
308 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
309 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
310 {
311 	u32 tmp;
312 
313 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
314 		       PGLUE_B_REG_TX_ERR_WR_DETAILS2);
315 	if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
316 		u32 addr_lo, addr_hi, details;
317 
318 		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
319 				   PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
320 		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
321 				   PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
322 		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
323 				   PGLUE_B_REG_TX_ERR_WR_DETAILS);
324 
325 		DP_INFO(p_hwfn, "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
326 			addr_hi, addr_lo, details,
327 			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
328 			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
329 			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
330 			tmp,
331 			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
332 			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
333 			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
334 	}
335 
336 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
337 		       PGLUE_B_REG_TX_ERR_RD_DETAILS2);
338 	if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
339 		u32 addr_lo, addr_hi, details;
340 
341 		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
342 				   PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
343 		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
344 				   PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
345 		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
346 				   PGLUE_B_REG_TX_ERR_RD_DETAILS);
347 
348 		DP_INFO(p_hwfn, "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
349 			addr_hi, addr_lo, details,
350 			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
351 			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
352 			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
353 			tmp,
354 			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
355 			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
356 			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
357 	}
358 
359 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
360 		       PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
361 	if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
362 		DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp);
363 
364 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
365 		       PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
366 	if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
367 		u32 addr_hi, addr_lo;
368 
369 		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
370 				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
371 		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
372 				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
373 
374 		DP_INFO(p_hwfn, "ICPL eror - %08x [Address %08x:%08x]\n",
375 			tmp, addr_hi, addr_lo);
376 	}
377 
378 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
379 		       PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
380 	if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
381 		u32 addr_hi, addr_lo, details;
382 
383 		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
384 				   PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
385 		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
386 				   PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
387 		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
388 				   PGLUE_B_REG_VF_ILT_ERR_DETAILS);
389 
390 		DP_INFO(p_hwfn, "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
391 			details, tmp, addr_hi, addr_lo);
392 	}
393 
394 	/* Clear the indications */
395 	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
396 		 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
397 
398 	return ECORE_SUCCESS;
399 }
400 
401 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
402 {
403 	DP_NOTICE(p_hwfn, false, "FW assertion!\n");
404 
405 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
406 
407 	return ECORE_INVAL;
408 }
409 
410 static enum _ecore_status_t
411 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
412 {
413 	DP_INFO(p_hwfn, "General attention 35!\n");
414 
415 	return ECORE_SUCCESS;
416 }
417 
418 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
419 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
420 #define ECORE_DORQ_ATTENTION_SIZE_MASK	 (0x7f0000)
421 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT	 (16)
422 
423 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
424 {
425 	u32 reason;
426 
427 	reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
428 		 ECORE_DORQ_ATTENTION_REASON_MASK;
429 	if (reason) {
430 		u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
431 				       DORQ_REG_DB_DROP_DETAILS);
432 
433 		DP_INFO(p_hwfn->p_dev,
434 			"DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
435 			 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
436 				  DORQ_REG_DB_DROP_DETAILS_ADDRESS),
437 			(u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
438 			((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
439 			 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
440 	}
441 
442 	return ECORE_INVAL;
443 }
444 
445 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
446 {
447 #ifndef ASIC_ONLY
448 	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
449 		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
450 				   TM_REG_INT_STS_1);
451 
452 		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
453 			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
454 			return ECORE_INVAL;
455 
456 		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
457 			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
458 			DP_INFO(p_hwfn, "TM attention on emulation - most likely results of clock-ratios\n");
459 		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
460 		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
461 		       TM_REG_INT_MASK_1_PEND_TASK_SCAN;
462 		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
463 
464 		return ECORE_SUCCESS;
465 	}
466 #endif
467 
468 	return ECORE_INVAL;
469 }
470 
471 /* Instead of major changes to the data-structure, we have some 'special'
472  * identifiers for sources that changed meaning between adapters.
473  */
474 enum aeu_invert_reg_special_type {
475 	AEU_INVERT_REG_SPECIAL_CNIG_0,
476 	AEU_INVERT_REG_SPECIAL_CNIG_1,
477 	AEU_INVERT_REG_SPECIAL_CNIG_2,
478 	AEU_INVERT_REG_SPECIAL_CNIG_3,
479 	AEU_INVERT_REG_SPECIAL_MAX,
480 };
481 
482 static struct aeu_invert_reg_bit
483 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
484 	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
485 	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
486 	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
487 	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
488 };
489 
490 /* Note: aeu_invert_reg must be defined in the same bit order as the HW. */
491 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] =
492 {
493 	{
494 		{	/* After Invert 1 */
495 			{"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
496 		}
497 	},
498 
499 	{
500 		{	/* After Invert 2 */
501 			{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
502 			{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
503 			{"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
504 			{"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
505 			{"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
506 			{"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
507 			{"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
508 			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
509 			{"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, BLOCK_PGLCS},
510 		}
511 	},
512 
513 	{
514 		{	/* After Invert 3 */
515 			{"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
516 		}
517 	},
518 
519 	{
520 		{	/* After Invert 4 */
521 			{"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_fw_assertion, MAX_BLOCK_ID},
522 			{"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
523 			{"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_general_attention_35, MAX_BLOCK_ID},
524 			{"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
525 				       ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), OSAL_NULL, BLOCK_NWS},
526 			{"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
527 					  ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL, BLOCK_NWS},
528 			{"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
529 				       ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL, BLOCK_NWM},
530 			{"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
531 					  ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL, BLOCK_NWM},
532 			{"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
533 			{"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
534 			{"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
535 			{"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
536 			{"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
537 			{"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
538 			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
539 			{"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
540 			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
541 			{"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
542 			{"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
543 			{"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
544 		}
545 	},
546 
547 	{
548 		{	/* After Invert 5 */
549 			{"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
550 			{"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
551 			{"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
552 			{"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
553 			{"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
554 			{"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
555 			{"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
556 			{"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
557 			{"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
558 			{"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
559 			{"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
560 			{"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
561 			{"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
562 			{"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
563 			{"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
564 			{"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
565 		}
566 	},
567 
568 	{
569 		{	/* After Invert 6 */
570 			{"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
571 			{"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
572 			{"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
573 			{"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
574 			{"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
575 			{"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
576 			{"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
577 			{"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
578 			{"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
579 			{"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
580 			{"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
581 			{"MULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
582 			{"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
583 			{"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
584 			{"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
585 			{"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
586 		}
587 	},
588 
589 	{
590 		{	/* After Invert 7 */
591 			{"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
592 			{"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
593 			{"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
594 			{"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
595 			{"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
596 			{"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
597 			{"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
598 			{"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
599 			{"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
600 			{"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
601 			{"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
602 			{"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
603 			{"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
604 			{"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
605 			{"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
606 			{"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
607 			{"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
608 		}
609 	},
610 
611 	{
612 		{	/* After Invert 8 */
613 			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
614 			{"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
615 			{"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
616 			{"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
617 			{"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
618 			{"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
619 			{"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
620 			{"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
621 			{"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
622 			{"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
623 			{"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
624 			{"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
625 			{"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
626 			{"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
627 			{"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
628 			{"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
629 			{"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
630 			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
631 			{"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
632 			{"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
633 			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
634 		}
635 	},
636 
637 	{
638 		{	/* After Invert 9 */
639 			{"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
640 			{"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
641 			{"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
642 			{"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
643 			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
644 		}
645 	},
646 
647 };
648 
649 static struct aeu_invert_reg_bit *
650 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
651 			struct aeu_invert_reg_bit *p_bit)
652 {
653 	if (!ECORE_IS_BB(p_hwfn->p_dev))
654 		return p_bit;
655 
656 	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
657 		return p_bit;
658 
659 	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
660 				  ATTENTION_BB_SHIFT];
661 }
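
/* Example: in aeu_descs[] the "NWS Parity" bit carries ATTENTION_BB_DIFFERENT
 * | ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), so on BB adapters the
 * translation above resolves it to aeu_descs_special[0], i.e. "CNIG port 0".
 */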
662 
663 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
664 				     struct aeu_invert_reg_bit *p_bit)
665 {
666 	return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
667 		  ATTENTION_PARITY);
668 }
669 
670 #define ATTN_STATE_BITS		(0xfff)
671 #define ATTN_BITS_MASKABLE	(0x3ff)
672 struct ecore_sb_attn_info {
673 	/* Virtual & Physical address of the SB */
674 	struct atten_status_block	*sb_attn;
675 	dma_addr_t			sb_phys;
676 
677 	/* Last seen running index */
678 	u16				index;
679 
680 	/* A mask of the AEU bits resulting in a parity error */
681 	u32				parity_mask[NUM_ATTN_REGS];
682 
683 	/* A pointer to the attention description structure */
684 	struct aeu_invert_reg		*p_aeu_desc;
685 
686 	/* Previously asserted attentions that have not yet been deasserted */
687 	u16				known_attn;
688 
689 	/* Cleanup address for the link's general hw attention */
690 	u32				mfw_attn_addr;
691 };
692 
693 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
694 				 struct ecore_sb_attn_info *p_sb_desc)
695 {
696 	u16 rc = 0, index;
697 
698 	OSAL_MMIOWB(p_hwfn->p_dev);
699 
700 	index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
701 	if (p_sb_desc->index != index) {
702 		p_sb_desc->index = index;
703 		rc = ECORE_SB_ATT_IDX;
704 	}
705 
706 	OSAL_MMIOWB(p_hwfn->p_dev);
707 
708 	return rc;
709 }
710 
711 /**
712  * @brief ecore_int_assertion - handles asserted attention bits
713  *
714  * @param p_hwfn
715  * @param asserted_bits newly asserted bits
716  * @return enum _ecore_status_t
717  */
718 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
719 						u16 asserted_bits)
720 {
721 	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
722 	u32 igu_mask;
723 
724 	/* Mask the source of the attention in the IGU */
725 	igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
726 			    IGU_REG_ATTENTION_ENABLE);
727 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
728 		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
729 	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
730 	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
731 
732 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
733 		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
734 		   sb_attn_sw->known_attn,
735 		   sb_attn_sw->known_attn | asserted_bits);
736 	sb_attn_sw->known_attn |= asserted_bits;
737 
738 	/* Handle MCP events */
739 	if (asserted_bits & 0x100) {
740 		ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
741 		/* Clean the MCP attention */
742 		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
743 			 sb_attn_sw->mfw_attn_addr, 0);
744 	}
745 
746 	/* FIXME - this will change once we have good GTT definitions */
747 	DIRECT_REG_WR(p_hwfn,
748 		      (u8 OSAL_IOMEM*)p_hwfn->regview +
749 		      GTT_BAR0_MAP_REG_IGU_CMD +
750 		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
751 			IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
752 
753 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
754 		   asserted_bits);
755 
756 	return ECORE_SUCCESS;
757 }
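
/* Note: bit 8 (0x100) of the attention lines is the MCP/MFW indication
 * handled above; the lower eight bits select the attention groups that
 * ecore_int_deassertion() below walks via MAX_ATTN_GRPS.
 */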
758 
759 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
760 				 enum block_id id, enum dbg_attn_type type,
761 				 bool b_clear)
762 {
763 	struct dbg_attn_block_result attn_results;
764 	enum dbg_status status;
765 
766 	OSAL_MEMSET(&attn_results, 0, sizeof(attn_results));
767 
768 	status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
769 				     b_clear, &attn_results);
770 	if (status != DBG_STATUS_OK)
771 		DP_NOTICE(p_hwfn, true,
772 			  "Failed to parse attention information [status %d]\n",
773 			  status);
774 	else
775 #ifdef ATTN_DESC
776 		ecore_dbg_parse_attn(p_hwfn, &attn_results);
777 #else
778 		ecore_dbg_print_attn(p_hwfn, &attn_results);
779 #endif
780 }
781 
782 /**
783  * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
784  * cause of the attention
785  *
786  * @param p_hwfn
787  * @param p_aeu - descriptor of an AEU bit which caused the attention
788  * @param aeu_en_reg - register offset of the AEU enable reg. which configured
789  *  this bit to this group.
790  * @param p_bit_name - printable name of the deasserted bit
791  * @param bitmask - mask of the bit(s) of this source within the AEU register
792  * @return enum _ecore_status_t
793  */
794 static enum _ecore_status_t
795 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
796 			      struct aeu_invert_reg_bit *p_aeu,
797 			      u32 aeu_en_reg,
798 			      const char *p_bit_name,
799 			      u32 bitmask)
800 {
801 	enum _ecore_status_t rc = ECORE_INVAL;
802 	bool b_fatal = false;
803 
804 	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
805 		p_bit_name, bitmask);
806 
807 	/* Call callback before clearing the interrupt status */
808 	if (p_aeu->cb) {
809 		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
810 			p_bit_name);
811 		rc = p_aeu->cb(p_hwfn);
812 	}
813 
814 	if (rc != ECORE_SUCCESS)
815 		b_fatal = true;
816 
817 	/* Print HW block interrupt registers */
818 	if (p_aeu->block_index != MAX_BLOCK_ID)
819 		ecore_int_attn_print(p_hwfn, p_aeu->block_index,
820 				     ATTN_TYPE_INTERRUPT, !b_fatal);
821 
822 	/* Escalate if the attention is fatal */
823 	if (b_fatal) {
824 		DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
825 			  p_bit_name);
826 
827 		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
828 	}
829 
830 	/* Prevent this Attention from being asserted in the future */
831 	if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
832 	    p_hwfn->p_dev->attn_clr_en) {
833 		u32 val;
834 		u32 mask = ~bitmask;
835 		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
836 		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
837 		DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
838 			p_bit_name);
839 	}
840 
841 	return rc;
842 }
843 
844 /**
845  * @brief ecore_int_deassertion_parity - handle a single parity AEU source
846  *
847  * @param p_hwfn
848  * @param p_aeu - descriptor of an AEU bit which caused the parity
849  * @param aeu_en_reg - address of the AEU enable register
850  * @param bit_index
851  */
852 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
853 					 struct aeu_invert_reg_bit *p_aeu,
854 					 u32 aeu_en_reg, u8 bit_index)
855 {
856 	u32 block_id = p_aeu->block_index, mask, val;
857 
858 	DP_NOTICE(p_hwfn->p_dev, false,
859 		  "%s parity attention is set [address 0x%08x, bit %d]\n",
860 		  p_aeu->bit_name, aeu_en_reg, bit_index);
861 
862 	if (block_id == MAX_BLOCK_ID)
863 		return;
864 
865 	ecore_int_attn_print(p_hwfn, block_id,
866 			     ATTN_TYPE_PARITY, false);
867 
868 	/* In A0, there's a single parity bit for several blocks */
869 	if (block_id == BLOCK_BTB) {
870 		ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
871 				     ATTN_TYPE_PARITY, false);
872 		ecore_int_attn_print(p_hwfn, BLOCK_MCP,
873 				     ATTN_TYPE_PARITY, false);
874 	}
875 
876 	/* Prevent this parity error from being re-asserted */
877 	mask = ~(0x1 << bit_index);
878 	val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
879 	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
880 	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
881 		p_aeu->bit_name);
882 }
883 
884 /**
885  * @brief - handles deassertion of previously asserted attentions.
886  *
887  * @param p_hwfn
888  * @param deasserted_bits - newly deasserted bits
889  * @return enum _ecore_status_t
890  *
891  */
892 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
893 						  u16 deasserted_bits)
894 {
895 	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
896 	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
897 	u8 i, j, k, bit_idx;
898 	enum _ecore_status_t rc = ECORE_SUCCESS;
899 
900 	/* Read the attention registers in the AEU */
901 	for (i = 0; i < NUM_ATTN_REGS; i++) {
902 		aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
903 					  MISC_REG_AEU_AFTER_INVERT_1_IGU +
904 					  i * 0x4);
905 		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
906 			   "Deasserted bits [%d]: %08x\n",
907 			   i, aeu_inv_arr[i]);
908 	}
909 
910 	/* Handle parity attentions first */
911 	for (i = 0; i < NUM_ATTN_REGS; i++)
912 	{
913 		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
914 		u32 parities;
915 
916 		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
917 		en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
918 		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
919 
920 		/* Skip register in which no parity bit is currently set */
921 		if (!parities)
922 			continue;
923 
924 		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
925 			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
926 
927 			if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
928 			    !!(parities & (1 << bit_idx)))
929 				ecore_int_deassertion_parity(p_hwfn, p_bit,
930 							     aeu_en, bit_idx);
931 
932 			bit_idx += ATTENTION_LENGTH(p_bit->flags);
933 		}
934 	}
935 
936 	/* Find non-parity cause for attention and act */
937 	for (k = 0; k < MAX_ATTN_GRPS; k++) {
938 		struct aeu_invert_reg_bit *p_aeu;
939 
940 		/* Handle only groups whose attention is currently deasserted */
941 		if (!(deasserted_bits & (1 << k)))
942 			continue;
943 
944 		for (i = 0; i < NUM_ATTN_REGS; i++) {
945 			u32 bits;
946 
947 			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
948 				 i * sizeof(u32) +
949 				 k * sizeof(u32) * NUM_ATTN_REGS;
950 			en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
951 			bits = aeu_inv_arr[i] & en;
952 
953 			/* Skip if no bit from this group is currently set */
954 			if (!bits)
955 				continue;
956 
957 			/* Find all set bits from current register which belong
958 			 * to current group, making them responsible for the
959 			 * previous assertion.
960 			 */
961 			for (j = 0, bit_idx = 0; bit_idx < 32; j++)
962 			{
963 				unsigned long bitmask;
964 				u8 bit, bit_len;
965 
966 				/* Need to account for bits whose meaning changed */
967 				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
968 				p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu);
969 
970 				bit = bit_idx;
971 				bit_len = ATTENTION_LENGTH(p_aeu->flags);
972 				if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
973 					/* Skip Parity */
974 					bit++;
975 					bit_len--;
976 				}
977 
978 				/* Find the bits relating to HW-block, then
979 				 * shift so they'll become LSB.
980 				 */
981 				bitmask = bits & (((1 << bit_len) - 1) << bit);
982 				bitmask >>= bit;
983 
984 				if (bitmask) {
985 					u32 flags = p_aeu->flags;
986 					char bit_name[30];
987 					u8 num;
988 
989 					num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
990 								bit_len);
991 
992 					/* Some bits represent more than a
993 					 * single interrupt. Print their
994 					 * name correctly.
995 					 */
996 					if (ATTENTION_LENGTH(flags) > 2 ||
997 					    ((flags & ATTENTION_PAR_INT) &&
998 					    ATTENTION_LENGTH(flags) > 1))
999 						OSAL_SNPRINTF(bit_name, 30,
1000 							      p_aeu->bit_name,
1001 							      num);
1002 					else
1003 						OSAL_STRNCPY(bit_name,
1004 							     p_aeu->bit_name,
1005 							     30);
1006 
1007 					/* We now need to pass bitmask in its
1008 					 * correct position.
1009 					 */
1010 					bitmask <<= bit;
1011 
1012 					/* Handle source of the attention */
1013 					ecore_int_deassertion_aeu_bit(p_hwfn,
1014 								      p_aeu,
1015 								      aeu_en,
1016 								      bit_name,
1017 								      bitmask);
1018 				}
1019 
1020 				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1021 			}
1022 		}
1023 	}
1024 
1025 	/* Clear IGU indication for the deasserted bits */
1026 	/* FIXME - this will change once we have good GTT definitions */
1027 	DIRECT_REG_WR(p_hwfn,
1028 		      (u8 OSAL_IOMEM*)p_hwfn->regview +
1029 				      GTT_BAR0_MAP_REG_IGU_CMD +
1030 				      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1031 					IGU_CMD_INT_ACK_BASE) << 3),
1032 		      ~((u32)deasserted_bits));
1033 
1034 	/* Unmask deasserted attentions in IGU */
1035 	aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1036 			    IGU_REG_ATTENTION_ENABLE);
1037 	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1038 	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1039 
1040 	/* Clear deassertion from inner state */
1041 	sb_attn_sw->known_attn &= ~deasserted_bits;
1042 
1043 	return rc;
1044 }
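
/* Worked example for the bit-walk above (illustrative): an ATTENTION_PAR_INT
 * source occupies two consecutive AEU bits, parity first. The walk skips the
 * parity bit (bit++, bit_len--), shifts the remaining interrupt bit of 'bits'
 * down to the LSB and, if it is set, shifts it back into place and reports it
 * through ecore_int_deassertion_aeu_bit().
 */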
1045 
1046 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
1047 {
1048 	struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1049 	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1050 	u16 index = 0, asserted_bits, deasserted_bits;
1051 	u32 attn_bits = 0, attn_acks = 0;
1052 	enum _ecore_status_t rc = ECORE_SUCCESS;
1053 
1054 	/* Read current attention bits/acks - safeguard against racing updates
1055 	 * by guaranteeing we work on a consistent snapshot
1056 	 */
1057 	do {
1058 		index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
1059 		attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
1060 		attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
1061 	} while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1062 	p_sb_attn->sb_index = index;
1063 
1064 	/* Assertion / deassertion are meaningful (and in the correct state)
1065 	 * only when they differ and are consistent with the known state -
1066 	 * deassertion when there is a previous attention & a current ack,
1067 	 * assertion when there is a current attention with no previous one
1068 	 */
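	/* Illustration of the masks below, evaluated per bit
	 * (attn = atten_bits, ack = atten_ack, known = known_attn):
	 *   attn ack known -> event
	 *    1    0    0      newly asserted
	 *    0    1    1      newly deasserted
	 *   any other combination leaves the bit's state unchanged
	 */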
1069 	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1070 			~p_sb_attn_sw->known_attn;
1071 	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1072 			  p_sb_attn_sw->known_attn;
1073 
1074 	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1075 		DP_INFO(p_hwfn,
1076 			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1077 			index, attn_bits, attn_acks, asserted_bits,
1078 			deasserted_bits, p_sb_attn_sw->known_attn);
1079 	else if (asserted_bits == 0x100)
1080 		DP_INFO(p_hwfn,
1081 			"MFW indication via attention\n");
1082 	else
1083 		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1084 			   "MFW indication [deassertion]\n");
1085 
1086 	if (asserted_bits) {
1087 		rc = ecore_int_assertion(p_hwfn, asserted_bits);
1088 		if (rc)
1089 			return rc;
1090 	}
1091 
1092 	if (deasserted_bits)
1093 		rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1094 
1095 	return rc;
1096 }
1097 
1098 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1099 			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
1100 {
1101 	struct igu_prod_cons_update igu_ack = { 0 };
1102 
1103 	igu_ack.sb_id_and_flags =
1104 		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1105 		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1106 		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1107 		 (IGU_SEG_ACCESS_ATTN <<
1108 		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1109 
1110 	DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1111 
1112 	/* Both segments (interrupts & acks) are written to same place address;
1113 	 * Need to guarantee all commands will be received (in-order) by HW.
1114 	 */
1115 	OSAL_MMIOWB(p_hwfn->p_dev);
1116 	OSAL_BARRIER(p_hwfn->p_dev);
1117 }
1118 
1119 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1120 {
1121 	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1122 	struct ecore_pi_info *pi_info = OSAL_NULL;
1123 	struct ecore_sb_attn_info *sb_attn;
1124 	struct ecore_sb_info *sb_info;
1125 	int arr_size;
1126 	u16 rc = 0;
1127 
1128 	if (!p_hwfn)
1129 		return;
1130 
1131 	if (!p_hwfn->p_sp_sb) {
1132 		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1133 		return;
1134 	}
1135 
1136 	sb_info = &p_hwfn->p_sp_sb->sb_info;
1137 	arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1138 	if (!sb_info) {
1139 		DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n");
1140 		return;
1141 	}
1142 
1143 	if (!p_hwfn->p_sb_attn) {
1144 		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1145 		return;
1146 	}
1147 	sb_attn = p_hwfn->p_sb_attn;
1148 
1149 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1150 		   p_hwfn, p_hwfn->my_id);
1151 
1152 	/* Disable ack for def status block. Required both for msix +
1153 	 * inta in non-mask mode, in inta does no harm.
1154 	 */
1155 	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1156 
1157 	/* Gather Interrupts/Attentions information */
1158 	if (!sb_info->sb_virt) {
1159 		DP_ERR(p_hwfn->p_dev, "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1160 	} else {
1161 		u32 tmp_index = sb_info->sb_ack;
1162 		rc = ecore_sb_update_sb_idx(sb_info);
1163 		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1164 			   "Interrupt indices: 0x%08x --> 0x%08x\n",
1165 			   tmp_index, sb_info->sb_ack);
1166 	}
1167 
1168 	if (!sb_attn || !sb_attn->sb_attn) {
1169 		DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n");
1170 	} else {
1171 		u16 tmp_index = sb_attn->index;
1172 
1173 		rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1174 		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1175 			   "Attention indices: 0x%08x --> 0x%08x\n",
1176 			   tmp_index, sb_attn->index);
1177 	}
1178 
1179 	/* Check if we expect interrupts at this time; if not, just ack them */
1180 	if (!(rc & ECORE_SB_EVENT_MASK)) {
1181 		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1182 		return;
1183 	}
1184 
1185 	/* Check the validity of the DPC PTT; if invalid, ack interrupts and bail */
1186 	if (!p_hwfn->p_dpc_ptt) {
1187 		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1188 		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1189 		return;
1190 	}
1191 
1192 	if (rc & ECORE_SB_ATT_IDX)
1193 		ecore_int_attentions(p_hwfn);
1194 
1195 	if (rc & ECORE_SB_IDX) {
1196 		int pi;
1197 
1198 		/* Since we only looked at the SB index, it's possible that
1199 		 * more than a single protocol index on the SB has incremented.
1200 		 * Iterate over all configured protocol indices and check
1201 		 * whether something happened for each.
1202 		 */
1203 		for (pi = 0; pi < arr_size; pi++) {
1204 			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1205 			if (pi_info->comp_cb != OSAL_NULL)
1206 				pi_info->comp_cb(p_hwfn, pi_info->cookie);
1207 		}
1208 	}
1209 
1210 	if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1211 		/* This should be done before the interrupts are enabled,
1212 		 * since otherwise a new attention will be generated.
1213 		 */
1214 		ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1215 	}
1216 
1217 	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1218 }
1219 
1220 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1221 {
1222 	struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1223 
1224 	if (!p_sb)
1225 		return;
1226 
1227 	if (p_sb->sb_attn) {
1228 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1229 				       p_sb->sb_phys,
1230 				       SB_ATTN_ALIGNED_SIZE(p_hwfn));
1231 	}
1232 
1233 	OSAL_FREE(p_hwfn->p_dev, p_sb);
1234 	p_hwfn->p_sb_attn = OSAL_NULL;
1235 }
1236 
1237 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1238 				    struct ecore_ptt *p_ptt)
1239 {
1240 	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1241 
1242 	OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1243 
1244 	sb_info->index = 0;
1245 	sb_info->known_attn = 0;
1246 
1247 	/* Configure Attention Status Block in IGU */
1248 	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1249 		 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1250 	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1251 		 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1252 }
1253 
1254 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1255 				   struct ecore_ptt *p_ptt,
1256 				   void *sb_virt_addr,
1257 				   dma_addr_t sb_phy_addr)
1258 {
1259 	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1260 	int i, j, k;
1261 
1262 	sb_info->sb_attn = sb_virt_addr;
1263 	sb_info->sb_phys = sb_phy_addr;
1264 
1265 	/* Set the pointer to the AEU descriptors */
1266 	sb_info->p_aeu_desc = aeu_descs;
1267 
1268 	/* Calculate Parity Masks */
1269 	OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1270 	for (i = 0; i < NUM_ATTN_REGS; i++) {
1271 		/* j is array index, k is bit index */
1272 		for (j = 0, k = 0; k < 32; j++) {
1273 			struct aeu_invert_reg_bit *p_aeu;
1274 
1275 			p_aeu = &aeu_descs[i].bits[j];
1276 			if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
1277 				sb_info->parity_mask[i] |= 1 << k;
1278 
1279 			k += ATTENTION_LENGTH(p_aeu->flags);
1280 		}
1281 		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1282 			   "Attn Mask [Reg %d]: 0x%08x\n",
1283 			   i, sb_info->parity_mask[i]);
1284 	}
1285 
1286 	/* Set the address of cleanup for the mcp attention */
1287 	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1288 				 MISC_REG_AEU_GENERAL_ATTN_0;
1289 
1290 	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1291 }
1292 
1293 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1294 						    struct ecore_ptt *p_ptt)
1295 {
1296 	struct ecore_dev *p_dev = p_hwfn->p_dev;
1297 	struct ecore_sb_attn_info *p_sb;
1298 	dma_addr_t p_phys = 0;
1299 	void *p_virt;
1300 
1301 	/* SB struct */
1302 	p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
1303 	if (!p_sb) {
1304 		DP_NOTICE(p_dev, true, "Failed to allocate `struct ecore_sb_attn_info'\n");
1305 		return ECORE_NOMEM;
1306 	}
1307 
1308 	/* SB ring */
1309 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1310 					 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1311 	if (!p_virt) {
1312 		DP_NOTICE(p_dev, true, "Failed to allocate status block (attentions)\n");
1313 		OSAL_FREE(p_dev, p_sb);
1314 		return ECORE_NOMEM;
1315 	}
1316 
1317 	/* Attention setup */
1318 	p_hwfn->p_sb_attn = p_sb;
1319 	ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1320 
1321 	return ECORE_SUCCESS;
1322 }
1323 
1324 /* coalescing timeout = timeset << (timer_res + 1) */
1325 #define ECORE_CAU_DEF_RX_USECS 24
1326 #define ECORE_CAU_DEF_TX_USECS 48
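
/* Illustrative arithmetic for a hypothetical request: rx_coalesce_usecs = 200
 * selects timer_res = 1 in ecore_init_cau_sb_entry() below (200 <= 0xFF),
 * ecore_int_cau_conf_sb() then derives timeset = 200 >> 1 = 100 (fits in
 * 7 bits), and per the formula above the effective timeout is
 * 100 << (1 + 1) = 400 time units.
 */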
1327 
1328 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1329 			     struct cau_sb_entry *p_sb_entry,
1330 			     u8 pf_id, u16 vf_number, u8 vf_valid)
1331 {
1332 	struct ecore_dev *p_dev = p_hwfn->p_dev;
1333 	u32 cau_state;
1334 	u8 timer_res;
1335 
1336 	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1337 
1338 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1339 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1340 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1341 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1342 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1343 
1344 	cau_state = CAU_HC_DISABLE_STATE;
1345 
1346 	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1347 		cau_state = CAU_HC_ENABLE_STATE;
1348 		if (!p_dev->rx_coalesce_usecs)
1349 			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1350 		if (!p_dev->tx_coalesce_usecs)
1351 			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1352 	}
1353 
1354 	/* Coalesce = (timeset << timer_res); timeset is 7 bits wide */
1355 	if (p_dev->rx_coalesce_usecs <= 0x7F)
1356 		timer_res = 0;
1357 	else if (p_dev->rx_coalesce_usecs <= 0xFF)
1358 		timer_res = 1;
1359 	else
1360 		timer_res = 2;
1361 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1362 
1363 	if (p_dev->tx_coalesce_usecs <= 0x7F)
1364 		timer_res = 0;
1365 	else if (p_dev->tx_coalesce_usecs <= 0xFF)
1366 		timer_res = 1;
1367 	else
1368 		timer_res = 2;
1369 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1370 
1371 	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1372 	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1373 }
1374 
1375 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1376 				   struct ecore_ptt *p_ptt,
1377 				   u16 igu_sb_id, u32 pi_index,
1378 				   enum ecore_coalescing_fsm coalescing_fsm,
1379 				   u8 timeset)
1380 {
1381 	struct cau_pi_entry pi_entry;
1382 	u32 sb_offset, pi_offset;
1383 
1384 	if (IS_VF(p_hwfn->p_dev))
1385 		return; /* @@@TBD MichalK- VF CAU... */
1386 
1387 	sb_offset = igu_sb_id * PIS_PER_SB;
1388 	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1389 
1390 	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1391 	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1392 		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1393 	else
1394 		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1395 
1396 	pi_offset = sb_offset + pi_index;
1397 	if (p_hwfn->hw_init_done) {
1398 		ecore_wr(p_hwfn, p_ptt,
1399 			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1400 			 *((u32 *)&(pi_entry)));
1401 	} else {
1402 		STORE_RT_REG(p_hwfn,
1403 			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1404 			     *((u32 *)&(pi_entry)));
1405 	}
1406 }
1407 
1408 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1409 			   struct ecore_ptt *p_ptt,
1410 			   struct ecore_sb_info *p_sb, u32 pi_index,
1411 			   enum ecore_coalescing_fsm coalescing_fsm,
1412 			   u8 timeset)
1413 {
1414 	_ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
1415 			       pi_index, coalescing_fsm, timeset);
1416 }
1417 
1418 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1419 			   struct ecore_ptt *p_ptt,
1420 			   dma_addr_t sb_phys, u16 igu_sb_id,
1421 			   u16 vf_number, u8 vf_valid)
1422 {
1423 	struct cau_sb_entry sb_entry;
1424 
1425 	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1426 				vf_number, vf_valid);
1427 
1428 	if (p_hwfn->hw_init_done) {
1429 		/* Wide-bus, initialize via DMAE */
1430 		u64 phys_addr = (u64)sb_phys;
1431 
1432 		ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr,
1433 				    CAU_REG_SB_ADDR_MEMORY +
1434 				    igu_sb_id * sizeof(u64), 2, 0);
1435 		ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry,
1436 				    CAU_REG_SB_VAR_MEMORY +
1437 				    igu_sb_id * sizeof(u64), 2, 0);
1438 	} else {
1439 		/* Initialize Status Block Address */
1440 		STORE_RT_REG_AGG(p_hwfn,
1441 				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET+igu_sb_id*2,
1442 				 sb_phys);
1443 
1444 		STORE_RT_REG_AGG(p_hwfn,
1445 				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET+igu_sb_id*2,
1446 				 sb_entry);
1447 	}
1448 
1449 	/* Configure pi coalescing if set */
1450 	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1451 		/* Ethernet will open queues for all TCs, so configure all of them
1452 		 * properly, rather than just the active ones
1453 		 */
1454 		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1455 
1456 		u8 timeset, timer_res;
1457 		u8 i;
1458 
1459 		/* timeset = (coalesce >> timer_res); timeset is 7 bits wide */
1460 		if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
1461 			timer_res = 0;
1462 		else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
1463 			timer_res = 1;
1464 		else
1465 			timer_res = 2;
1466 		timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
1467 		_ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1468 				       ECORE_COAL_RX_STATE_MACHINE,
1469 				       timeset);
1470 
1471 		if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
1472 			timer_res = 0;
1473 		else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
1474 			timer_res = 1;
1475 		else
1476 			timer_res = 2;
1477 		timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
1478 		for (i = 0; i < num_tc; i++) {
1479 			_ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1480 					       igu_sb_id, TX_PI(i),
1481 					       ECORE_COAL_TX_STATE_MACHINE,
1482 					       timeset);
1483 		}
1484 	}
1485 }
1486 
1487 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1488 			       struct ecore_ptt *p_ptt,
1489 			       struct ecore_sb_info *sb_info)
1490 {
1491 	/* zero status block and ack counter */
1492 	sb_info->sb_ack = 0;
1493 	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1494 
1495 	if (IS_PF(p_hwfn->p_dev))
1496 		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1497 				      sb_info->igu_sb_id, 0, 0);
1498 }
1499 
1500 struct ecore_igu_block *
1501 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
1502 {
1503 	struct ecore_igu_block *p_block;
1504 	u16 igu_id;
1505 
1506 	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1507 	     igu_id++) {
1508 		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1509 
1510 		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1511 		    !(p_block->status & ECORE_IGU_STATUS_FREE))
1512 			continue;
1513 
1514 		if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
1515 		    b_is_pf)
1516 			return p_block;
1517 	}
1518 
1519 	return OSAL_NULL;
1520 }
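
/* Minimal usage sketch for the helper above (hypothetical caller, not
 * part of the driver):
 *
 *	struct ecore_igu_block *p_blk;
 *
 *	p_blk = ecore_get_igu_free_sb(p_hwfn, true);
 *	if (p_blk == OSAL_NULL)
 *		return ECORE_NORESOURCES; // no free PF-owned IGU SB left
 *
 * A block qualifies only if it is both VALID and FREE and its PF/VF
 * ownership matches b_is_pf.
 */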
1521 
1522 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
1523 				  u16 vector_id)
1524 {
1525 	struct ecore_igu_block *p_block;
1526 	u16 igu_id;
1527 
1528 	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1529 	     igu_id++) {
1530 		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1531 
1532 		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1533 		    !p_block->is_pf ||
1534 		    p_block->vector_number != vector_id)
1535 			continue;
1536 
1537 		return igu_id;
1538 	}
1539 
1540 	return ECORE_SB_INVALID_IDX;
1541 }
1542 
1543 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1544 {
1545 	u16 igu_sb_id;
1546 
1547 	/* Assuming a contiguous set of IGU SBs dedicated to the given PF */
1548 	if (sb_id == ECORE_SP_SB_ID)
1549 		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1550 	else if (IS_PF(p_hwfn->p_dev))
1551 		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1552 	else
1553 		igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1554 
1555 	if (igu_sb_id == ECORE_SB_INVALID_IDX)
1556 		DP_NOTICE(p_hwfn, true,
1557 			  "Slowpath SB vector %04x doesn't exist\n",
1558 			  sb_id);
1559 	else if (sb_id == ECORE_SP_SB_ID)
1560 		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1561 			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1562 	else
1563 		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1564 			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1565 
1566 	return igu_sb_id;
1567 }
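
/* Mapping note (illustrative): vector 0 in the IGU CAM is the
 * default/slowpath SB, so a client SB index of e.g. 0 is looked up as
 * PF vector 1; ECORE_SP_SB_ID short-circuits to igu_dsb_id instead.
 */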
1568 
1569 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1570 				       struct ecore_ptt *p_ptt,
1571 				       struct ecore_sb_info *sb_info,
1572 				       void *sb_virt_addr,
1573 				       dma_addr_t sb_phy_addr,
1574 				       u16 sb_id)
1575 {
1576 	sb_info->sb_virt = sb_virt_addr;
1577 	sb_info->sb_phys = sb_phy_addr;
1578 
1579 	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1580 
1581 	if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
1582 		return ECORE_INVAL;
1583 
1584 	/* Let the igu info reference the client's SB info */
1585 	if (sb_id != ECORE_SP_SB_ID) {
1586 		if (IS_PF(p_hwfn->p_dev)) {
1587 			struct ecore_igu_info *p_info;
1588 			struct ecore_igu_block *p_block;
1589 
1590 			p_info = p_hwfn->hw_info.p_igu_info;
1591 			p_block = &p_info->entry[sb_info->igu_sb_id];
1592 
1593 			p_block->sb_info = sb_info;
1594 			p_block->status &= ~ECORE_IGU_STATUS_FREE;
1595 			p_info->usage.free_cnt--;
1596 		} else {
1597 			ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1598 		}
1599 	}
1600 
1601 #ifdef ECORE_CONFIG_DIRECT_HWFN
1602 	sb_info->p_hwfn = p_hwfn;
1603 #endif
1604 	sb_info->p_dev = p_hwfn->p_dev;
1605 
1606 	/* The igu address will hold the absolute address that needs to be
1607 	 * written to for a specific status block
1608 	 */
1609 	if (IS_PF(p_hwfn->p_dev)) {
1610 		sb_info->igu_addr = (u8 OSAL_IOMEM*)p_hwfn->regview +
1611 				    GTT_BAR0_MAP_REG_IGU_CMD +
1612 				    (sb_info->igu_sb_id << 3);
1613 
1614 	} else {
1615 		sb_info->igu_addr =
1616 			(u8 OSAL_IOMEM*)p_hwfn->regview +
1617 			PXP_VF_BAR0_START_IGU +
1618 			((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1619 	}
1620 
1621 	sb_info->flags |= ECORE_SB_INFO_INIT;
1622 
1623 	ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1624 
1625 	return ECORE_SUCCESS;
1626 }
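
/* Address computation example (illustrative): for a PF with
 * igu_sb_id = 0x12, igu_addr = regview + GTT_BAR0_MAP_REG_IGU_CMD +
 * (0x12 << 3), i.e. each IGU SB owns an 8-byte command window.  A VF
 * window instead lives past PXP_VF_BAR0_START_IGU, offset by
 * IGU_CMD_INT_ACK_BASE before the shift.
 */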
1627 
1628 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1629 					  struct ecore_sb_info *sb_info,
1630 					  u16 sb_id)
1631 {
1632 	struct ecore_igu_info *p_info;
1633 	struct ecore_igu_block *p_block;
1634 
1635 	if (sb_info == OSAL_NULL)
1636 		return ECORE_SUCCESS;
1637 
1638 	/* zero status block and ack counter */
1639 	sb_info->sb_ack = 0;
1640 	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1641 
1642 	if (IS_VF(p_hwfn->p_dev)) {
1643 		ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
1644 		return ECORE_SUCCESS;
1645 	}
1646 
1647 	p_info = p_hwfn->hw_info.p_igu_info;
1648 	p_block = &p_info->entry[sb_info->igu_sb_id];
1649 
1650 	/* Vector 0 is reserved to Default SB */
1651 	if (p_block->vector_number == 0) {
1652 		DP_ERR(p_hwfn, "Do not free the sp SB using this function\n");
1653 		return ECORE_INVAL;
1654 	}
1655 
1656 	/* Lose reference to client's SB info, and fix counters */
1657 	p_block->sb_info = OSAL_NULL;
1658 	p_block->status |= ECORE_IGU_STATUS_FREE;
1659 	p_info->usage.free_cnt++;
1660 
1661 	return ECORE_SUCCESS;
1662 }
1663 
1664 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1665 {
1666 	struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1667 
1668 	if (!p_sb)
1669 		return;
1670 
1671 	if (p_sb->sb_info.sb_virt) {
1672 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1673 				       p_sb->sb_info.sb_virt,
1674 				       p_sb->sb_info.sb_phys,
1675 				       SB_ALIGNED_SIZE(p_hwfn));
1676 	}
1677 
1678 	OSAL_FREE(p_hwfn->p_dev, p_sb);
1679 	p_hwfn->p_sp_sb = OSAL_NULL;
1680 }
1681 
1682 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1683 						  struct ecore_ptt *p_ptt)
1684 {
1685 	struct ecore_sb_sp_info *p_sb;
1686 	dma_addr_t p_phys = 0;
1687 	void *p_virt;
1688 
1689 	/* SB struct */
1690 	p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
1691 	if (!p_sb) {
1692 		DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_sb_info'\n");
1693 		return ECORE_NOMEM;
1694 	}
1695 
1696 	/* SB ring  */
1697 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1698 					 &p_phys,
1699 					 SB_ALIGNED_SIZE(p_hwfn));
1700 	if (!p_virt) {
1701 		DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
1702 		OSAL_FREE(p_hwfn->p_dev, p_sb);
1703 		return ECORE_NOMEM;
1704 	}
1705 
1706 
1707 	/* Status Block setup */
1708 	p_hwfn->p_sp_sb = p_sb;
1709 	ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1710 			  p_virt, p_phys, ECORE_SP_SB_ID);
1711 
1712 	OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1713 
1714 	return ECORE_SUCCESS;
1715 }
1716 
1717 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1718 					   ecore_int_comp_cb_t comp_cb,
1719 					   void *cookie,
1720 					   u8 *sb_idx,
1721 					   __le16 **p_fw_cons)
1722 {
1723 	struct ecore_sb_sp_info *p_sp_sb  = p_hwfn->p_sp_sb;
1724 	enum _ecore_status_t rc = ECORE_NOMEM;
1725 	u8 pi;
1726 
1727 	/* Look for a free index */
1728 	for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1729 		if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1730 			continue;
1731 
1732 		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1733 		p_sp_sb->pi_info_arr[pi].cookie = cookie;
1734 		*sb_idx = pi;
1735 		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1736 		rc = ECORE_SUCCESS;
1737 		break;
1738 	}
1739 
1740 	return rc;
1741 }
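
/* Minimal usage sketch (my_comp_cb/my_cookie are hypothetical names;
 * assumes the callback matches ecore_int_comp_cb_t):
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *
 *	if (ecore_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				  &sb_idx, &p_fw_cons) != ECORE_SUCCESS)
 *		return ECORE_NOMEM; // all PIS_PER_SB slots taken
 *
 * On success the slowpath DPC invokes my_comp_cb(p_hwfn, my_cookie)
 * when the firmware advances the consumer behind *p_fw_cons.
 */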
1742 
1743 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn,
1744 					     u8 pi)
1745 {
1746 	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1747 
1748 	if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1749 		return ECORE_NOMEM;
1750 
1751 	p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1752 	p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1753 
1754 	return ECORE_SUCCESS;
1755 }
1756 
1757 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1758 {
1759 	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1760 }
1761 
1762 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1763 			      struct ecore_ptt	*p_ptt,
1764 			      enum ecore_int_mode int_mode)
1765 {
1766 	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1767 
1768 #ifndef ASIC_ONLY
1769 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1770 		DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1771 		igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
1772 	}
1773 #endif
1774 
1775 	p_hwfn->p_dev->int_mode = int_mode;
1776 	switch (p_hwfn->p_dev->int_mode) {
1777 	case ECORE_INT_MODE_INTA:
1778 		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1779 		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1780 		break;
1781 
1782 	case ECORE_INT_MODE_MSI:
1783 		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1784 		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1785 		break;
1786 
1787 	case ECORE_INT_MODE_MSIX:
1788 		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1789 		break;
1790 	case ECORE_INT_MODE_POLL:
1791 		break;
1792 	}
1793 
1794 	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1795 }
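
/* Resulting IGU_REG_PF_CONFIGURATION bits per mode (sketch; 'base' is
 * FUNC_EN | ATTN_BIT_EN, minus ATTN_BIT_EN under the FPGA quirk above):
 *	INTA -> base | INT_LINE_EN | SINGLE_ISR_EN
 *	MSI  -> base | MSI_MSIX_EN | SINGLE_ISR_EN
 *	MSIX -> base | MSI_MSIX_EN
 *	POLL -> base
 */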
1796 
1797 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1798 				      struct ecore_ptt *p_ptt)
1799 {
1800 #ifndef ASIC_ONLY
1801 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1802 		DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n");
1803 		return;
1804 	}
1805 #endif
1806 
1807 	/* Configure AEU signal change to produce attentions */
1808 	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1809 	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1810 	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1811 	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1812 
1813 	/* Flush the writes to IGU */
1814 	OSAL_MMIOWB(p_hwfn->p_dev);
1815 
1816 	/* Unmask AEU signals toward IGU */
1817 	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1818 }
1819 
1820 enum _ecore_status_t
1821 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1822 			  enum ecore_int_mode int_mode)
1823 {
1824 	enum _ecore_status_t rc = ECORE_SUCCESS;
1825 	u32 tmp;
1826 
1827 	/* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
1828 	 * attentions. Since we're waiting for a BRCM answer regarding this
1829 	 * attention, we simply mask it in the meanwhile.
1830 	 */
1831 	tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1832 	tmp &= ~0x800;
1833 	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
1834 
1835 	ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1836 
1837 	if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1838 		rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1839 		if (rc != ECORE_SUCCESS) {
1840 			DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n");
1841 			return ECORE_NORESOURCES;
1842 		}
1843 		p_hwfn->b_int_requested = true;
1844 	}
1845 
1846 	/* Enable interrupt Generation */
1847 	ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1848 
1849 	p_hwfn->b_int_enabled = 1;
1850 
1851 	return rc;
1852 }
1853 
1854 void ecore_int_igu_disable_int(struct ecore_hwfn	*p_hwfn,
1855 			       struct ecore_ptt		*p_ptt)
1856 {
1857 	p_hwfn->b_int_enabled = 0;
1858 
1859 	if (IS_VF(p_hwfn->p_dev))
1860 		return;
1861 
1862 	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1863 }
1864 
1865 #define IGU_CLEANUP_SLEEP_LENGTH		(1000)
1866 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1867 				     struct ecore_ptt *p_ptt,
1868 				     u16 igu_sb_id,
1869 				     bool cleanup_set,
1870 				     u16 opaque_fid)
1871 {
1872 	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1873 	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1874 	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1875 	u8  type = 0; /* FIXME MichalS type??? */
1876 
1877 	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1878 			   IGU_REG_CLEANUP_STATUS_0) != 0x200);
1879 
1880 	/* Use the Control Command Register to perform the cleanup. There is an
1881 	 * option to do this via the IGU BAR, but then it can't be used for VFs.
1882 	 */
1883 
1884 	/* Set the data field */
1885 	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1886 	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1887 	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1888 
1889 	/* Set the control register */
1890 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1891 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1892 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1893 
1894 	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1895 
1896 	OSAL_BARRIER(p_hwfn->p_dev);
1897 
1898 	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1899 
1900 	/* Flush the write to IGU */
1901 	OSAL_MMIOWB(p_hwfn->p_dev);
1902 
1903 	/* calculate where to read the status bit from */
1904 	sb_bit = 1 << (igu_sb_id % 32);
1905 	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
1906 
1907 	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
1908 
1909 	/* Now wait for the command to complete */
1910 	while (--sleep_cnt) {
1911 		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
1912 		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1913 			break;
1914 		OSAL_MSLEEP(5);
1915 	}
1916 
1917 	if (!sleep_cnt)
1918 		DP_NOTICE(p_hwfn, true,
1919 			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1920 			  val, igu_sb_id);
1921 }
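
/* Status-bit arithmetic example (illustrative): for igu_sb_id = 70,
 * sb_bit = 1 << (70 % 32) = 1 << 6 and sb_bit_addr starts
 * (70 / 32) * sizeof(u32) = 8 bytes into IGU_REG_CLEANUP_STATUS_0
 * (plus 0x80 * type), so the loop polls bit 6 of the third status
 * dword until it matches cleanup_set.
 */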
1922 
1923 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
1924 				       struct ecore_ptt *p_ptt,
1925 				       u16 igu_sb_id, u16 opaque, bool b_set)
1926 {
1927 	struct ecore_igu_block *p_block;
1928 	int pi, i;
1929 
1930 	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
1931 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1932 		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%x\n",
1933 		   igu_sb_id, p_block->function_id, p_block->is_pf,
1934 		   p_block->vector_number);
1935 
1936 	/* Set */
1937 	if (b_set)
1938 		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
1939 
1940 	/* Clear */
1941 	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
1942 
1943 	/* Wait for the IGU SB to cleanup */
1944 	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
1945 		u32 val;
1946 
1947 		val = ecore_rd(p_hwfn, p_ptt,
1948 			       IGU_REG_WRITE_DONE_PENDING +
1949 			       ((igu_sb_id / 32) * 4));
1950 		if (val & (1 << (igu_sb_id % 32)))
1951 			OSAL_UDELAY(10);
1952 		else
1953 			break;
1954 	}
1955 	if (i == IGU_CLEANUP_SLEEP_LENGTH)
1956 		DP_NOTICE(p_hwfn, true,
1957 			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
1958 			  igu_sb_id);
1959 
1960 	/* Clear the CAU for the SB */
1961 	for (pi = 0; pi < 12; pi++)
1962 		ecore_wr(p_hwfn, p_ptt,
1963 			 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
1964 }
1965 
1966 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
1967 				 struct ecore_ptt *p_ptt,
1968 				 bool b_set,
1969 				 bool b_slowpath)
1970 {
1971 	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1972 	struct ecore_igu_block *p_block;
1973 	u16 igu_sb_id = 0;
1974 	u32 val = 0;
1975 
1976 	/* @@@TBD MichalK temporary... should be moved to init-tool... */
1977 	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
1978 	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
1979 	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
1980 	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
1981 	/* end temporary */
1982 
1983 	for (igu_sb_id = 0;
1984 	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1985 	     igu_sb_id++) {
1986 		p_block = &p_info->entry[igu_sb_id];
1987 
1988 		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1989 		    !p_block->is_pf ||
1990 		    (p_block->status & ECORE_IGU_STATUS_DSB))
1991 			continue;
1992 
1993 		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
1994 						  p_hwfn->hw_info.opaque_fid,
1995 						  b_set);
1996 	}
1997 
1998 	if (b_slowpath)
1999 		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2000 						  p_info->igu_dsb_id,
2001 						  p_hwfn->hw_info.opaque_fid,
2002 						  b_set);
2003 }
2004 
2005 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
2006 			    struct ecore_ptt *p_ptt)
2007 {
2008 	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2009 	struct ecore_igu_block *p_block;
2010 	int pf_sbs, vf_sbs;
2011 	u16 igu_sb_id;
2012 	u32 val, rval;
2013 
2014 	if (!RESC_NUM(p_hwfn, ECORE_SB)) {
2015 		/* We're using an old MFW - we have to prevent any switching
2016 		 * of SBs between the PF and VFs, as the driver would later be
2017 		 * unable to tell which belongs to which.
2018 		 */
2019 		p_info->b_allow_pf_vf_change = false;
2020 	} else {
2021 		/* Use the numbers the MFW has provided -
2022 		 * don't forget the MFW accounts for the default SB as well.
2023 		 */
2024 		p_info->b_allow_pf_vf_change = true;
2025 
2026 		if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
2027 			DP_INFO(p_hwfn,
2028 				"MFW reports 0x%04x PF SBs; IGU indicates only 0x%04x\n",
2029 				RESC_NUM(p_hwfn, ECORE_SB) - 1,
2030 				p_info->usage.cnt);
2031 			p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
2032 		}
2033 
2034 		/* TODO - how do we learn about VF SBs from MFW? */
2035 		if (IS_PF_SRIOV(p_hwfn)) {
2036 			u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
2037 
2038 			if (vfs != p_info->usage.iov_cnt)
2039 				DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2040 					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
2041 					   p_info->usage.iov_cnt, vfs);
2042 
2043 			/* At this point we know the total number of SBs in the
2044 			 * IGU and the number of PF SBs, so we can validate that
2045 			 * enough remain for the VFs.
2046 			 */
2047 			if (vfs > p_info->usage.free_cnt +
2048 				  p_info->usage.free_cnt_iov -
2049 				  p_info->usage.cnt) {
2050 				DP_NOTICE(p_hwfn, true,
2051 					  "Not enough SBs for VFs - 0x%04x SBs available, of which the PF requires %04x and the VFs require %04x\n",
2052 					  p_info->usage.free_cnt +
2053 					  p_info->usage.free_cnt_iov,
2054 					  p_info->usage.cnt, vfs);
2055 				return ECORE_INVAL;
2056 			}
2057 
2058 			/* Currently cap the number of VF SBs at the
2059 			 * number of VFs.
2060 			 */
2061 			p_info->usage.iov_cnt = vfs;
2062 		}
2063 	}
2064 
2065 	/* Mark all SBs as free, now in the right PF/VFs division */
2066 	p_info->usage.free_cnt = p_info->usage.cnt;
2067 	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
2068 	p_info->usage.orig = p_info->usage.cnt;
2069 	p_info->usage.iov_orig = p_info->usage.iov_cnt;
2070 
2071 	/* We now proceed to re-configure the IGU cam to reflect the initial
2072 	 * configuration. We can start with the Default SB.
2073 	 */
2074 	pf_sbs = p_info->usage.cnt;
2075 	vf_sbs = p_info->usage.iov_cnt;
2076 
2077 	for (igu_sb_id = p_info->igu_dsb_id;
2078 	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2079 	     igu_sb_id++) {
2080 		p_block = &p_info->entry[igu_sb_id];
2081 		val = 0;
2082 
2083 		if (!(p_block->status & ECORE_IGU_STATUS_VALID))
2084 			continue;
2085 
2086 		if (p_block->status & ECORE_IGU_STATUS_DSB) {
2087 			p_block->function_id = p_hwfn->rel_pf_id;
2088 			p_block->is_pf = 1;
2089 			p_block->vector_number = 0;
2090 			p_block->status = ECORE_IGU_STATUS_VALID |
2091 					  ECORE_IGU_STATUS_PF |
2092 					  ECORE_IGU_STATUS_DSB;
2093 		} else if (pf_sbs) {
2094 			pf_sbs--;
2095 			p_block->function_id = p_hwfn->rel_pf_id;
2096 			p_block->is_pf = 1;
2097 			p_block->vector_number = p_info->usage.cnt - pf_sbs;
2098 			p_block->status = ECORE_IGU_STATUS_VALID |
2099 					  ECORE_IGU_STATUS_PF |
2100 					  ECORE_IGU_STATUS_FREE;
2101 		} else if (vf_sbs) {
2102 			p_block->function_id =
2103 				p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
2104 				p_info->usage.iov_cnt - vf_sbs;
2105 			p_block->is_pf = 0;
2106 			p_block->vector_number = 0;
2107 			p_block->status = ECORE_IGU_STATUS_VALID |
2108 					  ECORE_IGU_STATUS_FREE;
2109 			vf_sbs--;
2110 		} else {
2111 			p_block->function_id = 0;
2112 			p_block->is_pf = 0;
2113 			p_block->vector_number = 0;
2114 		}
2115 
2116 		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2117 			  p_block->function_id);
2118 		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2119 		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2120 			  p_block->vector_number);
2121 
2122 		/* VF entries will be enabled when the VF is initialized */
2123 		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2124 
2125 		rval = ecore_rd(p_hwfn, p_ptt,
2126 				IGU_REG_MAPPING_MEMORY +
2127 				sizeof(u32) * igu_sb_id);
2128 
2129 		if (rval != val) {
2130 			ecore_wr(p_hwfn, p_ptt,
2131 				 IGU_REG_MAPPING_MEMORY +
2132 				 sizeof(u32) * igu_sb_id,
2133 				 val);
2134 
2135 			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2136 				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
2137 				   igu_sb_id, p_block->function_id,
2138 				   p_block->is_pf, p_block->vector_number,
2139 				   rval, val);
2140 		}
2141 	}
2142 
2143 	return 0;
2144 }
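
/* Example outcome (illustrative; assumes IGU entries 4-8 are valid,
 * igu_dsb_id = 4, usage.cnt = 2 and usage.iov_cnt = 2):
 *	entry 4 -> PF, vector 0 (default SB)
 *	entry 5 -> PF, vector 1;  entry 6 -> PF, vector 2
 *	entry 7 -> VF first_vf_in_pf;  entry 8 -> VF first_vf_in_pf + 1
 * VF lines are written with IGU_MAPPING_LINE_VALID clear until the VF
 * is initialized.
 */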
2145 
2146 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
2147 				    struct ecore_ptt *p_ptt)
2148 {
2149 	struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
2150 
2151 	/* Return all the usage indications to their defaults prior to the reset;
2152 	 * the reset expects the non-orig fields to reflect the initial status of
2153 	 * the SBs, and will re-calculate the originals based on those.
2154 	 */
2155 	p_cnt->cnt = p_cnt->orig;
2156 	p_cnt->free_cnt = p_cnt->orig;
2157 	p_cnt->iov_cnt = p_cnt->iov_orig;
2158 	p_cnt->free_cnt_iov = p_cnt->iov_orig;
2159 	p_cnt->orig = 0;
2160 	p_cnt->iov_orig = 0;
2161 
2162 	/* TODO - we probably need to re-configure the CAU as well... */
2163 	return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
2164 }
2165 
2166 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
2167 					 struct ecore_ptt *p_ptt,
2168 					 u16 igu_sb_id)
2169 {
2170 	u32 val = ecore_rd(p_hwfn, p_ptt,
2171 			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2172 	struct ecore_igu_block *p_block;
2173 
2174 	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2175 
2176 	/* Fill the block information */
2177 	p_block->function_id = GET_FIELD(val,
2178 					 IGU_MAPPING_LINE_FUNCTION_NUMBER);
2179 	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2180 	p_block->vector_number = GET_FIELD(val,
2181 					   IGU_MAPPING_LINE_VECTOR_NUMBER);
2182 	p_block->igu_sb_id = igu_sb_id;
2183 }
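
/* Decode example (illustrative): a raw CAM value with
 * FUNCTION_NUMBER = 3, PF_VALID = 1 and VECTOR_NUMBER = 5 yields
 * function_id = 3, is_pf = 1, vector_number = 5; whether the entry
 * belongs to this PF (or its VFs) is classified afterwards in
 * ecore_int_igu_read_cam().
 */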
2184 
2185 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
2186 					    struct ecore_ptt *p_ptt)
2187 {
2188 	struct ecore_igu_info *p_igu_info;
2189 	struct ecore_igu_block *p_block;
2190 	u32 min_vf = 0, max_vf = 0;
2191 	u16 igu_sb_id;
2192 
2193 	p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
2194 						 GFP_KERNEL,
2195 						 sizeof(*p_igu_info));
2196 	if (!p_hwfn->hw_info.p_igu_info)
2197 		return ECORE_NOMEM;
2198 	p_igu_info = p_hwfn->hw_info.p_igu_info;
2199 
2200 	/* Distinguish between an existent and a non-existent default SB */
2201 	p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
2202 
2203 	/* Find the range of VF ids whose SB belong to this PF */
2204 	if (p_hwfn->p_dev->p_iov_info) {
2205 		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
2206 
2207 		min_vf = p_iov->first_vf_in_pf;
2208 		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
2209 	}
2210 
2211 	for (igu_sb_id = 0;
2212 	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2213 	     igu_sb_id++) {
2214 		/* Read current entry; Notice it might not belong to this PF */
2215 		ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
2216 		p_block = &p_igu_info->entry[igu_sb_id];
2217 
2218 		if ((p_block->is_pf) &&
2219 		    (p_block->function_id == p_hwfn->rel_pf_id)) {
2220 			p_block->status = ECORE_IGU_STATUS_PF |
2221 					  ECORE_IGU_STATUS_VALID |
2222 					  ECORE_IGU_STATUS_FREE;
2223 
2224 			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2225 				p_igu_info->usage.cnt++;
2226 		} else if (!(p_block->is_pf) &&
2227 			   (p_block->function_id >= min_vf) &&
2228 			   (p_block->function_id < max_vf)) {
2229 			/* Available for VFs of this PF */
2230 			p_block->status = ECORE_IGU_STATUS_VALID |
2231 					  ECORE_IGU_STATUS_FREE;
2232 
2233 			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2234 				p_igu_info->usage.iov_cnt++;
2235 		}
2236 
2237 		/* Mark the first entry belonging to the PF or its VFs
2238 		 * as the default SB [we'll reset the IGU prior to first usage].
2239 		 */
2240 		if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
2241 		    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
2242 			p_igu_info->igu_dsb_id = igu_sb_id;
2243 			p_block->status |= ECORE_IGU_STATUS_DSB;
2244 		}
2245 
2246 		/* While this isn't suitable for all clients, limit the number
2247 		 * of prints by having each PF print only its own entries, with
2248 		 * the exception of PF0, which prints everything.
2249 		 */
2250 		if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
2251 		    (p_hwfn->abs_pf_id == 0))
2252 			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2253 				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2254 				   igu_sb_id, p_block->function_id,
2255 				   p_block->is_pf, p_block->vector_number);
2256 	}
2257 
2258 	if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
2259 		DP_NOTICE(p_hwfn, true,
2260 			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
2261 			  p_igu_info->igu_dsb_id);
2262 		return ECORE_INVAL;
2263 	}
2264 
2265 	/* All non default SB are considered free at this point */
2266 	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
2267 	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
2268 
2269 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2270 		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
2271 		   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
2272 		   p_igu_info->usage.iov_cnt);
2273 
2274 	return ECORE_SUCCESS;
2275 }
2276 
2277 enum _ecore_status_t
2278 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2279 			  u16 sb_id, bool b_to_vf)
2280 {
2281 	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2282 	struct ecore_igu_block *p_block = OSAL_NULL;
2283 	u16 igu_sb_id = 0, vf_num = 0;
2284 	u32 val = 0;
2285 
2286 	if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
2287 		return ECORE_INVAL;
2288 
2289 	if (sb_id == ECORE_SP_SB_ID)
2290 		return ECORE_INVAL;
2291 
2292 	if (!p_info->b_allow_pf_vf_change) {
2293 		DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
2294 		return ECORE_INVAL;
2295 	}
2296 
2297 	/* If we're moving an SB from PF to VF, the client must specify
2298 	 * which vector it wants to move.
2299 	 */
2300 	if (b_to_vf) {
2301 		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
2302 		if (igu_sb_id == ECORE_SB_INVALID_IDX)
2303 			return ECORE_INVAL;
2304 	}
2305 
2306 	/* If we're moving an SB from VF to PF, we need to validate that there
2307 	 * isn't already a line configured for that vector.
2308 	 */
2309 	if (!b_to_vf) {
2310 		if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
2311 		    ECORE_SB_INVALID_IDX)
2312 			return ECORE_INVAL;
2313 	}
2314 
2315 	/* We need to validate that the SB can actually be relocated.
2316 	 * This would also handle the previous case where we've explicitly
2317 	 * stated which IGU SB needs to move.
2318 	 */
2319 	for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2320 	     igu_sb_id++) {
2321 		p_block = &p_info->entry[igu_sb_id];
2322 
2323 		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
2324 		    !(p_block->status & ECORE_IGU_STATUS_FREE) ||
2325 		    (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
2326 			if (b_to_vf)
2327 				return ECORE_INVAL;
2328 			else
2329 				continue;
2330 		}
2331 
2332 		break;
2333 	}
2334 
2335 	if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
2336 		DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
2337 			   "Failed to find a free SB to move\n");
2338 		return ECORE_INVAL;
2339 	}
2340 
2341 	/* At this point, p_block points to the SB we want to relocate */
2342 	if (b_to_vf) {
2343 		p_block->status &= ~ECORE_IGU_STATUS_PF;
2344 
2345 		/* It doesn't matter which VF number we choose, since we're
2346 		 * going to disable the line; but let's keep it within range.
2347 		 */
2348 		vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
2349 
2350 		p_block->function_id = (u8)vf_num;
2351 		p_block->is_pf = 0;
2352 		p_block->vector_number = 0;
2353 
2354 		p_info->usage.cnt--;
2355 		p_info->usage.free_cnt--;
2356 		p_info->usage.iov_cnt++;
2357 		p_info->usage.free_cnt_iov++;
2358 
2359 		/* TODO - if SBs aren't really the limiting factor,
2360 		 * then this might not be accurate [in the sense that
2361 		 * we might not need to decrement the feature count].
2362 		 */
2363 		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
2364 		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
2365 	} else {
2366 		p_block->status |= ECORE_IGU_STATUS_PF;
2367 		p_block->function_id = p_hwfn->rel_pf_id;
2368 		p_block->is_pf = 1;
2369 		p_block->vector_number = sb_id + 1;
2370 
2371 		p_info->usage.cnt++;
2372 		p_info->usage.free_cnt++;
2373 		p_info->usage.iov_cnt--;
2374 		p_info->usage.free_cnt_iov--;
2375 
2376 		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
2377 		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
2378 	}
2379 
2380 	/* Update the IGU and CAU with the new configuration */
2381 	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2382 		  p_block->function_id);
2383 	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2384 	SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2385 	SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2386 		  p_block->vector_number);
2387 
2388 	ecore_wr(p_hwfn, p_ptt,
2389 		 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
2390 		 val);
2391 
2392 	ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
2393 			      igu_sb_id, vf_num,
2394 			      p_block->is_pf ? 0 : 1);
2395 
2396 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2397 		   "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2398 		   igu_sb_id, p_block->function_id,
2399 		   p_block->is_pf, p_block->vector_number);
2400 
2401 	return ECORE_SUCCESS;
2402 }
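
/* Bookkeeping summary for the relocation above (descriptive sketch):
 * moving PF->VF decrements usage.cnt/free_cnt and increments
 * usage.iov_cnt/free_cnt_iov, shifting one L2 queue from
 * ECORE_PF_L2_QUE to ECORE_VF_L2_QUE; VF->PF is the exact mirror.
 * The IGU CAM line and the CAU SB entry are then rewritten to match.
 */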
2403 
2404 /**
2405  * @brief Initialize igu runtime registers
2406  *
2407  * @param p_hwfn
2408  */
2409 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2410 {
2411 	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2412 
2413 	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2414 }
2415 
2416 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2417 			  IGU_CMD_INT_ACK_BASE)
2418 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2419 			  IGU_CMD_INT_ACK_BASE)
2420 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2421 {
2422 	u32 intr_status_hi = 0, intr_status_lo = 0;
2423 	u64 intr_status = 0;
2424 
2425 	intr_status_lo = REG_RD(p_hwfn,
2426 				GTT_BAR0_MAP_REG_IGU_CMD +
2427 				LSB_IGU_CMD_ADDR * 8);
2428 	intr_status_hi = REG_RD(p_hwfn,
2429 				GTT_BAR0_MAP_REG_IGU_CMD +
2430 				MSB_IGU_CMD_ADDR * 8);
2431 	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2432 
2433 	return intr_status;
2434 }
2435 
2436 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
2437 {
2438 	OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
2439 	p_hwfn->b_sp_dpc_enabled = true;
2440 }
2441 
2442 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2443 {
2444 	p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2445 	if (!p_hwfn->sp_dpc)
2446 		return ECORE_NOMEM;
2447 
2448 	return ECORE_SUCCESS;
2449 }
2450 
2451 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
2452 {
2453 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
2454 	p_hwfn->sp_dpc = OSAL_NULL;
2455 }
2456 
2457 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2458 				     struct ecore_ptt *p_ptt)
2459 {
2460 	enum _ecore_status_t rc = ECORE_SUCCESS;
2461 
2462 	rc = ecore_int_sp_dpc_alloc(p_hwfn);
2463 	if (rc != ECORE_SUCCESS) {
2464 		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
2465 		return rc;
2466 	}
2467 
2468 	rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
2469 	if (rc != ECORE_SUCCESS) {
2470 		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
2471 		return rc;
2472 	}
2473 
2474 	rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
2475 	if (rc != ECORE_SUCCESS)
2476 		DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
2477 
2478 	return rc;
2479 }
2480 
2481 void ecore_int_free(struct ecore_hwfn *p_hwfn)
2482 {
2483 	ecore_int_sp_sb_free(p_hwfn);
2484 	ecore_int_sb_attn_free(p_hwfn);
2485 	ecore_int_sp_dpc_free(p_hwfn);
2486 }
2487 
2488 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2489 {
2490 	if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
2491 		return;
2492 
2493 	ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2494 	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
2495 	ecore_int_sp_dpc_setup(p_hwfn);
2496 }
2497 
2498 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2499 			   struct ecore_sb_cnt_info *p_sb_cnt_info)
2500 {
2501 	struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;
2502 
2503 	if (!p_igu_info || !p_sb_cnt_info)
2504 		return;
2505 
2506 	OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
2507 		    sizeof(*p_sb_cnt_info));
2508 }
2509 
2510 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
2511 {
2512 	int i;
2513 
2514 	for_each_hwfn(p_dev, i)
2515 		p_dev->hwfns[i].b_int_requested = false;
2516 }
2517 
2518 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
2519 {
2520 	p_dev->attn_clr_en = clr_enable;
2521 }
2522 
2523 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
2524 					     struct ecore_ptt *p_ptt,
2525 					     u8 timer_res, u16 sb_id, bool tx)
2526 {
2527 	struct cau_sb_entry sb_entry;
2528 	enum _ecore_status_t rc;
2529 
2530 	if (!p_hwfn->hw_init_done) {
2531 		DP_ERR(p_hwfn, "hardware not initialized yet\n");
2532 		return ECORE_INVAL;
2533 	}
2534 
2535 	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2536 				 sb_id * sizeof(u64),
2537 				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2538 	if (rc != ECORE_SUCCESS) {
2539 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2540 		return rc;
2541 	}
2542 
2543 	if (tx)
2544 		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2545 	else
2546 		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2547 
2548 	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
2549 				 (u64)(osal_uintptr_t)&sb_entry,
2550 				 CAU_REG_SB_VAR_MEMORY +
2551 				 sb_id * sizeof(u64), 2, 0);
2552 	if (rc != ECORE_SUCCESS) {
2553 		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
2554 		return rc;
2555 	}
2556 
2557 	return rc;
2558 }
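
/* Usage sketch (hypothetical caller): widen the Rx timer resolution of
 * SB 3 so that coalescing values up to 0x7F << 2 microseconds fit the
 * 7-bit timeset:
 *
 *	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, 2, 3, false);
 *
 * The routine performs a DMAE read-modify-write of the CAU SB entry,
 * so it is valid only once hw_init_done is set.
 */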
2559 
2560 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
2561 					  struct ecore_ptt *p_ptt,
2562 					  struct ecore_sb_info *p_sb,
2563 					  struct ecore_sb_info_dbg *p_info)
2564 {
2565 	u16 sbid = p_sb->igu_sb_id;
2566 	int i;
2567 
2568 	if (IS_VF(p_hwfn->p_dev))
2569 		return ECORE_INVAL;
2570 
2571 	if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
2572 		return ECORE_INVAL;
2573 
2574 	p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
2575 				    IGU_REG_PRODUCER_MEMORY + sbid * 4);
2576 	p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
2577 				    IGU_REG_CONSUMER_MEM + sbid * 4);
2578 
2579 	for (i = 0; i < PIS_PER_SB; i++)
2580 		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
2581 					      CAU_REG_PI_MEMORY +
2582 					      sbid * 4 * PIS_PER_SB +  i * 4);
2583 
2584 	return ECORE_SUCCESS;
2585 }
2586