xref: /linux/drivers/net/ethernet/qlogic/qed/qed_dev.c (revision 3f2fb9a834cb1fcddbae22deca7fde136944dc89)
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <linux/io.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/errno.h>
15 #include <linux/kernel.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/etherdevice.h>
21 #include <linux/qed/qed_chain.h>
22 #include <linux/qed/qed_if.h>
23 #include "qed.h"
24 #include "qed_cxt.h"
25 #include "qed_dev_api.h"
26 #include "qed_hsi.h"
27 #include "qed_hw.h"
28 #include "qed_init_ops.h"
29 #include "qed_int.h"
30 #include "qed_mcp.h"
31 #include "qed_reg_addr.h"
32 #include "qed_sp.h"
33 
34 /* API common to all protocols */
35 void qed_init_dp(struct qed_dev *cdev,
36 		 u32 dp_module, u8 dp_level)
37 {
38 	u32 i;
39 
40 	cdev->dp_level = dp_level;
41 	cdev->dp_module = dp_module;
42 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
43 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
44 
45 		p_hwfn->dp_level = dp_level;
46 		p_hwfn->dp_module = dp_module;
47 	}
48 }
49 
50 void qed_init_struct(struct qed_dev *cdev)
51 {
52 	u8 i;
53 
54 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
55 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
56 
57 		p_hwfn->cdev = cdev;
58 		p_hwfn->my_id = i;
59 		p_hwfn->b_active = false;
60 
61 		mutex_init(&p_hwfn->dmae_info.mutex);
62 	}
63 
64 	/* hwfn 0 is always active */
65 	cdev->hwfns[0].b_active = true;
66 
67 	/* set the default cache alignment to 128 */
68 	cdev->cache_shift = 7;
69 }
70 
71 static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
72 {
73 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
74 
75 	kfree(qm_info->qm_pq_params);
76 	qm_info->qm_pq_params = NULL;
77 	kfree(qm_info->qm_vport_params);
78 	qm_info->qm_vport_params = NULL;
79 	kfree(qm_info->qm_port_params);
80 	qm_info->qm_port_params = NULL;
81 }
82 
83 void qed_resc_free(struct qed_dev *cdev)
84 {
85 	int i;
86 
87 	kfree(cdev->fw_data);
88 	cdev->fw_data = NULL;
89 
90 	kfree(cdev->reset_stats);
91 
92 	for_each_hwfn(cdev, i) {
93 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
94 
95 		kfree(p_hwfn->p_tx_cids);
96 		p_hwfn->p_tx_cids = NULL;
97 		kfree(p_hwfn->p_rx_cids);
98 		p_hwfn->p_rx_cids = NULL;
99 	}
100 
101 	for_each_hwfn(cdev, i) {
102 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
103 
104 		qed_cxt_mngr_free(p_hwfn);
105 		qed_qm_info_free(p_hwfn);
106 		qed_spq_free(p_hwfn);
107 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
108 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
109 		qed_int_free(p_hwfn);
110 		qed_dmae_info_free(p_hwfn);
111 	}
112 }
113 
114 static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
115 {
116 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
117 	struct init_qm_port_params *p_qm_port;
118 	u8 num_vports, i, vport_id, num_ports;
119 	u16 num_pqs, multi_cos_tcs = 1;
120 
121 	memset(qm_info, 0, sizeof(*qm_info));
122 
123 	num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
124 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
125 
126 	/* Sanity checking that setup requires legal number of resources */
127 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
128 		DP_ERR(p_hwfn,
129 		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
130 		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
131 		return -EINVAL;
132 	}
133 
134 	/* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
135 	 */
136 	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
137 					num_pqs, GFP_KERNEL);
138 	if (!qm_info->qm_pq_params)
139 		goto alloc_err;
140 
141 	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
142 					   num_vports, GFP_KERNEL);
143 	if (!qm_info->qm_vport_params)
144 		goto alloc_err;
145 
146 	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
147 					  MAX_NUM_PORTS, GFP_KERNEL);
148 	if (!qm_info->qm_port_params)
149 		goto alloc_err;
150 
151 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
152 
153 	/* First init per-TC PQs */
154 	for (i = 0; i < multi_cos_tcs; i++) {
155 		struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
156 
157 		params->vport_id = vport_id;
158 		params->tc_id = p_hwfn->hw_info.non_offload_tc;
159 		params->wrr_group = 1;
160 	}
161 
162 	/* Then init pure-LB PQ */
163 	qm_info->pure_lb_pq = i;
164 	qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
165 	qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
166 	qm_info->qm_pq_params[i].wrr_group = 1;
167 	i++;
168 
169 	qm_info->offload_pq = 0;
170 	qm_info->num_pqs = num_pqs;
171 	qm_info->num_vports = num_vports;
172 
173 	/* Initialize qm port parameters */
174 	num_ports = p_hwfn->cdev->num_ports_in_engines;
175 	for (i = 0; i < num_ports; i++) {
176 		p_qm_port = &qm_info->qm_port_params[i];
177 		p_qm_port->active = 1;
178 		p_qm_port->num_active_phys_tcs = 4;
179 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
180 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
181 	}
182 
183 	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
184 
185 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
186 
187 	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
188 
189 	qm_info->pf_wfq = 0;
190 	qm_info->pf_rl = 0;
191 	qm_info->vport_rl_en = 1;
192 
193 	return 0;
194 
195 alloc_err:
196 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
197 	kfree(qm_info->qm_pq_params);
198 	kfree(qm_info->qm_vport_params);
199 	kfree(qm_info->qm_port_params);
200 
201 	return -ENOMEM;
202 }
203 
/* Allocate all software resources for the device: firmware data, per-hwfn
 * queue->CID maps, context manager, QM info, ILT tables, SPQ/EQ/ConsQ,
 * interrupt and DMAE state, and reset statistics.
 *
 * The per-hwfn steps are order-dependent: CID/QM sizing must precede the
 * ILT computation, which must precede table and SPQ allocation.
 *
 * Return: 0 on success, negative errno on failure (all partially-allocated
 * resources are released via qed_resc_free()).
 */
int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		/* EQ */
		p_eq = qed_eq_alloc(p_hwfn, 256);
		if (!p_eq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_consq = p_consq;

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
		rc = -ENOMEM;
		goto alloc_err;
	}

	return 0;

alloc_err:
	/* Full teardown; qed_resc_free() handles partially-allocated state */
	qed_resc_free(cdev);
	return rc;
}
321 
/* Post-allocation setup pass: initialize the contents of the resources
 * acquired in qed_resc_alloc() for every hw-function.
 */
void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		/* Snapshot it so later reads can detect changes */
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
	}
}
343 
344 #define FINAL_CLEANUP_POLL_CNT          (100)
345 #define FINAL_CLEANUP_POLL_TIME         (10)
346 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
347 		      struct qed_ptt *p_ptt,
348 		      u16 id)
349 {
350 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
351 	int rc = -EBUSY;
352 
353 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
354 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
355 
356 	command |= X_FINAL_CLEANUP_AGG_INT <<
357 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
358 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
359 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
360 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
361 
362 	/* Make sure notification is not set before initiating final cleanup */
363 	if (REG_RD(p_hwfn, addr)) {
364 		DP_NOTICE(
365 			p_hwfn,
366 			"Unexpected; Found final cleanup notification before initiating final cleanup\n");
367 		REG_WR(p_hwfn, addr, 0);
368 	}
369 
370 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
371 		   "Sending final cleanup for PFVF[%d] [Command %08x\n]",
372 		   id, command);
373 
374 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
375 
376 	/* Poll until completion */
377 	while (!REG_RD(p_hwfn, addr) && count--)
378 		msleep(FINAL_CLEANUP_POLL_TIME);
379 
380 	if (REG_RD(p_hwfn, addr))
381 		rc = 0;
382 	else
383 		DP_NOTICE(p_hwfn,
384 			  "Failed to receive FW final cleanup notification\n");
385 
386 	/* Cleanup afterwards */
387 	REG_WR(p_hwfn, addr, 0);
388 
389 	return rc;
390 }
391 
392 static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
393 {
394 	int hw_mode = 0;
395 
396 	hw_mode = (1 << MODE_BB_A0);
397 
398 	switch (p_hwfn->cdev->num_ports_in_engines) {
399 	case 1:
400 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
401 		break;
402 	case 2:
403 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
404 		break;
405 	case 4:
406 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
407 		break;
408 	default:
409 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
410 			  p_hwfn->cdev->num_ports_in_engines);
411 		return;
412 	}
413 
414 	switch (p_hwfn->cdev->mf_mode) {
415 	case QED_MF_DEFAULT:
416 	case QED_MF_NPAR:
417 		hw_mode |= 1 << MODE_MF_SI;
418 		break;
419 	case QED_MF_OVLAN:
420 		hw_mode |= 1 << MODE_MF_SD;
421 		break;
422 	default:
423 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
424 		hw_mode |= 1 << MODE_MF_SI;
425 	}
426 
427 	hw_mode |= 1 << MODE_ASIC;
428 
429 	p_hwfn->hw_info.hw_mode = hw_mode;
430 }
431 
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		/* Walk the IGU mapping; only PF-owned status blocks get a
		 * CAU runtime entry here.
		 */
		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id,
					      0, 0);
			/* Each CAU SB entry occupies two RT registers */
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
					 sb_entry);
		}
	}
}
460 
/* Engine-wide (common) HW init phase. Runs once per engine, by the first
 * loading function: CAU runtime data, GTT windows, QM common config, common
 * context init, NIG gating, and the init-tool ENGINE phase itself.
 *
 * Return: 0 on success, or the init-tool's error code.
 */
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	/* Enable rate limiting / WFQ only if the MFW reports bandwidth
	 * constraints for this function.
	 */
	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	/* Repeat the writes while pretending to be the sibling port */
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc != 0)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	/* Disable relaxed ordering in the PCI config space */
	qed_wr(p_hwfn, p_ptt, 0x20b4,
	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

	return rc;
}
520 
521 static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
522 			    struct qed_ptt *p_ptt,
523 			    int hw_mode)
524 {
525 	int rc = 0;
526 
527 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
528 			  hw_mode);
529 	return rc;
530 }
531 
/* Per-PF HW init phase: QM bandwidth setup, context/IGU runtime config,
 * NIG classification, final cleanup of any previous driver instance, the
 * init-tool PF and QM_PF phases, and - if @b_hw_start - interrupt enable
 * plus the function-start ramrod.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration - disable all searcher protocols here;
	 * they are enabled per-protocol later as needed.
	 */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
	if (rc != 0)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW  */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
	return rc;
}
607 
608 static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
609 			       struct qed_ptt *p_ptt,
610 			       u8 enable)
611 {
612 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
613 
614 	/* Change PF in PXP */
615 	qed_wr(p_hwfn, p_ptt,
616 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
617 
618 	/* wait until value is set - try for 1 second every 50us */
619 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
620 		val = qed_rd(p_hwfn, p_ptt,
621 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
622 		if (val == set_val)
623 			break;
624 
625 		usleep_range(50, 60);
626 	}
627 
628 	if (val != set_val) {
629 		DP_NOTICE(p_hwfn,
630 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
631 		return -EAGAIN;
632 	}
633 
634 	return 0;
635 }
636 
/* Refresh the MFW mailbox and reset our shadow copy to match it, so later
 * mailbox polling only reacts to new MFW messages.
 */
static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur,
	       p_hwfn->mcp_info->mfw_mb_length);
}
646 
647 int qed_hw_init(struct qed_dev *cdev,
648 		bool b_hw_start,
649 		enum qed_int_mode int_mode,
650 		bool allow_npar_tx_switch,
651 		const u8 *bin_fw_data)
652 {
653 	struct qed_storm_stats *p_stat;
654 	u32 load_code, param, *p_address;
655 	int rc, mfw_rc, i;
656 	u8 fw_vport = 0;
657 
658 	rc = qed_init_fw_data(cdev, bin_fw_data);
659 	if (rc != 0)
660 		return rc;
661 
662 	for_each_hwfn(cdev, i) {
663 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
664 
665 		rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
666 		if (rc != 0)
667 			return rc;
668 
669 		/* Enable DMAE in PXP */
670 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
671 
672 		qed_calc_hw_mode(p_hwfn);
673 
674 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
675 				      &load_code);
676 		if (rc) {
677 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
678 			return rc;
679 		}
680 
681 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
682 
683 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
684 			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
685 			   rc, load_code);
686 
687 		p_hwfn->first_on_engine = (load_code ==
688 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
689 
690 		switch (load_code) {
691 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
692 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
693 						p_hwfn->hw_info.hw_mode);
694 			if (rc)
695 				break;
696 		/* Fall into */
697 		case FW_MSG_CODE_DRV_LOAD_PORT:
698 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
699 					      p_hwfn->hw_info.hw_mode);
700 			if (rc)
701 				break;
702 
703 		/* Fall into */
704 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
705 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
706 					    p_hwfn->hw_info.hw_mode,
707 					    b_hw_start, int_mode,
708 					    allow_npar_tx_switch);
709 			break;
710 		default:
711 			rc = -EINVAL;
712 			break;
713 		}
714 
715 		if (rc)
716 			DP_NOTICE(p_hwfn,
717 				  "init phase failed for loadcode 0x%x (rc %d)\n",
718 				   load_code, rc);
719 
720 		/* ACK mfw regardless of success or failure of initialization */
721 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
722 				     DRV_MSG_CODE_LOAD_DONE,
723 				     0, &load_code, &param);
724 		if (rc)
725 			return rc;
726 		if (mfw_rc) {
727 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
728 			return mfw_rc;
729 		}
730 
731 		p_hwfn->hw_init_done = true;
732 
733 		/* init PF stats */
734 		p_stat = &p_hwfn->storm_stats;
735 		p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
736 					 MSTORM_QUEUE_STAT_OFFSET(fw_vport);
737 		p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
738 
739 		p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
740 					 USTORM_QUEUE_STAT_OFFSET(fw_vport);
741 		p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
742 
743 		p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
744 					 PSTORM_QUEUE_STAT_OFFSET(fw_vport);
745 		p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
746 
747 		p_address = &p_stat->tstats.address;
748 		*p_address = BAR0_MAP_REG_TSDM_RAM +
749 			     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
750 		p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
751 	}
752 
753 	return 0;
754 }
755 
#define QED_HW_STOP_RETRY_LIMIT (10)
/* Full HW/FW stop: send the PF-stop ramrod, gate NIG traffic, disable the
 * parser search engines and timers, wait for timer scans to drain, mask
 * interrupts, and finally disable DMAE for the device.
 *
 * Return: 0 on success, negative errno on ramrod or DMAE-disable failure.
 */
int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc;
	int i, j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		/* NOTE(review): a failure here returns immediately, leaving
		 * any remaining hw-functions running and DMAE enabled -
		 * confirm whether continuing with the teardown would be
		 * preferable.
		 */
		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			return rc;

		/* Gate incoming NIG traffic for this PF */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		/* Disable all parser protocol searches */
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		/* Stop the timers block, then poll until its linear scans
		 * complete (up to QED_HW_STOP_RETRY_LIMIT attempts).
		 */
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
			if ((!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
			    (!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_TASK)))
				break;

			usleep_range(1000, 2000);
		}
		if (i == QED_HW_STOP_RETRY_LIMIT)
			DP_NOTICE(p_hwfn,
				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_CONN),
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_TASK));

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	/* Disable DMAE in PXP - in CMT, this should only be done for
	 * first hw-function, and only after all transactions have
	 * stopped for all active hw-functions.
	 */
	t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
				   cdev->hwfns[0].p_main_ptt,
				   false);
	if (t_rc != 0)
		rc = t_rc;

	return rc;
}
827 
/* Stop only the fastpath: gate NIG traffic, disable parser searches and
 * timers and wait for their scans to drain - without the PF-stop ramrod or
 * interrupt/DMAE teardown done by qed_hw_stop(). Traffic can be re-opened
 * later via qed_hw_start_fastpath().
 */
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int i, j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt   = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		/* Gate incoming NIG traffic for this PF */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		/* Disable all parser protocol searches */
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		/* Stop the timers block and wait for its scans to finish */
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
			if ((!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
			    (!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_TASK)))
				break;

			usleep_range(1000, 2000);
		}
		if (i == QED_HW_STOP_RETRY_LIMIT)
			DP_NOTICE(p_hwfn,
				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_CONN),
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_TASK));

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}
874 
875 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
876 {
877 	/* Re-open incoming traffic */
878 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
879 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
880 }
881 
882 static int qed_reg_assert(struct qed_hwfn *hwfn,
883 			  struct qed_ptt *ptt, u32 reg,
884 			  bool expected)
885 {
886 	u32 assert_val = qed_rd(hwfn, ptt, reg);
887 
888 	if (assert_val != expected) {
889 		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
890 			  reg, expected);
891 		return -EINVAL;
892 	}
893 
894 	return 0;
895 }
896 
/* Reset the device towards unload: sanity-check QM usage counters, disable
 * the PF in the doorbell/QM/CFC blocks, then perform the MFW UNLOAD_REQ /
 * UNLOAD_DONE handshake for every hw-function.
 *
 * Return: 0 on success, negative errno if UNLOAD_DONE fails.
 */
int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP; on failure pretend the MFW
		 * answered UNLOAD_ENGINE and press on with UNLOAD_DONE.
		 */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ,
				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}
943 
944 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
945 static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
946 {
947 	qed_ptt_pool_free(p_hwfn);
948 	kfree(p_hwfn->hw_info.p_igu_info);
949 }
950 
/* Setup bar access: allocate the PTT pool, grab the reserved main PTT and
 * bring the PGLUE_B block into a clean, usable state for this PF.
 *
 * Return: 0 on success, negative errno if the PTT pool allocation fails.
 */
static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc)
		return rc;

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	return 0;
}
981 
982 static void get_function_id(struct qed_hwfn *p_hwfn)
983 {
984 	/* ME Register */
985 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
986 
987 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
988 
989 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
990 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
991 				      PXP_CONCRETE_FID_PFID);
992 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
993 				    PXP_CONCRETE_FID_PORT);
994 }
995 
996 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
997 {
998 	u32 *feat_num = p_hwfn->hw_info.feat_num;
999 	int num_features = 1;
1000 
1001 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1002 						num_features,
1003 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
1004 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1005 		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
1006 		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
1007 		   num_features);
1008 }
1009 
/* Partition the engine's HW resources evenly across the maximum number of
 * PFs and record this function's share (count and start index) in hw_info.
 * Also derives the per-feature budgets via qed_hw_set_feat().
 */
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	int num_funcs, i;

	/* Assume the worst-case PF count rather than the actual one */
	num_funcs = MAX_NUM_PFS_BB;

	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
				 qed_int_get_num_sbs(p_hwfn, NULL));
	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
	resc_num[QED_RL] = 8;
	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
			     num_funcs;
	resc_num[QED_ILT] = 950;

	/* Each PF's slice starts right after the previous PF's */
	for (i = 0; i < QED_MAX_RESC; i++)
		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n"
		   "SB = %d start = %d\n"
		   "L2_QUEUE = %d start = %d\n"
		   "VPORT = %d start = %d\n"
		   "PQ = %d start = %d\n"
		   "RL = %d start = %d\n"
		   "MAC = %d start = %d\n"
		   "VLAN = %d start = %d\n"
		   "ILT = %d start = %d\n",
		   p_hwfn->hw_info.resc_num[QED_SB],
		   p_hwfn->hw_info.resc_start[QED_SB],
		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_num[QED_VPORT],
		   p_hwfn->hw_info.resc_start[QED_VPORT],
		   p_hwfn->hw_info.resc_num[QED_PQ],
		   p_hwfn->hw_info.resc_start[QED_PQ],
		   p_hwfn->hw_info.resc_num[QED_RL],
		   p_hwfn->hw_info.resc_start[QED_RL],
		   p_hwfn->hw_info.resc_num[QED_MAC],
		   p_hwfn->hw_info.resc_start[QED_MAC],
		   p_hwfn->hw_info.resc_num[QED_VLAN],
		   p_hwfn->hw_info.resc_start[QED_VLAN],
		   p_hwfn->hw_info.resc_num[QED_ILT],
		   p_hwfn->hw_info.resc_start[QED_ILT]);
}
1063 
/* Read the default configuration the MFW derived from the NVM image -
 * port mode, default link settings, flow control, multi-function mode
 * and device capabilities - from the shared-memory scratchpad.
 * Returns 0 on success, -EINVAL if the MCP never initialized shmem,
 * or the result of qed_mcp_fill_shmem_func_info().
 */
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1  (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	/* Read Vendor Id / Device Id */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, pci_id);
	p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
				    NVM_CFG1_GLOB_VENDOR_ID_MASK;

	/* Translate the nvm-encoded network port mode into the driver enum */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		/* Only log; hw_info.port_mode is left untouched */
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	/* Read default link configuration for this hwfn's port */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link->speed.advertised_speeds =
		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
						link->speed.advertised_speeds;

	/* Default link speed: either autoneg or a forced speed in Mbps */
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	/* Default flow-control: autoneg / forced rx / forced tx bits */
	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	/* Unrecognized mf_mode values leave cdev->mf_mode untouched */
	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		offsetof(struct nvm_cfg1, glob) +
		offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
1226 
1227 static int
1228 qed_get_hw_info(struct qed_hwfn *p_hwfn,
1229 		struct qed_ptt *p_ptt,
1230 		enum qed_pci_personality personality)
1231 {
1232 	u32 port_mode;
1233 	int rc;
1234 
1235 	/* Read the port mode */
1236 	port_mode = qed_rd(p_hwfn, p_ptt,
1237 			   CNIG_REG_NW_PORT_MODE_BB_B0);
1238 
1239 	if (port_mode < 3) {
1240 		p_hwfn->cdev->num_ports_in_engines = 1;
1241 	} else if (port_mode <= 5) {
1242 		p_hwfn->cdev->num_ports_in_engines = 2;
1243 	} else {
1244 		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1245 			  p_hwfn->cdev->num_ports_in_engines);
1246 
1247 		/* Default num_ports_in_engines to something */
1248 		p_hwfn->cdev->num_ports_in_engines = 1;
1249 	}
1250 
1251 	qed_hw_get_nvm_info(p_hwfn, p_ptt);
1252 
1253 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1254 	if (rc)
1255 		return rc;
1256 
1257 	if (qed_mcp_is_init(p_hwfn))
1258 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1259 				p_hwfn->mcp_info->func_info.mac);
1260 	else
1261 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1262 
1263 	if (qed_mcp_is_init(p_hwfn)) {
1264 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1265 			p_hwfn->hw_info.ovlan =
1266 				p_hwfn->mcp_info->func_info.ovlan;
1267 
1268 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1269 	}
1270 
1271 	if (qed_mcp_is_init(p_hwfn)) {
1272 		enum qed_pci_personality protocol;
1273 
1274 		protocol = p_hwfn->mcp_info->func_info.protocol;
1275 		p_hwfn->hw_info.personality = protocol;
1276 	}
1277 
1278 	qed_hw_get_resc(p_hwfn);
1279 
1280 	return rc;
1281 }
1282 
1283 static void qed_get_dev_info(struct qed_dev *cdev)
1284 {
1285 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1286 	u32 tmp;
1287 
1288 	/* Read Vendor Id / Device Id */
1289 	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1290 			     &cdev->vendor_id);
1291 	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1292 			     &cdev->device_id);
1293 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1294 				     MISCS_REG_CHIP_NUM);
1295 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1296 				     MISCS_REG_CHIP_REV);
1297 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
1298 
1299 	cdev->type = QED_DEV_TYPE_BB;
1300 	/* Learn number of HW-functions */
1301 	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1302 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
1303 
1304 	if (tmp & (1 << p_hwfn->rel_pf_id)) {
1305 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1306 		cdev->num_hwfns = 2;
1307 	} else {
1308 		cdev->num_hwfns = 1;
1309 	}
1310 
1311 	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1312 				    MISCS_REG_CHIP_TEST_REG) >> 4;
1313 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1314 	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1315 				       MISCS_REG_CHIP_METAL);
1316 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1317 
1318 	DP_INFO(cdev->hwfns,
1319 		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1320 		cdev->chip_num, cdev->chip_rev,
1321 		cdev->chip_bond_id, cdev->chip_metal);
1322 }
1323 
/* Prepare a single hw-function for use: attach its register/doorbell
 * views, verify chip access, learn device-wide info (leading hwfn
 * only), initialize the MCP channel, read HW/SHMEM configuration and
 * set up the init-ops engine. On failure, unwinds in reverse order of
 * acquisition via the goto ladder and returns the error code.
 */
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	/* Validate that chip access is feasible; all-Fs usually means the
	 * device is inaccessible, so bail out before touching anything.
	 */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	rc = qed_hw_hwfn_prepare(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
		goto err0;
	}

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id)
		qed_get_dev_info(p_hwfn->cdev);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
		goto err2;
	}

	return rc;
err2:
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}
1383 
1384 static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
1385 			   u8			bar_id)
1386 {
1387 	u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
1388 		       : PGLUE_B_REG_PF_BAR1_SIZE);
1389 	u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
1390 
1391 	/* Get the BAR size(in KB) from hardware given val */
1392 	return 1 << (val + 15);
1393 }
1394 
/* Prepare all hw-functions of the device: store the precompiled init
 * data, prepare the leading hwfn (which learns how many hwfns exist),
 * and in CMT mode prepare the second hwfn with the bar views shifted
 * to the second engine's half of each bar.
 * Returns 0 on success or a negative errno.
 */
int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	/* The second hwfn inherits the personality the first one learned */
	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			qed_init_free(p_hwfn);
			qed_mcp_free(p_hwfn);
			qed_hw_hwfn_free(p_hwfn);
		}
	}

	return rc;
}
1442 
1443 void qed_hw_remove(struct qed_dev *cdev)
1444 {
1445 	int i;
1446 
1447 	for_each_hwfn(cdev, i) {
1448 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1449 
1450 		qed_init_free(p_hwfn);
1451 		qed_hw_hwfn_free(p_hwfn);
1452 		qed_mcp_free(p_hwfn);
1453 	}
1454 }
1455 
1456 int qed_chain_alloc(struct qed_dev *cdev,
1457 		    enum qed_chain_use_mode intended_use,
1458 		    enum qed_chain_mode mode,
1459 		    u16 num_elems,
1460 		    size_t elem_size,
1461 		    struct qed_chain *p_chain)
1462 {
1463 	dma_addr_t p_pbl_phys = 0;
1464 	void *p_pbl_virt = NULL;
1465 	dma_addr_t p_phys = 0;
1466 	void *p_virt = NULL;
1467 	u16 page_cnt = 0;
1468 	size_t size;
1469 
1470 	if (mode == QED_CHAIN_MODE_SINGLE)
1471 		page_cnt = 1;
1472 	else
1473 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1474 
1475 	size = page_cnt * QED_CHAIN_PAGE_SIZE;
1476 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1477 				    size, &p_phys, GFP_KERNEL);
1478 	if (!p_virt) {
1479 		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1480 		goto nomem;
1481 	}
1482 
1483 	if (mode == QED_CHAIN_MODE_PBL) {
1484 		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1485 		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1486 						size, &p_pbl_phys,
1487 						GFP_KERNEL);
1488 		if (!p_pbl_virt) {
1489 			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1490 			goto nomem;
1491 		}
1492 
1493 		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1494 				   (u8)elem_size, intended_use,
1495 				   p_pbl_phys, p_pbl_virt);
1496 	} else {
1497 		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1498 			       (u8)elem_size, intended_use, mode);
1499 	}
1500 
1501 	return 0;
1502 
1503 nomem:
1504 	dma_free_coherent(&cdev->pdev->dev,
1505 			  page_cnt * QED_CHAIN_PAGE_SIZE,
1506 			  p_virt, p_phys);
1507 	dma_free_coherent(&cdev->pdev->dev,
1508 			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1509 			  p_pbl_virt, p_pbl_phys);
1510 
1511 	return -ENOMEM;
1512 }
1513 
1514 void qed_chain_free(struct qed_dev *cdev,
1515 		    struct qed_chain *p_chain)
1516 {
1517 	size_t size;
1518 
1519 	if (!p_chain->p_virt_addr)
1520 		return;
1521 
1522 	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1523 		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1524 		dma_free_coherent(&cdev->pdev->dev, size,
1525 				  p_chain->pbl.p_virt_table,
1526 				  p_chain->pbl.p_phys_table);
1527 	}
1528 
1529 	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1530 	dma_free_coherent(&cdev->pdev->dev, size,
1531 			  p_chain->p_virt_addr,
1532 			  p_chain->p_phys_addr);
1533 }
1534 
/* Zero @stats, then accumulate into it the per-queue storm statistics
 * (mstorm/ustorm/pstorm/tstorm) and the port statistics from shmem,
 * summed over all hw-functions of @cdev. Hwfns for which a ptt cannot
 * be acquired are skipped (their counters are simply not added).
 */
static void __qed_get_vport_stats(struct qed_dev *cdev,
				  struct qed_eth_stats  *stats)
{
	int i, j;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct tstorm_per_port_stat tstats;
		struct port_stats port_stats;
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

		if (!p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		/* Pull each storm's statistics block out of the chip */
		memset(&mstats, 0, sizeof(mstats));
		qed_memcpy_from(p_hwfn, p_ptt, &mstats,
				p_hwfn->storm_stats.mstats.address,
				p_hwfn->storm_stats.mstats.len);

		memset(&ustats, 0, sizeof(ustats));
		qed_memcpy_from(p_hwfn, p_ptt, &ustats,
				p_hwfn->storm_stats.ustats.address,
				p_hwfn->storm_stats.ustats.len);

		memset(&pstats, 0, sizeof(pstats));
		qed_memcpy_from(p_hwfn, p_ptt, &pstats,
				p_hwfn->storm_stats.pstats.address,
				p_hwfn->storm_stats.pstats.len);

		memset(&tstats, 0, sizeof(tstats));
		qed_memcpy_from(p_hwfn, p_ptt, &tstats,
				p_hwfn->storm_stats.tstats.address,
				p_hwfn->storm_stats.tstats.len);

		/* Port statistics live in the MFW's public_port area;
		 * available only once the MCP info was initialized.
		 */
		memset(&port_stats, 0, sizeof(port_stats));

		if (p_hwfn->mcp_info)
			qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
					p_hwfn->mcp_info->port_addr +
					offsetof(struct public_port, stats),
					sizeof(port_stats));
		qed_ptt_release(p_hwfn, p_ptt);

		/* Mstorm: rx discards and TPA aggregation counters */
		stats->no_buff_discards +=
			HILO_64_REGPAIR(mstats.no_buff_discard);
		stats->packet_too_big_discard +=
			HILO_64_REGPAIR(mstats.packet_too_big_discard);
		stats->ttl0_discard +=
			HILO_64_REGPAIR(mstats.ttl0_discard);
		stats->tpa_coalesced_pkts +=
			HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
		stats->tpa_coalesced_events +=
			HILO_64_REGPAIR(mstats.tpa_coalesced_events);
		stats->tpa_aborts_num +=
			HILO_64_REGPAIR(mstats.tpa_aborts_num);
		stats->tpa_coalesced_bytes +=
			HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);

		/* Ustorm: rx unicast/multicast/broadcast bytes & packets */
		stats->rx_ucast_bytes +=
			HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
		stats->rx_mcast_bytes +=
			HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
		stats->rx_bcast_bytes +=
			HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
		stats->rx_ucast_pkts +=
			HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
		stats->rx_mcast_pkts +=
			HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
		stats->rx_bcast_pkts +=
			HILO_64_REGPAIR(ustats.rcv_bcast_pkts);

		/* Tstorm: rx filter discards */
		stats->mftag_filter_discards +=
			HILO_64_REGPAIR(tstats.mftag_filter_discard);
		stats->mac_filter_discards +=
			HILO_64_REGPAIR(tstats.eth_mac_filter_discard);

		/* Pstorm: tx unicast/multicast/broadcast and drops */
		stats->tx_ucast_bytes +=
			HILO_64_REGPAIR(pstats.sent_ucast_bytes);
		stats->tx_mcast_bytes +=
			HILO_64_REGPAIR(pstats.sent_mcast_bytes);
		stats->tx_bcast_bytes +=
			HILO_64_REGPAIR(pstats.sent_bcast_bytes);
		stats->tx_ucast_pkts +=
			HILO_64_REGPAIR(pstats.sent_ucast_pkts);
		stats->tx_mcast_pkts +=
			HILO_64_REGPAIR(pstats.sent_mcast_pkts);
		stats->tx_bcast_pkts +=
			HILO_64_REGPAIR(pstats.sent_bcast_pkts);
		stats->tx_err_drop_pkts +=
			HILO_64_REGPAIR(pstats.error_drop_pkts);

		/* MAC-level (pmm) counters read from the port stats */
		stats->rx_64_byte_packets       += port_stats.pmm.r64;
		stats->rx_127_byte_packets      += port_stats.pmm.r127;
		stats->rx_255_byte_packets      += port_stats.pmm.r255;
		stats->rx_511_byte_packets      += port_stats.pmm.r511;
		stats->rx_1023_byte_packets     += port_stats.pmm.r1023;
		stats->rx_1518_byte_packets     += port_stats.pmm.r1518;
		stats->rx_1522_byte_packets     += port_stats.pmm.r1522;
		stats->rx_2047_byte_packets     += port_stats.pmm.r2047;
		stats->rx_4095_byte_packets     += port_stats.pmm.r4095;
		stats->rx_9216_byte_packets     += port_stats.pmm.r9216;
		stats->rx_16383_byte_packets    += port_stats.pmm.r16383;
		stats->rx_crc_errors	    += port_stats.pmm.rfcs;
		stats->rx_mac_crtl_frames       += port_stats.pmm.rxcf;
		stats->rx_pause_frames	  += port_stats.pmm.rxpf;
		stats->rx_pfc_frames	    += port_stats.pmm.rxpp;
		stats->rx_align_errors	  += port_stats.pmm.raln;
		stats->rx_carrier_errors	+= port_stats.pmm.rfcr;
		stats->rx_oversize_packets      += port_stats.pmm.rovr;
		stats->rx_jabbers	       += port_stats.pmm.rjbr;
		stats->rx_undersize_packets     += port_stats.pmm.rund;
		stats->rx_fragments	     += port_stats.pmm.rfrg;
		stats->tx_64_byte_packets       += port_stats.pmm.t64;
		stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
		stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
		stats->tx_256_to_511_byte_packets  += port_stats.pmm.t511;
		stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
		stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
		stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
		stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
		stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
		stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
		stats->tx_pause_frames	  += port_stats.pmm.txpf;
		stats->tx_pfc_frames	    += port_stats.pmm.txpp;
		stats->tx_lpi_entry_count       += port_stats.pmm.tlpiec;
		stats->tx_total_collisions      += port_stats.pmm.tncl;
		stats->rx_mac_bytes	     += port_stats.pmm.rbyte;
		stats->rx_mac_uc_packets	+= port_stats.pmm.rxuca;
		stats->rx_mac_mc_packets	+= port_stats.pmm.rxmca;
		stats->rx_mac_bc_packets	+= port_stats.pmm.rxbca;
		stats->rx_mac_frames_ok	 += port_stats.pmm.rxpok;
		stats->tx_mac_bytes	     += port_stats.pmm.tbyte;
		stats->tx_mac_uc_packets	+= port_stats.pmm.txuca;
		stats->tx_mac_mc_packets	+= port_stats.pmm.txmca;
		stats->tx_mac_bc_packets	+= port_stats.pmm.txbca;
		stats->tx_mac_ctrl_frames       += port_stats.pmm.txcf;

		/* BRB counters are reported per traffic-class (8 TCs) */
		for (j = 0; j < 8; j++) {
			stats->brb_truncates += port_stats.brb.brb_truncate[j];
			stats->brb_discards += port_stats.brb.brb_discard[j];
		}
	}
}
1684 
1685 void qed_get_vport_stats(struct qed_dev *cdev,
1686 			 struct qed_eth_stats *stats)
1687 {
1688 	u32 i;
1689 
1690 	if (!cdev) {
1691 		memset(stats, 0, sizeof(*stats));
1692 		return;
1693 	}
1694 
1695 	__qed_get_vport_stats(cdev, stats);
1696 
1697 	if (!cdev->reset_stats)
1698 		return;
1699 
1700 	/* Reduce the statistics baseline */
1701 	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1702 		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1703 }
1704 
/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	/* Write zeroed storm statistics blocks back to the chip for
	 * every hw-function; the tstorm block is intentionally left
	 * alone here.
	 */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

		if (!p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		qed_memcpy_to(p_hwfn, p_ptt,
			      p_hwfn->storm_stats.mstats.address,
			      &mstats,
			      p_hwfn->storm_stats.mstats.len);

		memset(&ustats, 0, sizeof(ustats));
		qed_memcpy_to(p_hwfn, p_ptt,
			      p_hwfn->storm_stats.ustats.address,
			      &ustats,
			      p_hwfn->storm_stats.ustats.len);

		memset(&pstats, 0, sizeof(pstats));
		qed_memcpy_to(p_hwfn, p_ptt,
			      p_hwfn->storm_stats.pstats.address,
			      &pstats,
			      p_hwfn->storm_stats.pstats.len);

		qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		__qed_get_vport_stats(cdev, cdev->reset_stats);
}
1751 
1752 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1753 		    u16 src_id, u16 *dst_id)
1754 {
1755 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1756 		u16 min, max;
1757 
1758 		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1759 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1760 		DP_NOTICE(p_hwfn,
1761 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1762 			  src_id, min, max);
1763 
1764 		return -EINVAL;
1765 	}
1766 
1767 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1768 
1769 	return 0;
1770 }
1771 
1772 int qed_fw_vport(struct qed_hwfn *p_hwfn,
1773 		 u8 src_id, u8 *dst_id)
1774 {
1775 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1776 		u8 min, max;
1777 
1778 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
1779 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
1780 		DP_NOTICE(p_hwfn,
1781 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
1782 			  src_id, min, max);
1783 
1784 		return -EINVAL;
1785 	}
1786 
1787 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1788 
1789 	return 0;
1790 }
1791 
1792 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1793 		   u8 src_id, u8 *dst_id)
1794 {
1795 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1796 		u8 min, max;
1797 
1798 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1799 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1800 		DP_NOTICE(p_hwfn,
1801 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1802 			  src_id, min, max);
1803 
1804 		return -EINVAL;
1805 	}
1806 
1807 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1808 
1809 	return 0;
1810 }
1811