/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_l2.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
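/* CRC-32C (Castagnoli) polynomial; used below for approximate multicast
 * binning.
 */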
#define CRC32_POLY 0x1edc6f41

struct ecore_l2_info {
	u32 queues;
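	/* Per queue-zone bitmap of the qids in use; each entry holds
	 * MAX_QUEUES_PER_QZONE bits.
	 */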
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	osal_mutex_t lock;
};

enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return ECORE_SUCCESS;

	p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
	if (!p_l2_info)
		return ECORE_NOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->p_dev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		ecore_vf_get_num_rxqs(p_hwfn, &rx);
		ecore_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
	}

	pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
			       sizeof(unsigned long *) *
			       p_l2_info->queues);
	if (pp_qids == OSAL_NULL)
		return ECORE_NOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
					  MAX_QUEUES_PER_QZONE / 8);
		if (pp_qids[i] == OSAL_NULL)
			return ECORE_NOMEM;
	}
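	/* Note - on an ECORE_NOMEM failure above, the partially-built state
	 * is left in place; ecore_l2_free() is expected to reclaim whatever
	 * was allocated.
	 */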

#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
#endif

	return ECORE_SUCCESS;
}

void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return;

	OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}

void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
	u32 i;

	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return;

	if (p_hwfn->p_l2_info == OSAL_NULL)
		return;

	if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
		goto out_l2_info;

	/* Free until we hit the first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
			break;
		OSAL_VFREE(p_hwfn->p_dev,
			   p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	/* The lock is the last thing initialized, so only deallocate it
	 * if everything else was.
	 */
	if (i == p_hwfn->p_l2_info->queues)
		OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = OSAL_NULL;
}

static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn, true,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
					     MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	OSAL_MUTEX_RELEASE(&p_l2_info->lock);
	return b_rc;
}

static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

	OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
		       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}

void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
				 struct ecore_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy &
			      ECORE_QCID_LEGACY_VF_CID);

	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
	 * For legacy vf-queues, the CID doesn't go through here.
	 */
	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
		_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* VFs maintain the index inside queue-zone on their own */
	if (p_cid->vfid == ECORE_QUEUE_CID_PF)
		ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);

	OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* This internal function is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
			u16 opaque_fid, u32 cid,
			struct ecore_queue_start_common_params *p_params,
			struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
	if (p_cid == OSAL_NULL)
		return OSAL_NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params != OSAL_NULL) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = ECORE_QUEUE_CID_PF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->p_dev)) {
		p_cid->abs = p_cid->rel;

		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc != ECORE_SUCCESS)
		goto fail;

	rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
			       &p_cid->abs.queue_id);
	if (rc != ECORE_SUCCESS)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
		rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
				    &p_cid->abs.stats_id);
		if (rc != ECORE_SUCCESS)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->rel.vport_id, p_cid->abs.vport_id,
		   p_cid->rel.queue_id, p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id, p_cid->abs.stats_id,
		   p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	OSAL_VFREE(p_hwfn->p_dev, p_cid);
	return OSAL_NULL;
}

struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
		       struct ecore_queue_start_common_params *p_params,
		       struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	u8 vfid = ECORE_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy &
		    ECORE_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by the PF.
	 */
	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
		if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					   &cid, vfid) != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
			return OSAL_NULL;
		}
	}

	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
					p_params, p_vf_params);
	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
		_ecore_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}

static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			  struct ecore_queue_start_common_params *p_params)
{
	return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u16 rx_mode = 0, tx_err = 0;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;
	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
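
	/* Start with all Rx traffic dropped; the actual accept mode is set
	 * later via a vport-update ramrod.
	 */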
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

	/* Handle requests for strict behavior on transmission errors */
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
		  p_params->b_err_illegal_vlan_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
		  p_params->b_err_small_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
		  p_params->b_err_anti_spoof ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
		  p_params->b_err_illegal_inband_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
		  p_params->b_err_vlan_insert_with_inband ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
		  p_params->b_err_big_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
		  p_params->b_err_ctrl_frame ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

	/* TPA related fields */
	OSAL_MEMSET(&p_ramrod->tpa_param, 0,
		    sizeof(struct eth_vport_tpa_param));
	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case ECORE_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
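		/* (u16)-1 - effectively no limit on the aggregation size */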
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		p_ramrod->tx_switching_en = 0;
#endif

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
					  struct ecore_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					       p_params->mtu,
					       p_params->remove_inner_vlan,
					       p_params->tpa_mode,
					       p_params->max_buffers_per_cqe,
					       p_params->only_untagged);

	return ecore_sp_eth_vport_start(p_hwfn, p_params);
}

static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	int i, table_size;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
			  ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id,
			      &p_config->rss_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	p_config->capabilities = 0;

	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;
	p_config->capabilities =
		OSAL_CPU_TO_LE16(p_config->capabilities);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table,
		   p_config->update_rss_key);

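	/* tbl_size holds log2 of the table size; clamp the walk below to the
	 * indirection table's actual capacity.
	 */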
	table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
				1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return ECORE_INVAL;

		p_config->indirection_table[i] =
				OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

	return rc;
}

static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct ecore_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
					accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
					accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
	/* On B0 emulation we cannot enable Tx, since this would cause writes
	 * to PVFC HW block which isn't implemented in emulation.
	 */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx mode in vport update\n");
		p_ramrod->common.update_tx_mode_flg = 0;
	}
#endif

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

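		/* DROP_ALL is set only when neither the matched nor the
		 * unmatched accept bit was requested.
		 */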
		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
			   p_ramrod->common.vport_id, state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
			   p_ramrod->common.vport_id, state);
	}
}

static void
ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
			      struct ecore_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_sp_vport_update_params *p_params)
{
	int i;

	OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
		    sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
	}
}

enum _ecore_status_t ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
					   struct ecore_sp_vport_update_params *p_params,
					   enum spq_mode comp_mode,
					   struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct ecore_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	u8 abs_vport_id = 0, val;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev)) {
		rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;

	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		if (p_ramrod->common.tx_switching_en ||
		    p_ramrod->common.update_tx_switching_en_flg) {
			DP_NOTICE(p_hwfn, false, "FPGA - why are we seeing tx-switching? Overriding it\n");
			p_ramrod->common.tx_switching_en = 0;
			p_ramrod->common.update_tx_switching_en_flg = 1;
		}
#endif
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc != ECORE_SUCCESS) {
		/* Return the SPQ entry taken in ecore_sp_init_request() */
		ecore_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	ecore_sp_update_mcast_bin(p_ramrod, p_params);

	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_stop(p_hwfn);

	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
			 struct ecore_filter_accept_flags *p_accept_flags)
{
	struct ecore_sp_vport_update_params s_params;

	OSAL_MEMSET(&s_params, 0, sizeof(s_params));
	OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
		    sizeof(struct ecore_filter_accept_flags));

	return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

enum _ecore_status_t ecore_filter_accept_cmd(struct ecore_dev *p_dev,
					     u8 vport,
					     struct ecore_filter_accept_flags accept_flags,
					     u8 update_accept_any_vlan,
					     u8 accept_any_vlan,
					     enum spq_mode comp_mode,
					     struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_sp_vport_update_params vport_update_params;
	enum _ecore_status_t rc;
	int i;

	/* Prepare and send the vport rx_mode change */
	OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(p_dev)) {
			rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc != ECORE_SUCCESS)
				return rc;
			continue;
		}

		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);

		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   u16 bd_max_bytes,
			   dma_addr_t bd_chain_phys_addr,
			   dma_addr_t cqe_pbl_addr,
			   u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
		   p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      ECORE_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "",
			   p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size,
			    void OSAL_IOMEM **pp_prod)
{
	u32 init_prod_val = 0;

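	/* The Rx producer for a PF queue lives in MSTORM RAM, reachable
	 * through the fixed GTT window in BAR0.
	 */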
	*pp_prod = (u8 OSAL_IOMEM *)
		    p_hwfn->regview +
		    GTT_BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
					  bd_max_bytes,
					  bd_chain_phys_addr,
					  cqe_pbl_addr, cqe_pbl_size);
}

enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	/* Allocate a CID for the queue */
	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
	if (p_cid == OSAL_NULL)
		return ECORE_NOMEM;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
						 bd_max_bytes,
						 bd_chain_phys_addr,
						 cqe_pbl_addr, cqe_pbl_size,
						 &p_ret_params->p_prod);
	else
		rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr,
					   cqe_pbl_size,
					   &p_ret_params->p_prod);

	/* Provide the caller with a reference to the queue handle */
	if (rc != ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

enum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
						   void **pp_rxq_handles,
						   u8 num_rxqs,
						   u8 complete_cqe_flg,
						   u8 complete_event_flg,
						   enum spq_mode comp_mode,
						   struct ecore_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 i;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxqs_update(p_hwfn,
					       (struct ecore_queue_cid **)
					       pp_rxq_handles,
					       num_rxqs,
					       complete_cqe_flg,
					       complete_event_flg);

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = ecore_sp_init_request(p_hwfn, &p_ent,
					   ETH_RAMROD_RX_QUEUE_UPDATE,
					   PROTOCOLID_ETH, &init_data);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return rc;
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   bool b_eq_completion_only,
			   bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to arrive as an EQE to the PF.
	 */
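	/* Summarizing the logic below - PF queues complete on the CQE unless
	 * an EQ-only completion was requested (a CQE completion can still be
	 * forced via b_cqe_completion), while VF queues always complete via
	 * the EQ.
	 */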
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
				       b_eq_completion_only;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_rxq,
					     bool eq_completion_only,
					     bool cqe_completion)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
						eq_completion_only,
						cqe_completion);
	else
		rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (rc == ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   dma_addr_t pbl_addr, u16 pbl_size,
			   u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u8 tc,
			    dma_addr_t pbl_addr, u16 pbl_size,
			    void OSAL_IOMEM **pp_doorbell)
{
	enum _ecore_status_t rc;

	/* TODO - set tc in the pq_params for multi-cos */
	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
					pbl_addr, pbl_size,
					ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = (u8 OSAL_IOMEM *)
		       p_hwfn->doorbells +
		       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
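	/* Note - the doorbell address is derived from the queue's CID within
	 * the legacy DEMS doorbell space.
	 */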

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr, u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
	if (p_cid == OSAL_NULL)
		return ECORE_INVAL;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
						 pbl_addr, pbl_size,
						 &p_ret_params->p_doorbell);
	else
		rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
					   pbl_addr, pbl_size,
					   &p_ret_params->p_doorbell);

	if (rc != ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_handle)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
	enum _ecore_status_t rc;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

	if (rc == ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

static enum eth_filter_action ecore_filter_action(enum ecore_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case ECORE_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case ECORE_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case ECORE_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  struct vport_filter_update_ramrod_data **pp_ramrod,
			  struct ecore_spq_entry **pp_ent,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct ecore_sp_init_data init_data;
	enum eth_filter_action action;
	enum _ecore_status_t rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			    &vport_to_remove_from);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			    &vport_to_add_to);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, pp_ent,
				   ETH_RAMROD_FILTERS_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx filters\n");
		p_ramrod->filter_cmd_hdr.tx = 0;
	}
#endif

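	/* MOVE and REPLACE are compound operations - each needs both a
	 * remove command and an add command, hence a cmd_cnt of 2.
	 */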
1354 	case ECORE_FILTER_REPLACE:
1355 	case ECORE_FILTER_MOVE:
1356 		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
1357 	default:
1358 		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
1359 	}
1360 
1361 	p_first_filter = &p_ramrod->filter_cmds[0];
1362 	p_second_filter = &p_ramrod->filter_cmds[1];
1363 
1364 	switch (p_filter_cmd->type) {
1365 	case ECORE_FILTER_MAC:
1366 		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
1367 	case ECORE_FILTER_VLAN:
1368 		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
1369 	case ECORE_FILTER_MAC_VLAN:
1370 		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
1371 	case ECORE_FILTER_INNER_MAC:
1372 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
1373 	case ECORE_FILTER_INNER_VLAN:
1374 		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
1375 	case ECORE_FILTER_INNER_PAIR:
1376 		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
1377 	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1378 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1379 		break;
1380 	case ECORE_FILTER_MAC_VNI_PAIR:
1381 		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
1382 	case ECORE_FILTER_VNI:
1383 		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
1384 	}
1385 
1386 	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1387 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1388 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1389 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1390 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1391 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
1392 		ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
1393 				      &p_first_filter->mac_mid,
1394 				      &p_first_filter->mac_lsb,
1395 				      (u8 *)p_filter_cmd->mac);
1396 
1397 	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1398 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1399 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1400 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1401 		p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
1402 
1403 	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1404 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1405 	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1406 		p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
1407 
1408 	if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
1409 		p_second_filter->type = p_first_filter->type;
1410 		p_second_filter->mac_msb = p_first_filter->mac_msb;
1411 		p_second_filter->mac_mid = p_first_filter->mac_mid;
1412 		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1413 		p_second_filter->vlan_id = p_first_filter->vlan_id;
1414 		p_second_filter->vni = p_first_filter->vni;
1415 
1416 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1417 
1418 		p_first_filter->vport_id = vport_to_remove_from;
1419 
1420 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1421 		p_second_filter->vport_id = vport_to_add_to;
1422 	} else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
1423 		p_first_filter->vport_id = vport_to_add_to;
1424 		OSAL_MEMCPY(p_second_filter, p_first_filter,
1425 			    sizeof(*p_second_filter));
1426 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
1427 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1428 	} else {
1429 		action = ecore_filter_action(p_filter_cmd->opcode);
1430 
1431 		if (action == MAX_ETH_FILTER_ACTION) {
1432 			DP_NOTICE(p_hwfn, true,
1433 				  "%d is not supported yet\n",
1434 				  p_filter_cmd->opcode);
1435 			return ECORE_NOTIMPL;
1436 		}
1437 
1438 		p_first_filter->action = action;
1439 		p_first_filter->vport_id =
1440 			(p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1441 			vport_to_remove_from : vport_to_add_to;
1442 	}
1443 
1444 	return ECORE_SUCCESS;
1445 }

enum _ecore_status_t ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
					       u16 opaque_fid,
					       struct ecore_filter_ucast *p_filter_cmd,
					       enum spq_mode comp_mode,
					       struct ecore_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct eth_filter_cmd_header *p_header;
	enum _ecore_status_t rc;

	rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				       &p_ramrod, &p_ent,
				       comp_mode, p_comp_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "Unicast filter command failed %d\n",
		       rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
		    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
		     "REMOVE" :
		     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
		      "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
		    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
		     "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0], p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2], p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4], p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return ECORE_SUCCESS;
}

/*******************************************************************************
 * Description:
 *         Calculates CRC32 on a buffer
 *         Note: crc32_length MUST be a multiple of 8
 * Return:
 ******************************************************************************/
static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((crc32_packet == OSAL_NULL) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0)) {
		return crc32_result;
	}

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /* crc32_result[0] = 1 */
			}
		}
	}

	return crc32_result;
}

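/* The 6-byte MAC is copied into an 8-byte zero-initialized buffer to satisfy
 * the multiple-of-8 length requirement of ecore_calc_crc32c(); the low 8 bits
 * of the resulting CRC32c then select one of the 256 approximate-multicast
 * bins.
 */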
static u32 ecore_crc32c_le(u32 seed, u8 *mac)
{
	u32 packet_buf[2] = {0};

	OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
}

u8 ecore_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);

	return crc & 0xff;
}
1547 
1548 static enum _ecore_status_t
1549 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1550 			  struct ecore_filter_mcast *p_filter_cmd,
1551 			  enum spq_mode comp_mode,
1552 			  struct ecore_spq_comp_cb *p_comp_data)
1553 {
1554 	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1555 	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1556 	struct ecore_spq_entry *p_ent = OSAL_NULL;
1557 	struct ecore_sp_init_data init_data;
1558 	u8 abs_vport_id = 0;
1559 	enum _ecore_status_t rc;
1560 	int i;
1561 
1562 	if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1563 		rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1564 				    &abs_vport_id);
1565 	else
1566 		rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1567 				    &abs_vport_id);
1568 	if (rc != ECORE_SUCCESS)
1569 		return rc;
1570 
1571 	/* Get SPQ entry */
1572 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1573 	init_data.cid = ecore_spq_get_cid(p_hwfn);
1574 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1575 	init_data.comp_mode = comp_mode;
1576 	init_data.p_comp_data = p_comp_data;
1577 
1578 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
1579 				   ETH_RAMROD_VPORT_UPDATE,
1580 				   PROTOCOLID_ETH, &init_data);
1581 	if (rc != ECORE_SUCCESS) {
1582 		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1583 		return rc;
1584 	}
1585 
1586 	p_ramrod = &p_ent->ramrod.vport_update;
1587 	p_ramrod->common.update_approx_mcast_flg = 1;
1588 
1589 	/* explicitly clear out the entire vector */
1590 	OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1591 		    0, sizeof(p_ramrod->approx_mcast.bins));
1592 	OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
1593 		    ETH_MULTICAST_MAC_BINS_IN_REGS);
1594 	/* filter ADD op is explicit set op and it removes
1595 	*  any existing filters for the vport.
1596 	*/
1597 	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1598 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1599 			u32 bit;
1600 
1601 			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1602 			OSAL_SET_BIT(bit, bins);
1603 		}
1604 
1605 		/* Convert to correct endianness */
1606 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1607 			struct vport_update_ramrod_mcast *p_ramrod_bins;
1608 			u32 *p_bins = (u32 *)bins;
1609 
1610 			p_ramrod_bins = &p_ramrod->approx_mcast;
1611 			p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
1612 		}
1613 	}
1614 
1615 	p_ramrod->common.vport_id = abs_vport_id;
1616 
1617 	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1618 	if (rc != ECORE_SUCCESS)
1619 		DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1620 
1621 	return rc;
1622 }
1623 
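/* A minimal usage sketch (hypothetical caller, not part of this file):
 * an ECORE_FILTER_ADD command replaces the vport's entire approximate
 * multicast configuration in a single ramrod, assuming ecore_filter_mcast
 * carries the MACs as mac[ECORE_MAX_MC_ADDRS][ETH_ALEN]:
 *
 *	struct ecore_filter_mcast mcast = { 0 };
 *
 *	mcast.opcode = ECORE_FILTER_ADD;
 *	mcast.vport_to_add_to = vport_id;
 *	mcast.num_mc_addrs = 1;
 *	OSAL_MEMCPY(mcast.mac[0], mc_mac, ETH_ALEN);
 *	rc = ecore_filter_mcast_cmd(p_dev, &mcast, ECORE_SPQ_MODE_CB,
 *				    OSAL_NULL);
 */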
1624 enum _ecore_status_t ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1625 					    struct ecore_filter_mcast *p_filter_cmd,
1626 					    enum spq_mode comp_mode,
1627 					    struct ecore_spq_comp_cb *p_comp_data)
1628 {
1629 	enum _ecore_status_t rc = ECORE_SUCCESS;
1630 	int i;
1631 
1632 	/* only ADD and REMOVE operations are supported for multicast */
1633 	if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
1634 	     p_filter_cmd->opcode != ECORE_FILTER_REMOVE) ||
1635 	    (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1636 		return ECORE_INVAL;
1637 	}
1638 
1639 	for_each_hwfn(p_dev, i) {
1640 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1641 
1642 		if (IS_VF(p_dev)) {
1643 			ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1644 			continue;
1645 		}
1646 
1647 		rc = ecore_sp_eth_filter_mcast(p_hwfn,
1648 					       p_filter_cmd,
1649 					       comp_mode,
1650 					       p_comp_data);
1651 		if (rc != ECORE_SUCCESS)
1652 			break;
1653 	}
1654 
1655 	return rc;
1656 }
1657 
1658 enum _ecore_status_t ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1659 					    struct ecore_filter_ucast *p_filter_cmd,
1660 					    enum spq_mode comp_mode,
1661 					    struct ecore_spq_comp_cb *p_comp_data)
1662 {
1663 	enum _ecore_status_t rc = ECORE_SUCCESS;
1664 	int i;
1665 
1666 	for_each_hwfn(p_dev, i) {
1667 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1668 		u16 opaque_fid;
1669 
1670 		if (IS_VF(p_dev)) {
1671 			rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1672 			continue;
1673 		}
1674 
1675 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1676 		rc = ecore_sp_eth_filter_ucast(p_hwfn,
1677 					       opaque_fid,
1678 					       p_filter_cmd,
1679 					       comp_mode,
1680 					       p_comp_data);
1681 		if (rc != ECORE_SUCCESS)
1682 			break;
1683 	}
1684 
1685 	return rc;
1686 }
1687 
1688 /* Statistics related code */
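/* Each vport's counters live in per-storm RAM regions: Pstorm holds Tx
 * counters, Ustorm Rx counters, Mstorm Rx-discard/TPA counters and Tstorm
 * per-port filter discards. PFs read them directly through BAR0, while VFs
 * are told the addresses by the PF in the acquire response.
 */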
1689 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1690 					     u32 *p_addr, u32 *p_len,
1691 					     u16 statistics_bin)
1692 {
1693 	if (IS_PF(p_hwfn->p_dev)) {
1694 		*p_addr = BAR0_MAP_REG_PSDM_RAM +
1695 			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1696 		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
1697 	} else {
1698 		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1699 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1700 
1701 		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1702 		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
1703 	}
1704 }
1705 
1706 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1707 				     struct ecore_ptt *p_ptt,
1708 				     struct ecore_eth_stats *p_stats,
1709 				     u16 statistics_bin)
1710 {
1711 	struct eth_pstorm_per_queue_stat pstats;
1712 	u32 pstats_addr = 0, pstats_len = 0;
1713 
1714 	__ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1715 					 statistics_bin);
1716 
1717 	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1718 	ecore_memcpy_from(p_hwfn, p_ptt, &pstats,
1719 			  pstats_addr, pstats_len);
1720 
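	/* Each counter is a 64-bit value split across two 32-bit registers;
	 * HILO_64_REGPAIR reassembles it (roughly ((u64)hi << 32) | lo)
	 * before it is accumulated into the caller's totals.
	 */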
1721 	p_stats->common.tx_ucast_bytes +=
1722 		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1723 	p_stats->common.tx_mcast_bytes +=
1724 		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1725 	p_stats->common.tx_bcast_bytes +=
1726 		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1727 	p_stats->common.tx_ucast_pkts +=
1728 		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1729 	p_stats->common.tx_mcast_pkts +=
1730 		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1731 	p_stats->common.tx_bcast_pkts +=
1732 		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1733 	p_stats->common.tx_err_drop_pkts +=
1734 		HILO_64_REGPAIR(pstats.error_drop_pkts);
1735 }
1736 
1737 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1738 				     struct ecore_ptt *p_ptt,
1739 				     struct ecore_eth_stats *p_stats)
1740 {
1741 	struct tstorm_per_port_stat tstats;
1742 	u32 tstats_addr, tstats_len;
1743 
1744 	if (IS_PF(p_hwfn->p_dev)) {
1745 		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1746 			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1747 		tstats_len = sizeof(struct tstorm_per_port_stat);
1748 	} else {
1749 		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1750 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1751 
1752 		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1753 		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1754 	}
1755 
1756 	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1757 	ecore_memcpy_from(p_hwfn, p_ptt, &tstats,
1758 			  tstats_addr, tstats_len);
1759 
1760 	p_stats->common.mftag_filter_discards +=
1761 		HILO_64_REGPAIR(tstats.mftag_filter_discard);
1762 	p_stats->common.mac_filter_discards +=
1763 		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1764 }
1765 
1766 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1767 					     u32 *p_addr, u32 *p_len,
1768 					     u16 statistics_bin)
1769 {
1770 	if (IS_PF(p_hwfn->p_dev)) {
1771 		*p_addr = BAR0_MAP_REG_USDM_RAM +
1772 			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1773 		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
1774 	} else {
1775 		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1776 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1777 
1778 		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1779 		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
1780 	}
1781 }
1782 
1783 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1784 				     struct ecore_ptt *p_ptt,
1785 				     struct ecore_eth_stats *p_stats,
1786 				     u16 statistics_bin)
1787 {
1788 	struct eth_ustorm_per_queue_stat ustats;
1789 	u32 ustats_addr = 0, ustats_len = 0;
1790 
1791 	__ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1792 					 statistics_bin);
1793 
1794 	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1795 	ecore_memcpy_from(p_hwfn, p_ptt, &ustats,
1796 			  ustats_addr, ustats_len);
1797 
1798 	p_stats->common.rx_ucast_bytes +=
1799 		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1800 	p_stats->common.rx_mcast_bytes +=
1801 		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1802 	p_stats->common.rx_bcast_bytes +=
1803 		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1804 	p_stats->common.rx_ucast_pkts +=
1805 		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1806 	p_stats->common.rx_mcast_pkts +=
1807 		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1808 	p_stats->common.rx_bcast_pkts +=
1809 		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1810 }
1811 
1812 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1813 					     u32 *p_addr, u32 *p_len,
1814 					     u16 statistics_bin)
1815 {
1816 	if (IS_PF(p_hwfn->p_dev)) {
1817 		*p_addr = BAR0_MAP_REG_MSDM_RAM +
1818 			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1819 		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1820 	} else {
1821 		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1822 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1823 
1824 		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1825 		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
1826 	}
1827 }
1828 
1829 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1830 				     struct ecore_ptt *p_ptt,
1831 				     struct ecore_eth_stats *p_stats,
1832 				     u16 statistics_bin)
1833 {
1834 	struct eth_mstorm_per_queue_stat mstats;
1835 	u32 mstats_addr = 0, mstats_len = 0;
1836 
1837 	__ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1838 					 statistics_bin);
1839 
1840 	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1841 	ecore_memcpy_from(p_hwfn, p_ptt, &mstats,
1842 			  mstats_addr, mstats_len);
1843 
1844 	p_stats->common.no_buff_discards +=
1845 		HILO_64_REGPAIR(mstats.no_buff_discard);
1846 	p_stats->common.packet_too_big_discard +=
1847 		HILO_64_REGPAIR(mstats.packet_too_big_discard);
1848 	p_stats->common.ttl0_discard +=
1849 		HILO_64_REGPAIR(mstats.ttl0_discard);
1850 	p_stats->common.tpa_coalesced_pkts +=
1851 		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1852 	p_stats->common.tpa_coalesced_events +=
1853 		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1854 	p_stats->common.tpa_aborts_num +=
1855 		HILO_64_REGPAIR(mstats.tpa_aborts_num);
1856 	p_stats->common.tpa_coalesced_bytes +=
1857 		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1858 }
1859 
1860 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1861 					 struct ecore_ptt *p_ptt,
1862 					 struct ecore_eth_stats *p_stats)
1863 {
1864 	struct ecore_eth_stats_common *p_common = &p_stats->common;
1865 	struct port_stats port_stats;
1866 	int j;
1867 
1868 	OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1869 
1870 	ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1871 			  p_hwfn->mcp_info->port_addr +
1872 			  OFFSETOF(struct public_port, stats),
1873 			  sizeof(port_stats));
1874 
1875 	p_common->rx_64_byte_packets += port_stats.eth.r64;
1876 	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1877 	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1878 	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1879 	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1880 	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1881 	p_common->rx_crc_errors += port_stats.eth.rfcs;
1882 	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1883 	p_common->rx_pause_frames += port_stats.eth.rxpf;
1884 	p_common->rx_pfc_frames += port_stats.eth.rxpp;
1885 	p_common->rx_align_errors += port_stats.eth.raln;
1886 	p_common->rx_carrier_errors += port_stats.eth.rfcr;
1887 	p_common->rx_oversize_packets += port_stats.eth.rovr;
1888 	p_common->rx_jabbers += port_stats.eth.rjbr;
1889 	p_common->rx_undersize_packets += port_stats.eth.rund;
1890 	p_common->rx_fragments += port_stats.eth.rfrg;
1891 	p_common->tx_64_byte_packets += port_stats.eth.t64;
1892 	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1893 	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1894 	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1895 	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1896 	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1897 	p_common->tx_pause_frames += port_stats.eth.txpf;
1898 	p_common->tx_pfc_frames += port_stats.eth.txpp;
1899 	p_common->rx_mac_bytes += port_stats.eth.rbyte;
1900 	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1901 	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1902 	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1903 	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1904 	p_common->tx_mac_bytes += port_stats.eth.tbyte;
1905 	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1906 	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1907 	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1908 	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1909 	for (j = 0; j < 8; j++) {
1910 		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1911 		p_common->brb_discards += port_stats.brb.brb_discard[j];
1912 	}
1913 
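	/* BB and AH MACs lay out the large-packet histograms differently,
	 * hence the u0/u1/u2 unions in the eth stats block.
	 */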
1914 	if (ECORE_IS_BB(p_hwfn->p_dev)) {
1915 		struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
1916 
1917 		p_bb->rx_1519_to_1522_byte_packets +=
1918 			port_stats.eth.u0.bb0.r1522;
1919 		p_bb->rx_1519_to_2047_byte_packets +=
1920 			port_stats.eth.u0.bb0.r2047;
1921 		p_bb->rx_2048_to_4095_byte_packets +=
1922 			port_stats.eth.u0.bb0.r4095;
1923 		p_bb->rx_4096_to_9216_byte_packets +=
1924 			port_stats.eth.u0.bb0.r9216;
1925 		p_bb->rx_9217_to_16383_byte_packets +=
1926 			port_stats.eth.u0.bb0.r16383;
1927 		p_bb->tx_1519_to_2047_byte_packets +=
1928 			port_stats.eth.u1.bb1.t2047;
1929 		p_bb->tx_2048_to_4095_byte_packets +=
1930 			port_stats.eth.u1.bb1.t4095;
1931 		p_bb->tx_4096_to_9216_byte_packets +=
1932 			port_stats.eth.u1.bb1.t9216;
1933 		p_bb->tx_9217_to_16383_byte_packets +=
1934 			port_stats.eth.u1.bb1.t16383;
1935 		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1936 		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1937 	} else {
1938 		struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
1939 
1940 		p_ah->rx_1519_to_max_byte_packets +=
1941 			port_stats.eth.u0.ah0.r1519_to_max;
1942 		p_ah->tx_1519_to_max_byte_packets +=
1943 			port_stats.eth.u1.ah1.t1519_to_max;
1944 	}
1945 }
1946 
1947 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
1948 			     struct ecore_ptt *p_ptt,
1949 			     struct ecore_eth_stats *stats,
1950 			     u16 statistics_bin, bool b_get_port_stats)
1951 {
1952 	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1953 	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1954 	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
1955 	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1956 
1957 #ifndef ASIC_ONLY
1958 	/* Avoid getting PORT stats for emulation. */
1959 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1960 		return;
1961 #endif
1962 
1963 	if (b_get_port_stats && p_hwfn->mcp_info)
1964 		__ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
1965 }
1966 
1967 static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
1968 				   struct ecore_eth_stats *stats)
1969 {
1970 	u8 fw_vport = 0;
1971 	int i;
1972 
1973 	OSAL_MEMSET(stats, 0, sizeof(*stats));
1974 
1975 	for_each_hwfn(p_dev, i) {
1976 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1977 		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1978 					  ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1979 
1980 		if (IS_PF(p_dev)) {
1981 			/* The main vport is relative index 0 (the first) */
1982 			if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
1983 				DP_ERR(p_hwfn, "No vport available!\n");
1984 				goto out;
1985 			}
1986 		}
1987 
1988 		if (IS_PF(p_dev) && !p_ptt) {
1989 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1990 			continue;
1991 		}
1992 
1993 		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1994 					IS_PF(p_dev) ? true : false);
1995 
1996 out:
1997 		if (IS_PF(p_dev) && p_ptt)
1998 			ecore_ptt_release(p_hwfn, p_ptt);
1999 	}
2000 }
2001 
2002 void ecore_get_vport_stats(struct ecore_dev *p_dev,
2003 			   struct ecore_eth_stats *stats)
2004 {
2005 	u32 i;
2006 
2007 	if (!p_dev) {
2008 		OSAL_MEMSET(stats, 0, sizeof(*stats));
2009 		return;
2010 	}
2011 
2012 	_ecore_get_vport_stats(p_dev, stats);
2013 
2014 	if (!p_dev->reset_stats)
2015 		return;
2016 
2017 	/* Reduce the statistics baseline */
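	/* This treats the struct as a flat array of u64 counters, which
	 * relies on ecore_eth_stats containing nothing but 64-bit fields.
	 */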
2018 	for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
2019 		((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
2020 }
2021 
2022 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
2023 void ecore_reset_vport_stats(struct ecore_dev *p_dev)
2024 {
2025 	int i;
2026 
2027 	for_each_hwfn(p_dev, i) {
2028 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2029 		struct eth_mstorm_per_queue_stat mstats;
2030 		struct eth_ustorm_per_queue_stat ustats;
2031 		struct eth_pstorm_per_queue_stat pstats;
2032 		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
2033 					  ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
2034 		u32 addr = 0, len = 0;
2035 
2036 		if (IS_PF(p_dev) && !p_ptt) {
2037 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2038 			continue;
2039 		}
2040 
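		/* Reset is done by writing zeroed shadow structs back over
		 * the storm RAM locations the counters are read from.
		 */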
2041 		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
2042 		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
2043 		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
2044 
2045 		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
2046 		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
2047 		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
2048 
2049 		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
2050 		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
2051 		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
2052 
2053 		if (IS_PF(p_dev))
2054 			ecore_ptt_release(p_hwfn, p_ptt);
2055 	}
2056 
2057 	/* PORT statistics are not necessarily reset, so we need to
2058 	 * read and create a baseline for future statistics.
2059 	 */
2060 	if (!p_dev->reset_stats)
2061 		DP_INFO(p_dev, "Reset stats not allocated\n");
2062 	else
2063 		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
2064 }
2065 
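/* aRFS (accelerated Receive Flow Steering) is toggled per PF; when it is
 * enabled, the per-protocol flags select which header tuples (TCP/UDP over
 * IPv4/IPv6) the hardware parses when matching flows to Rx queues.
 */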
2066 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
2067 			       struct ecore_ptt *p_ptt,
2068 			       struct ecore_arfs_config_params *p_cfg_params)
2069 {
2070 	if (p_cfg_params->arfs_enable) {
2071 		ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2072 					  p_cfg_params->tcp,
2073 					  p_cfg_params->udp,
2074 					  p_cfg_params->ipv4,
2075 					  p_cfg_params->ipv6);
2076 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2077 			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
2078 			   p_cfg_params->tcp ? "Enable" : "Disable",
2079 			   p_cfg_params->udp ? "Enable" : "Disable",
2080 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
2081 			   p_cfg_params->ipv6 ? "Enable" : "Disable");
2082 	} else {
2083 		ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2084 	}
2085 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
2086 		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
2087 }
2088 
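/* A minimal usage sketch (hypothetical caller, not part of this file):
 * the buffer at p_addr holds a template of the packet header to match and
 * must remain DMA-mapped until the ramrod completes:
 *
 *	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, OSAL_NULL,
 *					       hdr_dma_addr, hdr_len,
 *					       rxq_id, vport_id, true);
 */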
2089 enum _ecore_status_t
2090 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
2091 				  struct ecore_spq_comp_cb *p_cb,
2092 				  dma_addr_t p_addr, u16 length,
2093 				  u16 qid, u8 vport_id,
2094 				  bool b_is_add)
2095 {
2096 	struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
2097 	struct ecore_spq_entry *p_ent = OSAL_NULL;
2098 	struct ecore_sp_init_data init_data;
2099 	u16 abs_rx_q_id = 0;
2100 	u8 abs_vport_id = 0;
2101 	enum _ecore_status_t rc = ECORE_NOTIMPL;
2102 
2103 	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
2104 	if (rc != ECORE_SUCCESS)
2105 		return rc;
2106 
2107 	rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
2108 	if (rc != ECORE_SUCCESS)
2109 		return rc;
2110 
2111 	/* Get SPQ entry */
2112 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2113 	init_data.cid = ecore_spq_get_cid(p_hwfn);
2114 
2115 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2116 
2117 	if (p_cb) {
2118 		init_data.comp_mode = ECORE_SPQ_MODE_CB;
2119 		init_data.p_comp_data = p_cb;
2120 	} else {
2121 		init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2122 	}
2123 
2124 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
2125 				   ETH_RAMROD_GFT_UPDATE_FILTER,
2126 				   PROTOCOLID_ETH, &init_data);
2127 	if (rc != ECORE_SUCCESS)
2128 		return rc;
2129 
2130 	p_ramrod = &p_ent->ramrod.rx_update_gft;
2131 
2132 	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
2133 	p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
2134 	p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
2135 	p_ramrod->vport_id = abs_vport_id;
2136 	p_ramrod->filter_type = RFS_FILTER_TYPE;
2137 	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
2138 					   : GFT_DELETE_FILTER;
2139 
2140 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2141 		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2142 		   abs_vport_id, abs_rx_q_id,
2143 		   b_is_add ? "Adding" : "Removing",
2144 		   (unsigned long long)p_addr, length);
2145 
2146 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2147 }
2148