/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1 (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 *
 * The contents of this file are subject to the terms of the Common
 * Development and Distribution License, v.1 (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0.
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"

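/*
 * ecore_sp_init_request() - acquire a slowpath queue (SPQ) entry and
 * initialize its ramrod header (CID, command, protocol) and completion
 * mode; the caller then fills the ramrod body and posts the entry.
 *
 * A typical caller pattern, mirroring the PF ramrods below:
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	struct ecore_sp_init_data init_data;
 *
 *	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 *	init_data.cid = ecore_spq_get_cid(p_hwfn);
 *	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *	rc = ecore_sp_init_request(p_hwfn, &p_ent, cmd, protocol,
 *				   &init_data);
 *
 * On success *pp_ent points at an entry whose ramrod payload has been
 * zeroed and which is ready for ecore_spq_post().
 */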
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
					   struct ecore_spq_entry **pp_ent,
					   u8 cmd,
					   u8 protocol,
					   struct ecore_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!pp_ent)
		return ECORE_INVAL;

	/* Get an SPQ entry */
	rc = ecore_spq_get_entry(p_hwfn, pp_ent);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the SPQ entry */
	p_ent = *pp_ent;
	p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;
	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case ECORE_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return ECORE_INVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case ECORE_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = OSAL_NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return ECORE_SUCCESS;
}

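/* Translate an ecore tunnel classification type to its FW counterpart. */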
static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
	switch (type) {
	case ECORE_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case ECORE_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case ECORE_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}

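/*
 * Latch tunnel enable/disable modes from p_src into the cached tunnel
 * state. On PF start (b_pf_start) every mode is taken; afterwards only
 * modes flagged with b_update_mode are refreshed.
 */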
static void
ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
			      struct ecore_tunnel_info *p_src,
			      bool b_pf_start)
{
	if (p_src->vxlan.b_update_mode || b_pf_start)
		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

	if (p_src->l2_gre.b_update_mode || b_pf_start)
		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

	if (p_src->ip_gre.b_update_mode || b_pf_start)
		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

	if (p_src->l2_geneve.b_update_mode || b_pf_start)
		p_tun->l2_geneve.b_mode_enabled =
				p_src->l2_geneve.b_mode_enabled;

	if (p_src->ip_geneve.b_update_mode || b_pf_start)
		p_tun->ip_geneve.b_mode_enabled =
				p_src->ip_geneve.b_mode_enabled;
}

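/* Copy per-tunnel classification settings, translated to FW values. */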
static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
				    struct ecore_tunnel_info *p_src)
{
	enum tunnel_clss type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

	type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
	p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
	p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
	p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
	p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
	p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
}

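/* Latch VXLAN/GENEVE UDP destination ports when an update is requested. */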
static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
				 struct ecore_tunnel_info *p_src)
{
	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

	if (p_src->geneve_port.b_update_port)
		p_tun->geneve_port.port = p_src->geneve_port.port;

	if (p_src->vxlan_port.b_update_port)
		p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

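/* Write a single tunnel classification field into a ramrod config. */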
static void
__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
				struct ecore_tunn_update_type *tun_type)
{
	*p_tunn_cls = tun_type->tun_cls;
}

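/*
 * As above, but also propagate a pending UDP destination port update
 * (update flag plus little-endian port value) into the ramrod config.
 */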
static void
ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			      struct ecore_tunn_update_type *tun_type,
			      u8 *p_update_port, __le16 *p_port,
			      struct ecore_tunn_update_udp_port *p_udp_port)
{
	__ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
	if (p_udp_port->b_update_port) {
		*p_update_port = 1;
		*p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
	}
}

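/*
 * Refresh the cached tunnel state from p_src and translate it into the
 * tunnel section of a PF_UPDATE ramrod.
 */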
static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
				struct ecore_tunnel_info *p_src,
				struct pf_update_tunnel_config *p_tunn_cfg)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

	ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
	ecore_set_tunn_cls_info(p_tun, p_src);
	ecore_set_tunn_ports(p_tun, p_src);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				      &p_tun->vxlan,
				      &p_tunn_cfg->set_vxlan_udp_port_flg,
				      &p_tunn_cfg->vxlan_udp_port,
				      &p_tun->vxlan_port);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				      &p_tun->l2_geneve,
				      &p_tunn_cfg->set_geneve_udp_port_flg,
				      &p_tunn_cfg->geneve_udp_port,
				      &p_tun->geneve_port);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
					&p_tun->ip_geneve);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
					&p_tun->l2_gre);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
					&p_tun->ip_gre);

	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

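/* Program the HW tunnel-protocol enables (GRE/VXLAN/GENEVE) from p_tun. */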
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_tunnel_info *p_tun)
{
	ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
			     p_tun->ip_gre.b_mode_enabled);
	ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

	ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
				p_tun->ip_geneve.b_mode_enabled);
}

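/*
 * Apply the tunnel configuration to the port: update the VXLAN/GENEVE
 * UDP destination ports if requested, then the per-protocol enables.
 * Not supported on BB A0 silicon.
 */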
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
					struct ecore_tunnel_info *p_tunn)
{
	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel hw config is not supported\n");
		return;
	}

	if (p_tunn->vxlan_port.b_update_port)
		ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					  p_tunn->vxlan_port.port);

	if (p_tunn->geneve_port.b_update_port)
		ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					   p_tunn->geneve_port.port);

	ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}

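/*
 * PF_START counterpart of ecore_tunn_set_pf_update_params(): latch the
 * full tunnel state (b_pf_start == true) and fill the start-time tunnel
 * configuration.
 */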
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
			       struct ecore_tunnel_info *p_src,
			       struct pf_start_tunnel_config *p_tunn_cfg)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf start config is not supported\n");
		return;
	}

	if (!p_src)
		return;

	ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
	ecore_set_tunn_cls_info(p_tun, p_src);
	ecore_set_tunn_ports(p_tun, p_src);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				      &p_tun->vxlan,
				      &p_tunn_cfg->set_vxlan_udp_port_flg,
				      &p_tunn_cfg->vxlan_udp_port,
				      &p_tun->vxlan_port);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				      &p_tun->l2_geneve,
				      &p_tunn_cfg->set_geneve_udp_port_flg,
				      &p_tunn_cfg->geneve_udp_port,
				      &p_tun->geneve_port);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
					&p_tun->ip_geneve);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
					&p_tun->l2_gre);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
					&p_tun->ip_gre);
}

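/*
 * ecore_sp_pf_start() - post the COMMON_RAMROD_PF_START ramrod that
 * brings up the physical function. It conveys the event ring and
 * consolidation queue PBL addresses, multi-function mode, personality,
 * SR-IOV VF range, tunnel configuration and the ETH HSI version.
 */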
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
				       struct ecore_tunnel_info *p_tunn,
				       enum ecore_mf_mode mode,
				       bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
	u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 page_cnt;

	/* update initial eq producer */
	ecore_eq_prod_update(p_hwfn,
			     ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	/* Initialize the SPQ entry for the ramrod */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_START,
				   PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the ramrod data */
	p_ramrod = &p_ent->ramrod.pf_start;
	p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

	/* For easier debugging */
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);

	switch (mode) {
	case ECORE_MF_DEFAULT:
	case ECORE_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case ECORE_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
				       &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case ECORE_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case ECORE_PCI_ETH_IWARP:
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_RDMA:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}

	/* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
	 * version is available.
	 */
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	if (p_tunn)
		ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);

	return rc;
}

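/* Post a PF_UPDATE ramrod carrying the current DCBx results to the FW. */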
enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
					&p_ent->ramrod.pf_update);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

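/*
 * Post a RL_UPDATE ramrod configuring FW rate limiters: which RL IDs to
 * init/start/stop and the QCN/DCQCN parameters to apply to them.
 */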
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
					struct ecore_rl_update_params *params)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct rl_update_ramrod_data *rl_update;
	struct ecore_sp_init_data init_data;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	rl_update = &p_ent->ramrod.rl_update;

	rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
	rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
	rl_update->rl_init_flg = params->rl_init_flg;
	rl_update->rl_start_flg = params->rl_start_flg;
	rl_update->rl_stop_flg = params->rl_stop_flg;
	rl_update->rl_id_first = params->rl_id_first;
	rl_update->rl_id_last = params->rl_id_last;
	rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
	rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
	rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
	rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
	rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
	rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
	rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
	/* "timeuot" follows the (misspelled) HSI field names */
	rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
		params->dcqcn_timeuot_us);
	rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/*
 * Set PF-update ramrod tunnel config params: refresh the cached tunnel
 * state, post a PF_UPDATE ramrod carrying it, and on success apply the
 * new configuration to the port. VFs forward the request to their PF.
 */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
			    struct ecore_tunnel_info *p_tunn,
			    enum spq_mode comp_mode,
			    struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf update config is not supported\n");
		return rc;
	}

	if (!p_tunn)
		return ECORE_INVAL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
					&p_ent->ramrod.pf_update.tunnel_config);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);

	return rc;
}

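/* Post the COMMON_RAMROD_PF_STOP ramrod as part of PF teardown. */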
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

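/*
 * Post an empty ramrod; completion of COMMON_RAMROD_EMPTY serves as a
 * heartbeat indicating the FW slowpath is still responsive.
 */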
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

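/*
 * Post a PF_UPDATE ramrod refreshing the FW's copy of the outer VLAN
 * (S-tag) from hw_info.ovlan.
 */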
enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
	p_ent->ramrod.pf_update.mf_vlan =
	    OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}