/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_sp_commands.c
 */
#include <sys/cdefs.h>
#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#ifndef LINUX_REMOVE
#include "ecore_tcp_ip.h"
#endif

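/* Acquire an SPQ entry and initialize its header (CID, command, protocol)
 * and completion mode.  A minimal usage sketch, following the same pattern
 * the ramrod helpers later in this file use:
 *
 *	struct ecore_sp_init_data init_data;
 *
 *	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 *	init_data.cid = ecore_spq_get_cid(p_hwfn);
 *	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *	rc = ecore_sp_init_request(p_hwfn, &p_ent, cmd, protocol, &init_data);
 */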
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
					   struct ecore_spq_entry **pp_ent,
					   u8 cmd,
					   u8 protocol,
					   struct ecore_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!pp_ent)
		return ECORE_INVAL;

	/* Get an SPQ entry */
	rc = ecore_spq_get_entry(p_hwfn, pp_ent);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the SPQ entry */
	p_ent = *pp_ent;
	p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;
	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case ECORE_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return ECORE_INVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case ECORE_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = OSAL_NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %llx comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long long)(osal_uintptr_t)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return ECORE_SUCCESS;
}

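/* Translate the ecore tunnel classification enum into the firmware's
 * tunnel_clss enum; unrecognized values fall back to MAC/VLAN classification.
 */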
static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
	switch (type) {
	case ECORE_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case ECORE_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case ECORE_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}

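/* Copy the enabled/disabled state of each tunnel type from p_src into the
 * device-wide tunnel state.  On PF start every mode is taken; on PF update
 * only modes whose b_update_mode flag is set are taken.
 */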
static void
ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
			      struct ecore_tunnel_info *p_src,
			      bool b_pf_start)
{
	if (p_src->vxlan.b_update_mode || b_pf_start)
		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

	if (p_src->l2_gre.b_update_mode || b_pf_start)
		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

	if (p_src->ip_gre.b_update_mode || b_pf_start)
		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

	if (p_src->l2_geneve.b_update_mode || b_pf_start)
		p_tun->l2_geneve.b_mode_enabled =
			p_src->l2_geneve.b_mode_enabled;

	if (p_src->ip_geneve.b_update_mode || b_pf_start)
		p_tun->ip_geneve.b_mode_enabled =
			p_src->ip_geneve.b_mode_enabled;
}

static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
				    struct ecore_tunnel_info *p_src)
{
	enum tunnel_clss type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

	type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
	p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
	p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
	p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
	p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
	p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
}

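/* Latch the requested VXLAN/GENEVE UDP destination ports into the
 * device-wide tunnel state when their b_update_port flag is set.
 */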
static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
				 struct ecore_tunnel_info *p_src)
{
	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

	if (p_src->geneve_port.b_update_port)
		p_tun->geneve_port.port = p_src->geneve_port.port;

	if (p_src->vxlan_port.b_update_port)
		p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

static void
__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
				struct ecore_tunn_update_type *tun_type)
{
	*p_tunn_cls = tun_type->tun_cls;
}

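/* Fill a ramrod tunnel classification field and, for tunnel types that carry
 * a configurable UDP destination port (VXLAN/GENEVE), the port-update flag
 * and little-endian port value.
 */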
static void
ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			      struct ecore_tunn_update_type *tun_type,
			      u8 *p_update_port, __le16 *p_port,
			      struct ecore_tunn_update_udp_port *p_udp_port)
{
	__ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
	if (p_udp_port->b_update_port) {
		*p_update_port = 1;
		*p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
	}
}

static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
				struct ecore_tunnel_info *p_src,
				struct pf_update_tunnel_config *p_tunn_cfg)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

	ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
	ecore_set_tunn_cls_info(p_tun, p_src);
	ecore_set_tunn_ports(p_tun, p_src);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				      &p_tun->vxlan,
				      &p_tunn_cfg->set_vxlan_udp_port_flg,
				      &p_tunn_cfg->vxlan_udp_port,
				      &p_tun->vxlan_port);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				      &p_tun->l2_geneve,
				      &p_tunn_cfg->set_geneve_udp_port_flg,
				      &p_tunn_cfg->geneve_udp_port,
				      &p_tun->geneve_port);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
					&p_tun->ip_geneve);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
					&p_tun->l2_gre);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
					&p_tun->ip_gre);

	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_tunnel_info *p_tun)
{
	ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
			     p_tun->ip_gre.b_mode_enabled);
	ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

	ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
				p_tun->ip_geneve.b_mode_enabled);
}

static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_tunnel_info *p_tunn)
{
	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel hw config is not supported\n");
		return;
	}

	if (p_tunn->vxlan_port.b_update_port)
		ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
					  p_tunn->vxlan_port.port);

	if (p_tunn->geneve_port.b_update_port)
		ecore_set_geneve_dest_port(p_hwfn, p_ptt,
					   p_tunn->geneve_port.port);

	ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
			       struct ecore_tunnel_info *p_src,
			       struct pf_start_tunnel_config *p_tunn_cfg)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf start config is not supported\n");
		return;
	}

	if (!p_src)
		return;

	ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
	ecore_set_tunn_cls_info(p_tun, p_src);
	ecore_set_tunn_ports(p_tun, p_src);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				      &p_tun->vxlan,
				      &p_tunn_cfg->set_vxlan_udp_port_flg,
				      &p_tunn_cfg->vxlan_udp_port,
				      &p_tun->vxlan_port);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				      &p_tun->l2_geneve,
				      &p_tunn_cfg->set_geneve_udp_port_flg,
				      &p_tunn_cfg->geneve_udp_port,
				      &p_tun->geneve_port);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
					&p_tun->ip_geneve);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
					&p_tun->l2_gre);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
					&p_tun->ip_gre);
}

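/* Post the COMMON_RAMROD_PF_START ramrod: points the firmware at the event
 * ring and consolidation queue PBLs, and programs the multi-function mode,
 * outer-tag (S-tag/oVLAN) configuration, tunnel start parameters, SR-IOV VF
 * range, and PF personality (ETH/FCoE/iSCSI/RDMA).
 */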
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_tunnel_info *p_tunn,
				       bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
	u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 page_cnt;
	u8 i;

	/* update initial eq producer */
	ecore_eq_prod_update(p_hwfn,
			     ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	/* Initialize the SPQ entry for the ramrod */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_START,
				   PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the ramrod data */
	p_ramrod = &p_ent->ramrod.pf_start;
	p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

	/* For easier debugging */
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);

	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
		p_ramrod->mf_mode = MF_OVLAN;
	else
		p_ramrod->mf_mode = MF_NPAR;

	p_ramrod->outer_tag_config.outer_tag.tci =
		OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
	if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits))
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
	else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
			       &p_hwfn->p_dev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
		p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
	}

	p_ramrod->outer_tag_config.pri_map_valid = 1;
	for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
		p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;

	/* enable_stag_pri_change should be set if port is in BD mode or,
	 * UFP with Host Control mode or, UFP with DCB over base interface.
	 */
	if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
		if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
		    (p_hwfn->p_dcbx_info->results.dcbx_enabled))
			p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
		else
			p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
	}

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
				       &p_ramrod->tunnel_config);

	if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
			  &p_hwfn->p_dev->mf_bits))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case ECORE_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case ECORE_PCI_ETH_IWARP:
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_RDMA:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}

	/* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
	 * version is available.
	 */
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
		   p_ramrod->outer_tag_config.outer_tag.tci);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	if (p_tunn)
		ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					    &p_hwfn->p_dev->tunnel);

	return rc;
}

enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
					&p_ent->ramrod.pf_update);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_UNKNOWN) {
		DP_INFO(p_hwfn, "Invalid priority type %d\n",
			p_hwfn->ufp_info.pri_type);
		return ECORE_INVAL;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
	if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
	    (p_hwfn->p_dcbx_info->results.dcbx_enabled))
		p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
	else
		p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* QM rate limiter resolution is 1.6Mbps */
#define QM_RL_RESOLUTION(mb_val) ((mb_val) * 10 / 16)
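/* e.g. a 10000 Mbps request maps to 10000 * 10 / 16 = 6250 QM rate units */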

/* FW uses 1/64k to express gd */
#define FW_GD_RESOLUTION(gd) (64 * 1024 / (gd))
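/* e.g. a gd denominator of 1024 becomes 64 * 1024 / 1024 = 64, i.e. 64/64k */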

static u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
{
	return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
}

static u16 ecore_sp_rl_gd_denom(u32 gd)
{
	return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
}

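/* Post the COMMON_RAMROD_RL_UPDATE ramrod, converting the caller's rate
 * limiter / DCQCN parameters into QM units and little-endian firmware
 * fields.  (The "timeuot" spelling below matches the firmware HSI field
 * names and is kept intentionally.)
 */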
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
					struct ecore_rl_update_params *params)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct rl_update_ramrod_data *rl_update;
	struct ecore_sp_init_data init_data;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	rl_update = &p_ent->ramrod.rl_update;

	rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
	rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
	rl_update->rl_init_flg = params->rl_init_flg;
	rl_update->rl_start_flg = params->rl_start_flg;
	rl_update->rl_stop_flg = params->rl_stop_flg;
	rl_update->rl_id_first = params->rl_id_first;
	rl_update->rl_id_last = params->rl_id_last;
	rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
	rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
	rl_update->rl_max_rate =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
	rl_update->rl_r_ai =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
	rl_update->rl_r_hai =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
	rl_update->dcqcn_g =
		OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
	rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
	rl_update->dcqcn_timeuot_us =
		OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
	rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
		   rl_update->qcn_update_param_flg,
		   rl_update->dcqcn_update_param_flg,
		   rl_update->rl_init_flg, rl_update->rl_start_flg,
		   rl_update->rl_stop_flg, rl_update->rl_id_first,
		   rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
		   rl_update->rl_bc_rate, rl_update->rl_max_rate,
		   rl_update->rl_r_ai, rl_update->rl_r_hai,
		   rl_update->dcqcn_g, rl_update->dcqcn_k_us,
		   rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Set PF update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    struct ecore_tunnel_info *p_tunn,
			    enum spq_mode comp_mode,
			    struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf update config is not supported\n");
		return rc;
	}

	if (!p_tunn)
		return ECORE_INVAL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
					&p_ent->ramrod.pf_update.tunnel_config);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);

	return rc;
}

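/* Post the COMMON_RAMROD_PF_STOP ramrod to tear down the PF in firmware;
 * completion is awaited via ECORE_SPQ_MODE_EBLOCK.
 */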
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

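/* Post an empty ramrod (COMMON_RAMROD_EMPTY); serves as a heartbeat to
 * verify that the firmware SPQ/EQ path is still responsive.
 */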
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

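/* Post a PF update ramrod that pushes the current outer VLAN (S-tag) from
 * hw_info.ovlan to the firmware.
 */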
enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
	p_ent->ramrod.pf_update.mf_vlan =
		OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}