1 /*
2 * Copyright (c) 2018-2019 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 #include "bcm_osal.h"
30 #include "ecore.h"
31 #include "reg_addr.h"
32 #include "ecore_sriov.h"
33 #include "ecore_status.h"
34 #include "ecore_hw.h"
35 #include "ecore_hw_defs.h"
36 #include "ecore_int.h"
37 #include "ecore_hsi_eth.h"
38 #include "ecore_l2.h"
39 #include "ecore_vfpf_if.h"
40 #include "ecore_rt_defs.h"
41 #include "ecore_init_ops.h"
42 #include "pcics_reg_driver.h"
43 #include "ecore_gtt_reg_addr.h"
44 #include "ecore_iro.h"
45 #include "ecore_mcp.h"
46 #include "ecore_cxt.h"
47 #include "ecore_vf.h"
48 #include "ecore_init_fw_funcs.h"
49 #include "ecore_sp_commands.h"
50
51 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
52 u8 opcode,
53 __le16 echo,
54 union event_ring_data *data,
55 u8 fw_return_code);
56
57 const char *ecore_channel_tlvs_string[] = {
58 "CHANNEL_TLV_NONE", /* ends tlv sequence */
59 "CHANNEL_TLV_ACQUIRE",
60 "CHANNEL_TLV_VPORT_START",
61 "CHANNEL_TLV_VPORT_UPDATE",
62 "CHANNEL_TLV_VPORT_TEARDOWN",
63 "CHANNEL_TLV_START_RXQ",
64 "CHANNEL_TLV_START_TXQ",
65 "CHANNEL_TLV_STOP_RXQ",
66 "CHANNEL_TLV_STOP_TXQ",
67 "CHANNEL_TLV_UPDATE_RXQ",
68 "CHANNEL_TLV_INT_CLEANUP",
69 "CHANNEL_TLV_CLOSE",
70 "CHANNEL_TLV_RELEASE",
71 "CHANNEL_TLV_LIST_END",
72 "CHANNEL_TLV_UCAST_FILTER",
73 "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
74 "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
75 "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
76 "CHANNEL_TLV_VPORT_UPDATE_MCAST",
77 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
78 "CHANNEL_TLV_VPORT_UPDATE_RSS",
79 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
80 "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
81 "CHANNEL_TLV_UPDATE_TUNN_PARAM",
82 "CHANNEL_TLV_COALESCE_UPDATE",
83 "CHANNEL_TLV_QID",
84 "CHANNEL_TLV_COALESCE_READ",
85 "CHANNEL_TLV_MAX"
86 };
87
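/* A VF may be "legacy" in two independent ways, both derived from its ACQUIRE
 * request: running an old fastpath HSI (pre packet-length/tunnelling support)
 * means its Rx producers must be placed in the legacy location, and not
 * advertising VFPF_ACQUIRE_CAP_QUEUE_QIDS means its CIDs are assigned the
 * legacy (queue-based) way.
 */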
static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
89 {
90 u8 legacy = 0;
91
92 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
93 ETH_HSI_VER_NO_PKT_LEN_TUNN)
94 legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
95
96 if (!(p_vf->acquire.vfdev_info.capabilities &
97 VFPF_ACQUIRE_CAP_QUEUE_QIDS))
98 legacy |= ECORE_QCID_LEGACY_VF_CID;
99
100 return legacy;
101 }
102
103 /* IOV ramrods */
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
106 {
107 struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
108 struct ecore_spq_entry *p_ent = OSAL_NULL;
109 struct ecore_sp_init_data init_data;
110 enum _ecore_status_t rc = ECORE_NOTIMPL;
111 u8 fp_minor;
112
113 /* Get SPQ entry */
114 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
115 init_data.cid = ecore_spq_get_cid(p_hwfn);
116 init_data.opaque_fid = p_vf->opaque_fid;
117 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
118
119 rc = ecore_sp_init_request(p_hwfn, &p_ent,
120 COMMON_RAMROD_VF_START,
121 PROTOCOLID_COMMON, &init_data);
122 if (rc != ECORE_SUCCESS)
123 return rc;
124
125 p_ramrod = &p_ent->ramrod.vf_start;
126
127 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
128 p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
129
130 switch (p_hwfn->hw_info.personality) {
131 case ECORE_PCI_ETH:
132 p_ramrod->personality = PERSONALITY_ETH;
133 break;
134 case ECORE_PCI_ETH_ROCE:
135 case ECORE_PCI_ETH_IWARP:
136 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
137 break;
138 default:
139 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
140 p_hwfn->hw_info.personality);
141 return ECORE_INVAL;
142 }
143
144 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
145 if (fp_minor > ETH_HSI_VER_MINOR &&
146 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
147 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
149 p_vf->abs_vf_id,
150 ETH_HSI_VER_MAJOR, fp_minor,
151 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
152 fp_minor = ETH_HSI_VER_MINOR;
153 }
154
155 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
156 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
157
158 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
159 "VF[%d] - Starting using HSI %02x.%02x\n",
160 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
161
162 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
163 }
164
static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
168 {
169 struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
170 struct ecore_spq_entry *p_ent = OSAL_NULL;
171 struct ecore_sp_init_data init_data;
172 enum _ecore_status_t rc = ECORE_NOTIMPL;
173
174 /* Get SPQ entry */
175 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
176 init_data.cid = ecore_spq_get_cid(p_hwfn);
177 init_data.opaque_fid = opaque_vfid;
178 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
179
180 rc = ecore_sp_init_request(p_hwfn, &p_ent,
181 COMMON_RAMROD_VF_STOP,
182 PROTOCOLID_COMMON, &init_data);
183 if (rc != ECORE_SUCCESS)
184 return rc;
185
186 p_ramrod = &p_ent->ramrod.vf_stop;
187
188 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
189
190 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
191 }
192
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
195 {
196 if (!p_hwfn->pf_iov_info) {
197 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
198 return false;
199 }
200
201 if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
202 (rel_vf_id < 0))
203 return false;
204
205 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
206 b_enabled_only)
207 return false;
208
209 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
210 b_non_malicious)
211 return false;
212
213 return true;
214 }
215
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
219 {
220 struct ecore_vf_info *vf = OSAL_NULL;
221
222 if (!p_hwfn->pf_iov_info) {
223 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
224 return OSAL_NULL;
225 }
226
227 if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
228 b_enabled_only, false))
229 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
230 else
231 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
232 relative_vf_id);
233
234 return vf;
235 }
236
237 static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
239 {
240 int i;
241
242 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
243 if (p_queue->cids[i].p_cid &&
244 !p_queue->cids[i].b_is_tx)
245 return p_queue->cids[i].p_cid;
246 }
247
248 return OSAL_NULL;
249 }
250
251 enum ecore_iov_validate_q_mode {
252 ECORE_IOV_VALIDATE_Q_NA,
253 ECORE_IOV_VALIDATE_Q_ENABLE,
254 ECORE_IOV_VALIDATE_Q_DISABLE,
255 };
256
static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
					  u16 qid,
					  enum ecore_iov_validate_q_mode mode,
					  bool b_is_tx)
261 {
262 int i;
263
264 if (mode == ECORE_IOV_VALIDATE_Q_NA)
265 return true;
266
267 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
268 struct ecore_vf_queue_cid *p_qcid;
269
270 p_qcid = &p_vf->vf_queues[qid].cids[i];
271
272 if (p_qcid->p_cid == OSAL_NULL)
273 continue;
274
275 if (p_qcid->b_is_tx != b_is_tx)
276 continue;
277
278 /* Found. It's enabled. */
279 return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
280 }
281
	/* In case we haven't found any valid cid, then it's disabled */
283 return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
284 }
285
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid,
				   enum ecore_iov_validate_q_mode mode)
290 {
291 if (rx_qid >= p_vf->num_rxqs) {
292 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
293 "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
294 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
295 return false;
296 }
297
298 return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
299 }
300
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid,
				   enum ecore_iov_validate_q_mode mode)
305 {
306 if (tx_qid >= p_vf->num_txqs) {
307 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
308 "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
309 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
310 return false;
311 }
312
313 return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
314 }
315
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
319 {
320 int i;
321
322 for (i = 0; i < p_vf->num_sbs; i++)
323 if (p_vf->igu_sbs[i] == sb_idx)
324 return true;
325
326 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
328 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
329
330 return false;
331 }
332
333 /* Is there at least 1 queue open? */
static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
335 {
336 u8 i;
337
338 for (i = 0; i < p_vf->num_rxqs; i++)
339 if (ecore_iov_validate_queue_mode(p_vf, i,
340 ECORE_IOV_VALIDATE_Q_ENABLE,
341 false))
342 return true;
343
344 return false;
345 }
346
static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
348 {
349 u8 i;
350
351 for (i = 0; i < p_vf->num_txqs; i++)
352 if (ecore_iov_validate_queue_mode(p_vf, i,
353 ECORE_IOV_VALIDATE_Q_ENABLE,
354 true))
355 return true;
356
357 return false;
358 }
359
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
363 {
364 struct ecore_bulletin_content *p_bulletin;
365 int crc_size = sizeof(p_bulletin->crc);
366 struct ecore_dmae_params params;
367 struct ecore_vf_info *p_vf;
368
369 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
370 if (!p_vf)
371 return ECORE_INVAL;
372
373 /* TODO - check VF is in a state where it can accept message */
374 if (!p_vf->vf_bulletin)
375 return ECORE_INVAL;
376
377 p_bulletin = p_vf->bulletin.p_virt;
378
379 /* Increment bulletin board version and compute crc */
380 p_bulletin->version++;
381 p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
382 p_vf->bulletin.size - crc_size);
383
384 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
385 "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
386 p_bulletin->version, p_vf->relative_vf_id,
387 p_bulletin->crc);
388
389 /* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
391 params.flags = ECORE_DMAE_FLAG_VF_DST;
392 params.dst_vfid = p_vf->abs_vf_id;
393 return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
394 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
396 }
397
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
399 {
400 struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
401 int pos = iov->pos;
402
403 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
404 OSAL_PCI_READ_CONFIG_WORD(p_dev,
405 pos + PCI_SRIOV_CTRL,
406 &iov->ctrl);
407
408 OSAL_PCI_READ_CONFIG_WORD(p_dev,
409 pos + PCI_SRIOV_TOTAL_VF,
410 &iov->total_vfs);
411 OSAL_PCI_READ_CONFIG_WORD(p_dev,
412 pos + PCI_SRIOV_INITIAL_VF,
413 &iov->initial_vfs);
414
415 OSAL_PCI_READ_CONFIG_WORD(p_dev,
416 pos + PCI_SRIOV_NUM_VF,
417 &iov->num_vfs);
418 if (iov->num_vfs) {
419 /* @@@TODO - in future we might want to add an OSAL here to
420 * allow each OS to decide on its own how to act.
421 */
422 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
423 "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
424 iov->num_vfs = 0;
425 }
426
427 OSAL_PCI_READ_CONFIG_WORD(p_dev,
428 pos + PCI_SRIOV_VF_OFFSET,
429 &iov->offset);
430
431 OSAL_PCI_READ_CONFIG_WORD(p_dev,
432 pos + PCI_SRIOV_VF_STRIDE,
433 &iov->stride);
434
435 OSAL_PCI_READ_CONFIG_WORD(p_dev,
436 pos + PCI_SRIOV_VF_DID,
437 &iov->vf_device_id);
438
439 OSAL_PCI_READ_CONFIG_DWORD(p_dev,
440 pos + PCI_SRIOV_SUP_PGSIZE,
441 &iov->pgsz);
442
443 OSAL_PCI_READ_CONFIG_DWORD(p_dev,
444 pos + PCI_SRIOV_CAP,
445 &iov->cap);
446
447 OSAL_PCI_READ_CONFIG_BYTE(p_dev,
448 pos + PCI_SRIOV_FUNC_LINK,
449 &iov->link);
450
451 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
452 "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
453 iov->nres, iov->cap, iov->ctrl,
454 iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
455 iov->offset, iov->stride, iov->pgsz);
456
457 /* Some sanity checks */
458 if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
459 iov->total_vfs > NUM_OF_VFS(p_dev)) {
460 /* This can happen only due to a bug. In this case we set
461 * num_vfs to zero to avoid memory corruption in the code that
462 * assumes max number of vfs
463 */
464 DP_NOTICE(p_dev, false, "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
465 iov->num_vfs);
466
467 iov->num_vfs = 0;
468 iov->total_vfs = 0;
469 }
470
471 return ECORE_SUCCESS;
472 }
473
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
475 {
476 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
477 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
478 struct ecore_bulletin_content *p_bulletin_virt;
479 dma_addr_t req_p, rply_p, bulletin_p;
480 union pfvf_tlvs *p_reply_virt_addr;
481 union vfpf_tlvs *p_req_virt_addr;
482 u8 idx = 0;
483
484 OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
485
486 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
487 req_p = p_iov_info->mbx_msg_phys_addr;
488 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
489 rply_p = p_iov_info->mbx_reply_phys_addr;
490 p_bulletin_virt = p_iov_info->p_bulletins;
491 bulletin_p = p_iov_info->bulletins_phys;
492 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
493 DP_ERR(p_hwfn, "ecore_iov_setup_vfdb called without allocating mem first\n");
494 return;
495 }
496
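	/* Carve per-VF slices out of the single mailbox request, reply and
	 * bulletin allocations, and derive each VF's relative, absolute and
	 * concrete/opaque FIDs.
	 */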
497 for (idx = 0; idx < p_iov->total_vfs; idx++) {
498 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
499 u32 concrete;
500
501 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
502 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
503 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
504 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
505
506 #ifdef CONFIG_ECORE_SW_CHANNEL
507 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
508 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
509 #endif
510 vf->state = VF_STOPPED;
511 vf->b_init = false;
512
513 vf->bulletin.phys = idx *
514 sizeof(struct ecore_bulletin_content) +
515 bulletin_p;
516 vf->bulletin.p_virt = p_bulletin_virt + idx;
517 vf->bulletin.size = sizeof(struct ecore_bulletin_content);
518
519 vf->relative_vf_id = idx;
520 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
521 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
522 vf->concrete_fid = concrete;
523 /* TODO - need to devise a better way of getting opaque */
524 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
525 (vf->abs_vf_id << 8);
526
527 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
528 vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
529 }
530 }
531
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
533 {
534 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
535 void **p_v_addr;
536 u16 num_vfs = 0;
537
538 num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
539
540 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
541 "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
542
543 /* Allocate PF Mailbox buffer (per-VF) */
544 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
545 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
546 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
547 &p_iov_info->mbx_msg_phys_addr,
548 p_iov_info->mbx_msg_size);
549 if (!*p_v_addr)
550 return ECORE_NOMEM;
551
552 /* Allocate PF Mailbox Reply buffer (per-VF) */
553 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
554 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
555 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
556 &p_iov_info->mbx_reply_phys_addr,
557 p_iov_info->mbx_reply_size);
558 if (!*p_v_addr)
559 return ECORE_NOMEM;
560
561 p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
562 num_vfs;
563 p_v_addr = &p_iov_info->p_bulletins;
564 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
565 &p_iov_info->bulletins_phys,
566 p_iov_info->bulletins_size);
567 if (!*p_v_addr)
568 return ECORE_NOMEM;
569
570 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
571 "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
572 p_iov_info->mbx_msg_virt_addr,
573 (unsigned long long)p_iov_info->mbx_msg_phys_addr,
574 p_iov_info->mbx_reply_virt_addr,
575 (unsigned long long)p_iov_info->mbx_reply_phys_addr,
576 p_iov_info->p_bulletins,
577 (unsigned long long)p_iov_info->bulletins_phys);
578
579 return ECORE_SUCCESS;
580 }
581
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
583 {
584 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
585
586 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
587 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
588 p_iov_info->mbx_msg_virt_addr,
589 p_iov_info->mbx_msg_phys_addr,
590 p_iov_info->mbx_msg_size);
591
592 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
593 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
594 p_iov_info->mbx_reply_virt_addr,
595 p_iov_info->mbx_reply_phys_addr,
596 p_iov_info->mbx_reply_size);
597
598 if (p_iov_info->p_bulletins)
599 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
600 p_iov_info->p_bulletins,
601 p_iov_info->bulletins_phys,
602 p_iov_info->bulletins_size);
603 }
604
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
606 {
607 struct ecore_pf_iov *p_sriov;
608
609 if (!IS_PF_SRIOV(p_hwfn)) {
610 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
611 "No SR-IOV - no need for IOV db\n");
612 return ECORE_SUCCESS;
613 }
614
615 p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
616 if (!p_sriov) {
617 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
618 return ECORE_NOMEM;
619 }
620
621 p_hwfn->pf_iov_info = p_sriov;
622
623 ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
624 ecore_sriov_eqe_event);
625
626 return ecore_iov_allocate_vfdb(p_hwfn);
627 }
628
void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
630 {
631 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
632 return;
633
634 ecore_iov_setup_vfdb(p_hwfn);
635 }
636
void ecore_iov_free(struct ecore_hwfn *p_hwfn)
638 {
639 ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
640
641 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
642 ecore_iov_free_vfdb(p_hwfn);
643 OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
644 p_hwfn->pf_iov_info = OSAL_NULL;
645 }
646 }
647
void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
649 {
650 OSAL_FREE(p_dev, p_dev->p_iov_info);
651 p_dev->p_iov_info = OSAL_NULL;
652 }
653
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
655 {
656 struct ecore_dev *p_dev = p_hwfn->p_dev;
657 int pos;
658 enum _ecore_status_t rc;
659
660 if (IS_VF(p_hwfn->p_dev))
661 return ECORE_SUCCESS;
662
663 /* Learn the PCI configuration */
664 pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
665 PCI_EXT_CAP_ID_SRIOV);
666 if (!pos) {
667 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
668 return ECORE_SUCCESS;
669 }
670
671 /* Allocate a new struct for IOV information */
672 /* TODO - can change to VALLOC when its available */
673 p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
674 sizeof(*p_dev->p_iov_info));
675 if (!p_dev->p_iov_info) {
676 DP_NOTICE(p_hwfn, false,
677 "Can't support IOV due to lack of memory\n");
678 return ECORE_NOMEM;
679 }
680 p_dev->p_iov_info->pos = pos;
681
682 rc = ecore_iov_pci_cfg_info(p_dev);
683 if (rc)
684 return rc;
685
	/* We want PF IOV to be synonymous with the existence of p_iov_info;
687 * In case the capability is published but there are no VFs, simply
688 * de-allocate the struct.
689 */
690 if (!p_dev->p_iov_info->total_vfs) {
691 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
692 "IOV capabilities, but no VFs are published\n");
693 OSAL_FREE(p_dev, p_dev->p_iov_info);
694 p_dev->p_iov_info = OSAL_NULL;
695 return ECORE_SUCCESS;
696 }
697
	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device,
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
	 */
707
708 if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
709 u32 first = p_hwfn->p_dev->p_iov_info->offset +
710 p_hwfn->abs_pf_id - 16;
711
712 p_dev->p_iov_info->first_vf_in_pf = first;
713
714 if (ECORE_PATH_ID(p_hwfn))
715 p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
716 } else {
717 u32 first = p_hwfn->p_dev->p_iov_info->offset +
718 p_hwfn->abs_pf_id - 256;
719
720 p_dev->p_iov_info->first_vf_in_pf = first;
721 }
722
723 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
724 "First VF in hwfn 0x%08x\n",
725 p_dev->p_iov_info->first_vf_in_pf);
726
727 return ECORE_SUCCESS;
728 }
729
static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
732 {
733 /* Check PF supports sriov */
734 if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
735 !IS_PF_SRIOV_ALLOC(p_hwfn))
736 return false;
737
738 /* Check VF validity */
739 if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
740 return false;
741
742 return true;
743 }
744
bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
746 {
747 return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
748 }
749
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id,
				 u8 to_disable)
753 {
754 struct ecore_vf_info *vf;
755 int i;
756
757 for_each_hwfn(p_dev, i) {
758 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
759
760 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
761 if (!vf)
762 continue;
763
764 vf->to_disable = to_disable;
765 }
766 }
767
void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
770 {
771 u16 i;
772
773 if (!IS_ECORE_SRIOV(p_dev))
774 return;
775
776 for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
777 ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
778 }
779
780 #ifndef LINUX_REMOVE
781 /* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id,
					  void *ctx)
785 {
786 enum _ecore_status_t rc = ECORE_SUCCESS;
787 struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
788
789 if (vf != OSAL_NULL) {
790 vf->ctx = ctx;
791 #ifdef CONFIG_ECORE_SW_CHANNEL
792 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
793 #endif
794 } else {
795 rc = ECORE_UNKNOWN_ERROR;
796 }
797 return rc;
798 }
799 #endif
800
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
804 {
805 ecore_wr(p_hwfn, p_ptt,
806 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
807 1 << (abs_vfid & 0x1f));
808 }
809
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
813 {
814 int i;
815
816 /* Set VF masks and configuration - pretend */
817 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
818
819 ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
820
821 /* unpretend */
822 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
823
824 /* iterate over all queues, clear sb consumer */
825 for (i = 0; i < vf->num_sbs; i++)
826 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
827 vf->igu_sbs[i],
828 vf->opaque_fid, true);
829 }
830
static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     bool enable)
835 {
836 u32 igu_vf_conf;
837
838 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
839
840 igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
841
842 if (enable) {
843 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
844 } else {
845 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
846 }
847
848 ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
849
850 /* unpretend */
851 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
852 }
853
854 static enum _ecore_status_t
ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u8 abs_vf_id,
				u8 num_sbs)
859 {
860 u8 current_max = 0;
861 int i;
862
863 /* If client overrides this, don't do anything */
864 if (p_hwfn->p_dev->b_dont_override_vf_msix)
865 return ECORE_SUCCESS;
866
867 /* For AH onward, configuration is per-PF. Find maximum of all
868 * the currently enabled child VFs, and set the number to be that.
869 */
870 if (!ECORE_IS_BB(p_hwfn->p_dev)) {
871 ecore_for_each_vf(p_hwfn, i) {
872 struct ecore_vf_info *p_vf;
873
874 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
875 if (!p_vf)
876 continue;
877
878 current_max = OSAL_MAX_T(u8, current_max,
879 p_vf->num_sbs);
880 }
881 }
882
883 if (num_sbs > current_max)
884 return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
885 abs_vf_id, num_sbs);
886
887 return ECORE_SUCCESS;
888 }
889
static enum _ecore_status_t ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
							struct ecore_ptt *p_ptt,
							struct ecore_vf_info *vf)
893 {
894 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
895 enum _ecore_status_t rc = ECORE_SUCCESS;
896
897 /* It's possible VF was previously considered malicious -
898 * clear the indication even if we're only going to disable VF.
899 */
900 vf->b_malicious = false;
901
902 if (vf->to_disable)
903 return ECORE_SUCCESS;
904
905 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Enable internal access for vf %x [abs %x]\n",
906 vf->abs_vf_id, ECORE_VF_ABS_ID(p_hwfn, vf));
907
908 ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
909 ECORE_VF_ABS_ID(p_hwfn, vf));
910
911 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
912
913 rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
914 vf->abs_vf_id, vf->num_sbs);
915 if (rc != ECORE_SUCCESS)
916 return rc;
917
918 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
919
920 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
921 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
922
923 ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
924 p_hwfn->hw_info.hw_mode);
925
926 /* unpretend */
927 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
928
929 vf->state = VF_FREE;
930
931 return rc;
932 }
933
934 /**
935 * @brief ecore_iov_config_perm_table - configure the permission
936 * zone table.
937 * In E4, queue zone permission table size is 320x9. There
938 * are 320 VF queues for single engine device (256 for dual
939 * engine device), and each entry has the following format:
940 * {Valid, VF[7:0]}
941 * @param p_hwfn
942 * @param p_ptt
943 * @param vf
944 * @param enable
945 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf,
					u8 enable)
950 {
951 u32 reg_addr, val;
952 u16 qzone_id = 0;
953 int qid;
954
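	/* Map each of the VF's Rx queue-zones to this VF by writing the
	 * {valid, vf_id} pair, or clear the entry when disabling.
	 */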
955 for (qid = 0; qid < vf->num_rxqs; qid++) {
956 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
957 &qzone_id);
958
959 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
960 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
961 ecore_wr(p_hwfn, p_ptt, reg_addr, val);
962 }
963 }
964
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
968 {
969 /* Reset vf in IGU - interrupts are still disabled */
970 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
971
972 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
973
974 /* Permission Table */
975 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
976 }
977
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
982 {
983 struct ecore_igu_block *p_block;
984 struct cau_sb_entry sb_entry;
985 int qid = 0;
986 u32 val = 0;
987
988 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
989 num_rx_queues =
990 (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
991 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
992
993 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
994 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
995 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
996
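	/* For each requested queue: take a free IGU block, bind it to this VF
	 * in the IGU mapping memory, and program its CAU SB entry via DMAE.
	 */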
997 for (qid = 0; qid < num_rx_queues; qid++) {
998 p_block = ecore_get_igu_free_sb(p_hwfn, false);
999 vf->igu_sbs[qid] = p_block->igu_sb_id;
1000 p_block->status &= ~ECORE_IGU_STATUS_FREE;
1001 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
1002
1003 ecore_wr(p_hwfn, p_ptt,
1004 IGU_REG_MAPPING_MEMORY +
1005 sizeof(u32) * p_block->igu_sb_id, val);
1006
		/* Configure in CAU the igu SBs that were marked valid */
1008 ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
1009 p_hwfn->rel_pf_id,
1010 vf->abs_vf_id, 1);
1011
1012 ecore_dmae_host2grc(p_hwfn, p_ptt,
1013 (u64)(osal_uintptr_t)&sb_entry,
1014 CAU_REG_SB_VAR_MEMORY +
1015 p_block->igu_sb_id * sizeof(u64), 2,
1016 OSAL_NULL /* default parameters */);
1017 }
1018
1019 vf->num_sbs = (u8)num_rx_queues;
1020
1021 return vf->num_sbs;
1022 }
1023
1024 /**
1025 *
1026 * @brief The function invalidates all the VF entries,
1027 * technically this isn't required, but added for
 * cleanliness and ease of debugging in case a VF attempts to
1029 * produce an interrupt after it has been taken down.
1030 *
1031 * @param p_hwfn
1032 * @param p_ptt
1033 * @param vf
1034 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
1038
1039 {
1040 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1041 int idx, igu_id;
1042 u32 addr, val;
1043
1044 /* Invalidate igu CAM lines and mark them as free */
1045 for (idx = 0; idx < vf->num_sbs; idx++) {
1046 igu_id = vf->igu_sbs[idx];
1047 addr = IGU_REG_MAPPING_MEMORY +
1048 sizeof(u32) * igu_id;
1049
1050 val = ecore_rd(p_hwfn, p_ptt, addr);
1051 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
1052 ecore_wr(p_hwfn, p_ptt, addr, val);
1053
1054 p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
1055 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
1056 }
1057
1058 vf->num_sbs = 0;
1059 }
1060
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
1066 {
1067 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
1068 struct ecore_bulletin_content *p_bulletin;
1069
1070 if (!p_vf)
1071 return;
1072
1073 p_bulletin = p_vf->bulletin.p_virt;
1074 p_bulletin->req_autoneg = params->speed.autoneg;
1075 p_bulletin->req_adv_speed = params->speed.advertised_speeds;
1076 p_bulletin->req_forced_speed = params->speed.forced_speed;
1077 p_bulletin->req_autoneg_pause = params->pause.autoneg;
1078 p_bulletin->req_forced_rx = params->pause.forced_rx;
1079 p_bulletin->req_forced_tx = params->pause.forced_tx;
1080 p_bulletin->req_loopback = params->loopback_mode;
1081
1082 p_bulletin->link_up = link->link_up;
1083 p_bulletin->speed = link->speed;
1084 p_bulletin->full_duplex = link->full_duplex;
1085 p_bulletin->autoneg = link->an;
1086 p_bulletin->autoneg_complete = link->an_complete;
1087 p_bulletin->parallel_detection = link->parallel_detection;
1088 p_bulletin->pfc_enabled = link->pfc_enabled;
1089 p_bulletin->partner_adv_speed = link->partner_adv_speed;
1090 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
1091 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
1092 p_bulletin->partner_adv_pause = link->partner_adv_pause;
1093 p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
1094
1095 p_bulletin->capability_speed = p_caps->speed_capabilities;
1096 }
1097
1098 enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
1102 {
1103 struct ecore_mcp_link_capabilities link_caps;
1104 struct ecore_mcp_link_params link_params;
1105 struct ecore_mcp_link_state link_state;
1106 u8 num_of_vf_avaiable_chains = 0;
1107 struct ecore_vf_info *vf = OSAL_NULL;
1108 u16 qid, num_irqs;
1109 enum _ecore_status_t rc = ECORE_SUCCESS;
1110 u32 cids;
1111 u8 i;
1112
1113 vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1114 if (!vf) {
1115 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
1116 return ECORE_UNKNOWN_ERROR;
1117 }
1118
1119 if (vf->b_init) {
1120 DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
1121 p_params->rel_vf_id);
1122 return ECORE_INVAL;
1123 }
1124
1125 /* Perform sanity checking on the requested vport/rss */
1126 if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
1127 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
1128 p_params->rel_vf_id, p_params->vport_id);
1129 return ECORE_INVAL;
1130 }
1131
1132 if ((p_params->num_queues > 1) &&
1133 (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
1134 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
1135 p_params->rel_vf_id, p_params->rss_eng_id);
1136 return ECORE_INVAL;
1137 }
1138
1139 /* TODO - remove this once we get confidence of change */
1140 if (!p_params->vport_id) {
1141 DP_NOTICE(p_hwfn, false,
1142 "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
1143 p_params->rel_vf_id);
1144 }
1145 if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
1146 DP_NOTICE(p_hwfn, false,
1147 "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
1148 p_params->rel_vf_id);
1149 }
1150 vf->vport_id = p_params->vport_id;
1151 vf->rss_eng_id = p_params->rss_eng_id;
1152
1153 /* Since it's possible to relocate SBs, it's a bit difficult to check
1154 * things here. Simply check whether the index falls in the range
1155 * belonging to the PF.
1156 */
1157 for (i = 0; i < p_params->num_queues; i++) {
1158 qid = p_params->req_rx_queue[i];
1159 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1160 DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
1162 qid, p_params->rel_vf_id,
1163 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1164 return ECORE_INVAL;
1165 }
1166
1167 qid = p_params->req_tx_queue[i];
1168 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1169 DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
1171 qid, p_params->rel_vf_id,
1172 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1173 return ECORE_INVAL;
1174 }
1175 }
1176
1177 /* Limit number of queues according to number of CIDs */
1178 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1179 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1180 "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
1181 vf->relative_vf_id, p_params->num_queues, (u16)cids);
1182 num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
1183
1184 num_of_vf_avaiable_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
1185 p_ptt,
1186 vf,
1187 num_irqs);
1188 if (num_of_vf_avaiable_chains == 0) {
1189 DP_ERR(p_hwfn, "no available igu sbs\n");
1190 return ECORE_NOMEM;
1191 }
1192
1193 /* Choose queue number and index ranges */
1194 vf->num_rxqs = num_of_vf_avaiable_chains;
1195 vf->num_txqs = num_of_vf_avaiable_chains;
1196
1197 for (i = 0; i < vf->num_rxqs; i++) {
1198 struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
1199
1200 p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1201 p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1202
1203 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1204 "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
1205 vf->relative_vf_id, i, vf->igu_sbs[i],
1206 p_queue->fw_rx_qid, p_queue->fw_tx_qid);
1207 }
1208
1209 /* Update the link configuration in bulletin.
1210 */
1211 OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
1212 sizeof(link_params));
1213 OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
1214 sizeof(link_state));
1215 OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
1216 sizeof(link_caps));
1217 ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
1218 &link_params, &link_state, &link_caps);
1219
1220 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1221
1222 if (rc == ECORE_SUCCESS) {
1223 vf->b_init = true;
1224 #ifndef REMOVE_DBG
1225 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
1226 (1ULL << (vf->relative_vf_id % 64));
1227 #endif
1228
1229 if (IS_LEAD_HWFN(p_hwfn))
1230 p_hwfn->p_dev->p_iov_info->num_vfs++;
1231 }
1232
1233 return rc;
1234 }
1235
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
1239 {
1240 struct ecore_mcp_link_capabilities caps;
1241 struct ecore_mcp_link_params params;
1242 struct ecore_mcp_link_state link;
1243 struct ecore_vf_info *vf = OSAL_NULL;
1244
1245 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1246 if (!vf) {
1247 DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
1248 return ECORE_UNKNOWN_ERROR;
1249 }
1250
1251 if (vf->bulletin.p_virt)
1252 OSAL_MEMSET(vf->bulletin.p_virt, 0,
1253 sizeof(*vf->bulletin.p_virt));
1254
1255 OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1256
1257 /* Get the link configuration back in bulletin so
1258 * that when VFs are re-enabled they get the actual
1259 * link configuration.
1260 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
1262 OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
1263 OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
1264 sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1266
1267 /* Forget the VF's acquisition message */
1268 OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
1269
	/* Disabling interrupts and resetting the permission table were done during
1271 * vf-close, however, we could get here without going through vf_close
1272 */
1273 /* Disable Interrupts for VF */
1274 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1275
1276 /* Reset Permission table */
1277 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1278
1279 vf->num_rxqs = 0;
1280 vf->num_txqs = 0;
1281 ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1282
1283 if (vf->b_init) {
1284 vf->b_init = false;
1285 #ifndef REMOVE_DBG
1286 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
			~(1ULL << (vf->relative_vf_id % 64));
1288 #endif
1289
1290 if (IS_LEAD_HWFN(p_hwfn))
1291 p_hwfn->p_dev->p_iov_info->num_vfs--;
1292 }
1293
1294 return ECORE_SUCCESS;
1295 }
1296
static bool ecore_iov_tlv_supported(u16 tlvtype)
1298 {
1299 return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
1300 }
1301
static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf,
					 u16 tlv)
1305 {
1306 /* lock the channel */
1307 /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
1308
1309 /* record the locking op */
1310 /* vf->op_current = tlv; @@@TBD MichalK */
1311
1312 /* log the lock */
1313 if (ecore_iov_tlv_supported(tlv))
1314 DP_VERBOSE(p_hwfn,
1315 ECORE_MSG_IOV,
1316 "VF[%d]: vf pf channel locked by %s\n",
1317 vf->abs_vf_id,
1318 ecore_channel_tlvs_string[tlv]);
1319 else
1320 DP_VERBOSE(p_hwfn,
1321 ECORE_MSG_IOV,
1322 "VF[%d]: vf pf channel locked by %04x\n",
1323 vf->abs_vf_id, tlv);
1324 }
1325
static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
1329 {
1330 /*WARN(expected_tlv != vf->op_current,
1331 "lock mismatch: expected %s found %s",
1332 channel_tlvs_string[expected_tlv],
1333 channel_tlvs_string[vf->op_current]);
1334 @@@TBD MichalK
1335 */
1336
1337 /* lock the channel */
1338 /* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */
1339
1340 /* log the unlock */
1341 if (ecore_iov_tlv_supported(expected_tlv))
1342 DP_VERBOSE(p_hwfn,
1343 ECORE_MSG_IOV,
1344 "VF[%d]: vf pf channel unlocked by %s\n",
1345 vf->abs_vf_id,
1346 ecore_channel_tlvs_string[expected_tlv]);
1347 else
1348 DP_VERBOSE(p_hwfn,
1349 ECORE_MSG_IOV,
1350 "VF[%d]: vf pf channel unlocked by %04x\n",
1351 vf->abs_vf_id, expected_tlv);
1352
1353 /* record the locking op */
1354 /* vf->op_current = CHANNEL_TLV_NONE;*/
1355 }
1356
1357 /* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
1359 {
1360 struct channel_tlv *tl = (struct channel_tlv *)*offset;
1361
1362 tl->type = type;
1363 tl->length = length;
1364
1365 /* Offset should keep pointing to next TLV (the end of the last) */
1366 *offset += length;
1367
1368 /* Return a pointer to the start of the added tlv */
1369 return *offset - length;
1370 }
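
/* Typical usage (see e.g. ecore_iov_prepare_resp() below): the caller points
 * the offset at the start of the reply buffer, appends one or more TLVs and
 * terminates the chain with a CHANNEL_TLV_LIST_END TLV before sending.
 */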
1371
1372 /* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
1374 {
1375 u16 i = 1, total_length = 0;
1376 struct channel_tlv *tlv;
1377
1378 do {
1379 /* cast current tlv list entry to channel tlv header*/
1380 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1381
1382 /* output tlv */
1383 if (ecore_iov_tlv_supported(tlv->type))
1384 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1385 "TLV number %d: type %s, length %d\n",
1386 i, ecore_channel_tlvs_string[tlv->type],
1387 tlv->length);
1388 else
1389 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1390 "TLV number %d: type %d, length %d\n",
1391 i, tlv->type, tlv->length);
1392
1393 if (tlv->type == CHANNEL_TLV_LIST_END)
1394 return;
1395
1396 /* Validate entry - protect against malicious VFs */
1397 if (!tlv->length) {
1398 DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
1399 return;
1400 }
1401
1402 total_length += tlv->length;
1403
1404 if (total_length >= sizeof(struct tlv_buffer_size)) {
1405 DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
1406 return;
1407 }
1408
1409 i++;
1410 } while (1);
1411 }
1412
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
#ifdef CONFIG_ECORE_SW_CHANNEL
				    u16 length,
#else
				    u16 OSAL_UNUSED length,
#endif
				    u8 status)
1422 {
1423 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1424 struct ecore_dmae_params params;
1425 u8 eng_vf_id;
1426
1427 mbx->reply_virt->default_resp.hdr.status = status;
1428
1429 ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
1430
1431 #ifdef CONFIG_ECORE_SW_CHANNEL
1432 mbx->sw_mbx.response_size =
1433 length + sizeof(struct channel_list_end_tlv);
1434
1435 if (!p_vf->b_hw_channel)
1436 return;
1437 #endif
1438
1439 eng_vf_id = p_vf->abs_vf_id;
1440
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
1442 params.flags = ECORE_DMAE_FLAG_VF_DST;
1443 params.dst_vfid = eng_vf_id;
1444
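	/* Copy the body of the reply first (everything past the leading u64).
	 * The leading u64 carries the response header the VF polls on, so it
	 * is DMAed only after the channel-ready flag is set below.
	 */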
1445 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1446 mbx->req_virt->first_tlv.reply_address +
1447 sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);
1450
1451 /* Once PF copies the rc to the VF, the latter can continue and
	 * send an additional message. So we have to make sure the
1453 * channel would be re-set to ready prior to that.
1454 */
1455 REG_WR(p_hwfn,
1456 GTT_BAR0_MAP_REG_USDM_RAM +
1457 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id),
1458 1);
1459
1460 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1461 mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);
1463
1464 OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
1465 }
1466
static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
1468 {
1469 switch (flag) {
1470 case ECORE_IOV_VP_UPDATE_ACTIVATE:
1471 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1472 case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
1473 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1474 case ECORE_IOV_VP_UPDATE_TX_SWITCH:
1475 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1476 case ECORE_IOV_VP_UPDATE_MCAST:
1477 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1478 case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
1479 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1480 case ECORE_IOV_VP_UPDATE_RSS:
1481 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1482 case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1483 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1484 case ECORE_IOV_VP_UPDATE_SGE_TPA:
1485 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1486 default:
1487 return 0;
1488 }
1489 }
1490
static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
1496 {
1497 struct pfvf_def_resp_tlv *resp;
1498 u16 size, total_len, i;
1499
1500 OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1501 p_mbx->offset = (u8 *)p_mbx->reply_virt;
1502 size = sizeof(struct pfvf_def_resp_tlv);
1503 total_len = size;
1504
1505 ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1506
1507 /* Prepare response for all extended tlvs if they are found by PF */
1508 for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
1509 if (!(tlvs_mask & (1 << i)))
1510 continue;
1511
1512 resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
1513 size);
1514
1515 if (tlvs_accepted & (1 << i))
1516 resp->hdr.status = status;
1517 else
1518 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1519
1520 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1521 "VF[%d] - vport_update response: TLV %d, status %02x\n",
1522 p_vf->relative_vf_id,
1523 ecore_iov_vport_to_tlv(i),
1524 resp->hdr.status);
1525
1526 total_len += size;
1527 }
1528
1529 ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
1530 sizeof(struct channel_list_end_tlv));
1531
1532 return total_len;
1533 }
1534
static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
1539 {
1540 struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1541
1542 mbx->offset = (u8 *)mbx->reply_virt;
1543
1544 ecore_add_tlv(&mbx->offset, type, length);
1545 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
1546 sizeof(struct channel_list_end_tlv));
1547
1548 ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1549 }
1550
struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
							   u16 relative_vf_id,
							   bool b_enabled_only)
1554 {
1555 struct ecore_vf_info *vf = OSAL_NULL;
1556
1557 vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1558 if (!vf)
1559 return OSAL_NULL;
1560
1561 return &vf->p_vf_info;
1562 }
1563
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
1566 {
1567 u32 i, j;
1568
1569 p_vf->vf_bulletin = 0;
1570 p_vf->vport_instance = 0;
1571 p_vf->configured_features = 0;
1572
1573 /* If VF previously requested less resources, go back to default */
1574 p_vf->num_rxqs = p_vf->num_sbs;
1575 p_vf->num_txqs = p_vf->num_sbs;
1576
1577 p_vf->num_active_rxqs = 0;
1578
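	/* Release any queue-cids the VF still owns so they can be
	 * re-acquired later.
	 */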
1579 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1580 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
1581
1582 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
1583 if (!p_queue->cids[j].p_cid)
1584 continue;
1585
1586 ecore_eth_queue_cid_release(p_hwfn,
1587 p_queue->cids[j].p_cid);
1588 p_queue->cids[j].p_cid = OSAL_NULL;
1589 }
1590 }
1591
1592 OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1593 OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1594 OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
1595 }
1596
1597 /* Returns either 0, or log(size) */
static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
1600 {
1601 u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
1602
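	/* PGLUE reports the VF doorbell (BAR1) size as a power-of-two
	 * encoding; the '+ 11' presumably converts it to log2 of the size in
	 * bytes, matching how the caller turns the result into a byte count
	 * via 1 << value.
	 */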
1603 if (val)
1604 return val + 11;
1605 return 0;
1606 }
1607
1608 static void
ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *p_vf,
				   struct vf_pf_resc_request *p_req,
				   struct pf_vf_resc *p_resp)
1614 {
1615 u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
1616 u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
1617 DB_ADDR_VF(0, DQ_DEMS_LEGACY);
1618 u32 bar_size;
1619
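	/* db_size is the doorbell stride per CID; together with the doorbell
	 * view available to the VF it bounds how many CIDs the VF can
	 * actually ring doorbells for.
	 */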
1620 p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
1621
	/* If VF didn't bother asking for QIDs then don't bother limiting
1623 * number of CIDs. The VF doesn't care about the number, and this
1624 * has the likely result of causing an additional acquisition.
1625 */
1626 if (!(p_vf->acquire.vfdev_info.capabilities &
1627 VFPF_ACQUIRE_CAP_QUEUE_QIDS))
1628 return;
1629
1630 /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
1631 * that would make sure doorbells for all CIDs fall within the bar.
1632 * If it doesn't, make sure regview window is sufficient.
1633 */
1634 if (p_vf->acquire.vfdev_info.capabilities &
1635 VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
1636 bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
1637 if (bar_size)
1638 bar_size = 1 << bar_size;
1639
1640 if (ECORE_IS_CMT(p_hwfn->p_dev))
1641 bar_size /= 2;
1642 } else {
1643 bar_size = PXP_VF_BAR0_DQ_LENGTH;
1644 }
1645
1646 if (bar_size / db_size < 256)
1647 p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
1648 (u8)(bar_size / db_size));
1649 }
1650
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
1656 {
1657 u8 i;
1658
1659 /* Queue related information */
1660 p_resp->num_rxqs = p_vf->num_rxqs;
1661 p_resp->num_txqs = p_vf->num_txqs;
1662 p_resp->num_sbs = p_vf->num_sbs;
1663
1664 for (i = 0; i < p_resp->num_sbs; i++) {
1665 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1666 /* TODO - what's this sb_qid field? Is it deprecated?
1667 * or is there an ecore_client that looks at this?
1668 */
1669 p_resp->hw_sbs[i].sb_qid = 0;
1670 }
1671
1672 /* These fields are filled for backward compatibility.
1673 * Unused by modern vfs.
1674 */
1675 for (i = 0; i < p_resp->num_rxqs; i++) {
1676 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1677 (u16 *)&p_resp->hw_qid[i]);
1678 p_resp->cid[i] = i;
1679 }
1680
1681 /* Filter related information */
1682 p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1683 p_req->num_mac_filters);
1684 p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1685 p_req->num_vlan_filters);
1686
1687 ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
1688
1689 /* This isn't really needed/enforced, but some legacy VFs might depend
1690 * on the correct filling of this field.
1691 */
1692 p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1693
1694 /* Validate sufficient resources for VF */
1695 if (p_resp->num_rxqs < p_req->num_rxqs ||
1696 p_resp->num_txqs < p_req->num_txqs ||
1697 p_resp->num_sbs < p_req->num_sbs ||
1698 p_resp->num_mac_filters < p_req->num_mac_filters ||
1699 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1700 p_resp->num_mc_filters < p_req->num_mc_filters ||
1701 p_resp->num_cids < p_req->num_cids) {
1702 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1703 "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
1704 p_vf->abs_vf_id,
1705 p_req->num_rxqs, p_resp->num_rxqs,
1706 p_req->num_txqs, p_resp->num_txqs,
1707 p_req->num_sbs, p_resp->num_sbs,
1708 p_req->num_mac_filters, p_resp->num_mac_filters,
1709 p_req->num_vlan_filters, p_resp->num_vlan_filters,
1710 p_req->num_mc_filters, p_resp->num_mc_filters,
1711 p_req->num_cids, p_resp->num_cids);
1712
1713 /* Some legacy OSes are incapable of correctly handling this
1714 * failure.
1715 */
1716 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1717 ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1718 (p_vf->acquire.vfdev_info.os_type ==
1719 VFPF_ACQUIRE_OS_WINDOWS))
1720 return PFVF_STATUS_SUCCESS;
1721
1722 return PFVF_STATUS_NO_RESOURCE;
1723 }
1724
1725 return PFVF_STATUS_SUCCESS;
1726 }
1727
ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info * p_stats)1728 static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
1729 {
1730 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1731 OFFSETOF(struct mstorm_vf_zone,
1732 non_trigger.eth_queue_stat);
1733 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1734 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1735 OFFSETOF(struct ustorm_vf_zone,
1736 non_trigger.eth_queue_stat);
1737 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1738 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1739 OFFSETOF(struct pstorm_vf_zone,
1740 non_trigger.eth_queue_stat);
1741 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1742 p_stats->tstats.address = 0;
1743 p_stats->tstats.len = 0;
1744 }
1745
ecore_iov_vf_mbx_acquire(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)1746 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
1747 struct ecore_ptt *p_ptt,
1748 struct ecore_vf_info *vf)
1749 {
1750 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1751 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1752 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1753 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1754 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1755 struct pf_vf_resc *resc = &resp->resc;
1756 enum _ecore_status_t rc;
1757
1758 OSAL_MEMSET(resp, 0, sizeof(*resp));
1759
1760 /* Write the PF version so that the VF knows which version
1761 * is supported - it might be overridden later. This guarantees that
1762 * the VF can recognize a legacy PF by the lack of versions in the reply.
1763 */
1764 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1765 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1766
1767 /* TODO - not doing anything is bad since we'll assert, but this isn't
1768 * necessarily the right behavior - perhaps we should have allowed some
1769 * versatility here.
1770 */
1771 if (vf->state != VF_FREE &&
1772 vf->state != VF_STOPPED) {
1773 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1774 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1775 vf->abs_vf_id, vf->state);
1776 goto out;
1777 }
1778
1779 /* Validate FW compatibility */
1780 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1781 if (req->vfdev_info.capabilities &
1782 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1783 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1784
1785 /* This legacy support would need to be removed once
1786 * the major has changed.
1787 */
1788 OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1789
1790 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1791 "VF[%d] is pre-fastpath HSI\n",
1792 vf->abs_vf_id);
1793 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1794 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1795 } else {
1796 DP_INFO(p_hwfn,
1797 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
1798 vf->abs_vf_id,
1799 req->vfdev_info.eth_fp_hsi_major,
1800 req->vfdev_info.eth_fp_hsi_minor,
1801 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1802
1803 goto out;
1804 }
1805 }
1806
1807 /* On 100g PFs, prevent old VFs from loading */
1808 if (ECORE_IS_CMT(p_hwfn->p_dev) &&
1809 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1810 DP_INFO(p_hwfn, "VF[%d] is running an old driver that doesn't support 100g\n",
1811 vf->abs_vf_id);
1812 goto out;
1813 }
1814
1815 #ifndef __EXTRACT__LINUX__
1816 if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1817 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1818 goto out;
1819 }
1820 #endif
1821
1822 /* Store the acquire message */
1823 OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1824
1825 vf->opaque_fid = req->vfdev_info.opaque_fid;
1826
1827 vf->vf_bulletin = req->bulletin_addr;
1828 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1829 vf->bulletin.size : req->bulletin_size;
1830
1831 /* fill in pfdev info */
1832 pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1833 pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
1834 pfdev_info->indices_per_sb = PIS_PER_SB_E4;
1835
1836 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1837 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1838 if (ECORE_IS_CMT(p_hwfn->p_dev))
1839 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1840
1841 /* Share our ability to use multiple queue-ids only with VFs
1842 * that request it.
1843 */
1844 if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
1845 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
1846
1847 /* Share the sizes of the bars with VF */
1848 resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
1849 p_ptt);
1850
1851 ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
1852
1853 OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1854 ETH_ALEN);
1855
1856 pfdev_info->fw_major = FW_MAJOR_VERSION;
1857 pfdev_info->fw_minor = FW_MINOR_VERSION;
1858 pfdev_info->fw_rev = FW_REVISION_VERSION;
1859 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1860
1861 /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1862 * this field.
1863 */
1864 pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1865 req->vfdev_info.eth_fp_hsi_minor);
1866 pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1867 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1868 OSAL_NULL);
1869
1870 pfdev_info->dev_type = p_hwfn->p_dev->type;
1871 pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1872
1873 /* Fill resources available to VF; Make sure there are enough to
1874 * satisfy the VF's request.
1875 */
1876 vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1877 &req->resc_request, resc);
1878 if (vfpf_status != PFVF_STATUS_SUCCESS)
1879 goto out;
1880
1881 /* Start the VF in FW */
1882 rc = ecore_sp_vf_start(p_hwfn, vf);
1883 if (rc != ECORE_SUCCESS) {
1884 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1885 vf->abs_vf_id);
1886 vfpf_status = PFVF_STATUS_FAILURE;
1887 goto out;
1888 }
1889
1890 /* Fill agreed size of bulletin board in response, and post
1891 * an initial image to the bulletin board.
1892 */
1893 resp->bulletin_size = vf->bulletin.size;
1894 ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1895
1896 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1897 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1898 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1899 vf->abs_vf_id, resp->pfdev_info.chip_num,
1900 resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1901 (unsigned long long)resp->pfdev_info.capabilities, resc->num_rxqs,
1902 resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1903 resc->num_vlan_filters);
1904
1905 vf->state = VF_ACQUIRED;
1906
1907 out:
1908 /* Prepare Response */
1909 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1910 sizeof(struct pfvf_acquire_resp_tlv),
1911 vfpf_status);
1912 }
1913
__ecore_iov_spoofchk_set(struct ecore_hwfn * p_hwfn,struct ecore_vf_info * p_vf,bool val)1914 static enum _ecore_status_t __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1915 struct ecore_vf_info *p_vf, bool val)
1916 {
1917 struct ecore_sp_vport_update_params params;
1918 enum _ecore_status_t rc;
1919
1920 if (val == p_vf->spoof_chk) {
1921 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1922 "Spoofchk value[%d] is already configured\n",
1923 val);
1924 return ECORE_SUCCESS;
1925 }
1926
1927 OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1928 params.opaque_fid = p_vf->opaque_fid;
1929 params.vport_id = p_vf->vport_id;
1930 params.update_anti_spoofing_en_flg = 1;
1931 params.anti_spoofing_en = val;
1932
1933 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
1934 OSAL_NULL);
1935 if (rc == ECORE_SUCCESS) {
1936 p_vf->spoof_chk = val;
1937 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1938 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1939 "Spoofchk val[%d] configured\n", val);
1940 } else {
1941 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1942 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1943 val, p_vf->relative_vf_id);
1944 }
1945
1946 return rc;
1947 }
1948
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn * p_hwfn,struct ecore_vf_info * p_vf)1949 static enum _ecore_status_t ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1950 struct ecore_vf_info *p_vf)
1951 {
1952 struct ecore_filter_ucast filter;
1953 enum _ecore_status_t rc = ECORE_SUCCESS;
1954 int i;
1955
1956 OSAL_MEMSET(&filter, 0, sizeof(filter));
1957 filter.is_rx_filter = 1;
1958 filter.is_tx_filter = 1;
1959 filter.vport_to_add_to = p_vf->vport_id;
1960 filter.opcode = ECORE_FILTER_ADD;
1961
1962 /* Reconfigure vlans */
1963 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1964 if (!p_vf->shadow_config.vlans[i].used)
1965 continue;
1966
1967 filter.type = ECORE_FILTER_VLAN;
1968 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1969 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1970 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1971 filter.vlan, p_vf->relative_vf_id);
1972 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1973 &filter, ECORE_SPQ_MODE_CB, OSAL_NULL);
1974 if (rc) {
1975 DP_NOTICE(p_hwfn, true, "Failed to configure VLAN [%04x] to VF [%04x]\n",
1976 filter.vlan,
1977 p_vf->relative_vf_id);
1978 break;
1979 }
1980 }
1981
1982 return rc;
1983 }
1984
1985 static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn * p_hwfn,struct ecore_vf_info * p_vf,u64 events)1986 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
1987 struct ecore_vf_info *p_vf,
1988 u64 events)
1989 {
1990 enum _ecore_status_t rc = ECORE_SUCCESS;
1991
1992 /*TODO - what about MACs? */
1993
1994 if ((events & (1 << VLAN_ADDR_FORCED)) &&
1995 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1996 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1997
1998 return rc;
1999 }
2000
2001 static enum _ecore_status_t
ecore_iov_configure_vport_forced(struct ecore_hwfn * p_hwfn,struct ecore_vf_info * p_vf,u64 events)2002 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
2003 struct ecore_vf_info *p_vf,
2004 u64 events)
2005 {
2006 enum _ecore_status_t rc = ECORE_SUCCESS;
2007 struct ecore_filter_ucast filter;
2008
2009 if (!p_vf->vport_instance)
2010 return ECORE_INVAL;
2011
2012 if (events & (1 << MAC_ADDR_FORCED)) {
2013 /* Since there's no way [currently] of removing the MAC,
2014 * we can always assume this means we need to force it.
2015 */
2016 OSAL_MEMSET(&filter, 0, sizeof(filter));
2017 filter.type = ECORE_FILTER_MAC;
2018 filter.opcode = ECORE_FILTER_REPLACE;
2019 filter.is_rx_filter = 1;
2020 filter.is_tx_filter = 1;
2021 filter.vport_to_add_to = p_vf->vport_id;
2022 OSAL_MEMCPY(filter.mac,
2023 p_vf->bulletin.p_virt->mac,
2024 ETH_ALEN);
2025
2026 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2027 &filter,
2028 ECORE_SPQ_MODE_CB, OSAL_NULL);
2029 if (rc) {
2030 DP_NOTICE(p_hwfn, true,
2031 "PF failed to configure MAC for VF\n");
2032 return rc;
2033 }
2034
2035 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
2036 }
2037
2038 if (events & (1 << VLAN_ADDR_FORCED)) {
2039 struct ecore_sp_vport_update_params vport_update;
2040 u8 removal;
2041 int i;
2042
2043 OSAL_MEMSET(&filter, 0, sizeof(filter));
2044 filter.type = ECORE_FILTER_VLAN;
2045 filter.is_rx_filter = 1;
2046 filter.is_tx_filter = 1;
2047 filter.vport_to_add_to = p_vf->vport_id;
2048 filter.vlan = p_vf->bulletin.p_virt->pvid;
2049 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
2050 ECORE_FILTER_FLUSH;
2051
2052 /* Send the ramrod */
2053 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2054 &filter,
2055 ECORE_SPQ_MODE_CB, OSAL_NULL);
2056 if (rc) {
2057 DP_NOTICE(p_hwfn, true,
2058 "PF failed to configure VLAN for VF\n");
2059 return rc;
2060 }
2061
2062 /* Update the default-vlan & silent vlan stripping */
2063 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
2064 vport_update.opaque_fid = p_vf->opaque_fid;
2065 vport_update.vport_id = p_vf->vport_id;
2066 vport_update.update_default_vlan_enable_flg = 1;
2067 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
2068 vport_update.update_default_vlan_flg = 1;
2069 vport_update.default_vlan = filter.vlan;
2070
2071 vport_update.update_inner_vlan_removal_flg = 1;
2072 removal = filter.vlan ?
2073 1 : p_vf->shadow_config.inner_vlan_removal;
2074 vport_update.inner_vlan_removal_flg = removal;
2075 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
2076 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
2077 ECORE_SPQ_MODE_EBLOCK,
2078 OSAL_NULL);
2079 if (rc) {
2080 DP_NOTICE(p_hwfn, true,
2081 "PF failed to configure VF vport for vlan\n");
2082 return rc;
2083 }
2084
2085 /* Update all the Rx queues */
2086 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
2087 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
2088 struct ecore_queue_cid *p_cid = OSAL_NULL;
2089
2090 /* There can be at most 1 Rx queue per qzone. Find it */
2091 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2092 if (p_cid == OSAL_NULL)
2093 continue;
2094
2095 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
2096 (void **)&p_cid,
2097 1, 0, 1,
2098 ECORE_SPQ_MODE_EBLOCK,
2099 OSAL_NULL);
2100 if (rc) {
2101 DP_NOTICE(p_hwfn, true,
2102 "Failed to send Rx update fo queue[0x%04x]\n",
2103 p_cid->rel.queue_id);
2104 return rc;
2105 }
2106 }
2107
2108 if (filter.vlan)
2109 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
2110 else
2111 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
2112 }
2113
2114 /* If forced features are terminated, we need to re-apply the shadow
2115 * configuration.
2116 */
2117 if (events)
2118 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
2119
2120 return rc;
2121 }
2122
ecore_iov_vf_mbx_start_vport(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)2123 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2124 struct ecore_ptt *p_ptt,
2125 struct ecore_vf_info *vf)
2126 {
2127 struct ecore_sp_vport_start_params params = {0};
2128 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2129 struct vfpf_vport_start_tlv *start;
2130 u8 status = PFVF_STATUS_SUCCESS;
2131 struct ecore_vf_info *vf_info;
2132 u64 *p_bitmap;
2133 int sb_id;
2134 enum _ecore_status_t rc;
2135
2136 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2137 if (!vf_info) {
2138 DP_NOTICE(p_hwfn->p_dev, true,
2139 "Failed to get VF info, invalid vfid [%d]\n",
2140 vf->relative_vf_id);
2141 return;
2142 }
2143
2144 vf->state = VF_ENABLED;
2145 start = &mbx->req_virt->start_vport;
2146
2147 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2148
2149 /* Initialize Status block in CAU */
2150 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2151 if (!start->sb_addr[sb_id]) {
2152 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2153 "VF[%d] did not fill the address of SB %d\n",
2154 vf->relative_vf_id, sb_id);
2155 break;
2156 }
2157
2158 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2159 start->sb_addr[sb_id],
2160 vf->igu_sbs[sb_id],
2161 vf->abs_vf_id, 1);
2162 }
2163
2164 vf->mtu = start->mtu;
2165 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2166
2167 /* Take into consideration configuration forced by hypervisor;
2168 * If none is configured, use the supplied VF values [for old
2169 * vfs that would still be fine, since they passed '0' as padding].
2170 */
2171 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2172 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2173 u8 vf_req = start->only_untagged;
2174
2175 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2176 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2177 }
2178
2179 params.tpa_mode = start->tpa_mode;
2180 params.remove_inner_vlan = start->inner_vlan_removal;
2181 params.tx_switching = true;
2182 params.zero_placement_offset = start->zero_placement_offset;
2183
2184 #ifndef ASIC_ONLY
2185 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2186 DP_NOTICE(p_hwfn, false, "FPGA: Don't configure VF for Tx-switching [no pVFC]\n");
2187 params.tx_switching = false;
2188 }
2189 #endif
2190
2191 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2192 params.drop_ttl0 = false;
2193 params.concrete_fid = vf->concrete_fid;
2194 params.opaque_fid = vf->opaque_fid;
2195 params.vport_id = vf->vport_id;
2196 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2197 params.mtu = vf->mtu;
2198 params.check_mac = true;
2199
2200 #ifndef ECORE_UPSTREAM
2201 rc = OSAL_IOV_PRE_START_VPORT(p_hwfn, vf->relative_vf_id, &params);
2202 if (rc != ECORE_SUCCESS) {
2203 DP_ERR(p_hwfn, "OSAL_IOV_PRE_START_VPORT returned error %d\n", rc);
2204 status = PFVF_STATUS_FAILURE;
2205 goto exit;
2206 }
2207 #endif
2208
2209 rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2210 if (rc != ECORE_SUCCESS) {
2211 DP_ERR(p_hwfn, "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2212 status = PFVF_STATUS_FAILURE;
2213 } else {
2214 vf->vport_instance++;
2215
2216 /* Force configuration if needed on the newly opened vport */
2217 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2218 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2219 vf->vport_id, vf->opaque_fid);
2220 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2221 }
2222 #ifndef ECORE_UPSTREAM
2223 exit:
2224 #endif
2225 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2226 sizeof(struct pfvf_def_resp_tlv), status);
2227 }
2228
ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)2229 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2230 struct ecore_ptt *p_ptt,
2231 struct ecore_vf_info *vf)
2232 {
2233 u8 status = PFVF_STATUS_SUCCESS;
2234 enum _ecore_status_t rc;
2235
2236 OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
2237 vf->vport_instance--;
2238 vf->spoof_chk = false;
2239
2240 if ((ecore_iov_validate_active_rxq(vf)) ||
2241 (ecore_iov_validate_active_txq(vf))) {
2242 vf->b_malicious = true;
2243 DP_NOTICE(p_hwfn,
2244 false, " VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
2245 vf->abs_vf_id);
2246 status = PFVF_STATUS_MALICIOUS;
2247 goto out;
2248 }
2249
2250 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2251 if (rc != ECORE_SUCCESS) {
2252 DP_ERR(p_hwfn, "ecore_iov_vf_mbx_stop_vport returned error %d\n",
2253 rc);
2254 status = PFVF_STATUS_FAILURE;
2255 }
2256
2257 /* Forget the configuration on the vport */
2258 vf->configured_features = 0;
2259 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2260
2261 out:
2262 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2263 sizeof(struct pfvf_def_resp_tlv), status);
2264 }
2265
ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf,u8 status,bool b_legacy)2266 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2267 struct ecore_ptt *p_ptt,
2268 struct ecore_vf_info *vf,
2269 u8 status, bool b_legacy)
2270 {
2271 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2272 struct pfvf_start_queue_resp_tlv *p_tlv;
2273 struct vfpf_start_rxq_tlv *req;
2274 u16 length;
2275
2276 mbx->offset = (u8 *)mbx->reply_virt;
2277
2278 /* Taking a bigger struct instead of adding a TLV to list was a
2279 * mistake, but one which we're now stuck with, as some older
2280 * clients assume the size of the previous response.
2281 */
2282 if (!b_legacy)
2283 length = sizeof(*p_tlv);
2284 else
2285 length = sizeof(struct pfvf_def_resp_tlv);
2286
2287 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2288 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2289 sizeof(struct channel_list_end_tlv));
2290
2291 /* Update the TLV with the response */
2292 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2293 req = &mbx->req_virt->start_rxq;
2294 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2295 OFFSETOF(struct mstorm_vf_zone,
2296 non_trigger.eth_rx_queue_producers) +
2297 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2298 }
2299
2300 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2301 }
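/* The offset handed back above points the VF at its Rx producer in the
 * MSTORM RAM zone; conceptually it is an array of per-queue producer
 * structures indexed by the VF-relative rx_qid:
 *
 *	offset = PXP_VF_BAR0_START_MSDM_ZONE_B
 *	       + OFFSETOF(struct mstorm_vf_zone,
 *			  non_trigger.eth_rx_queue_producers)
 *	       + rx_qid * sizeof(struct eth_rx_prod_data);
 *
 * Legacy VFs (b_legacy) compute their producer location on their own and
 * ignore this field.
 */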
2302
ecore_iov_vf_mbx_qid(struct ecore_hwfn * p_hwfn,struct ecore_vf_info * p_vf,bool b_is_tx)2303 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2304 struct ecore_vf_info *p_vf, bool b_is_tx)
2305 {
2306 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2307 struct vfpf_qid_tlv *p_qid_tlv;
2308
2309 /* Search for the qid TLV if the VF indicated it's going to provide one */
2310 if (!(p_vf->acquire.vfdev_info.capabilities &
2311 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2312 if (b_is_tx)
2313 return ECORE_IOV_LEGACY_QID_TX;
2314 else
2315 return ECORE_IOV_LEGACY_QID_RX;
2316 }
2317
2318 p_qid_tlv = (struct vfpf_qid_tlv *)
2319 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2320 CHANNEL_TLV_QID);
2321 if (p_qid_tlv == OSAL_NULL) {
2322 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2323 "VF[%2x]: Failed to provide qid\n",
2324 p_vf->relative_vf_id);
2325
2326 return ECORE_IOV_QID_INVALID;
2327 }
2328
2329 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2330 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2331 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2332 p_vf->relative_vf_id, p_qid_tlv->qid);
2333 return ECORE_IOV_QID_INVALID;
2334 }
2335
2336 return p_qid_tlv->qid;
2337 }
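/* Typical use of the helper above (cf. ecore_iov_vf_mbx_start_rxq() below):
 * legacy VFs without VFPF_ACQUIRE_CAP_QUEUE_QIDS always resolve to the fixed
 * per-direction index, while modern VFs must carry a CHANNEL_TLV_QID TLV:
 *
 *	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
 *	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
 *		goto out;	// malformed request - fail the command
 *	p_queue = &vf->vf_queues[req->rx_qid];
 *	p_cid = p_queue->cids[qid_usage_idx].p_cid;
 */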
2338
ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)2339 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2340 struct ecore_ptt *p_ptt,
2341 struct ecore_vf_info *vf)
2342 {
2343 struct ecore_queue_start_common_params params;
2344 struct ecore_queue_cid_vf_params vf_params;
2345 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2346 u8 status = PFVF_STATUS_NO_RESOURCE;
2347 u8 qid_usage_idx, vf_legacy = 0;
2348 struct ecore_vf_queue *p_queue;
2349 struct vfpf_start_rxq_tlv *req;
2350 struct ecore_queue_cid *p_cid;
2351 struct ecore_sb_info sb_dummy;
2352 enum _ecore_status_t rc;
2353
2354 req = &mbx->req_virt->start_rxq;
2355
2356 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2357 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2358 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2359 goto out;
2360
2361 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2362 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2363 goto out;
2364
2365 p_queue = &vf->vf_queues[req->rx_qid];
2366 if (p_queue->cids[qid_usage_idx].p_cid)
2367 goto out;
2368
2369 vf_legacy = ecore_vf_calculate_legacy(vf);
2370
2371 /* Acquire a new queue-cid */
2372 OSAL_MEMSET(&params, 0, sizeof(params));
2373 params.queue_id = (u8)p_queue->fw_rx_qid;
2374 params.vport_id = vf->vport_id;
2375 params.stats_id = vf->abs_vf_id + 0x10;
2376
2377 /* Since IGU index is passed via sb_info, construct a dummy one */
2378 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2379 sb_dummy.igu_sb_id = req->hw_sb;
2380 params.p_sb = &sb_dummy;
2381 params.sb_idx = req->sb_index;
2382
2383 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2384 vf_params.vfid = vf->relative_vf_id;
2385 vf_params.vf_qid = (u8)req->rx_qid;
2386 vf_params.vf_legacy = vf_legacy;
2387 vf_params.qid_usage_idx = qid_usage_idx;
2388
2389 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2390 &params, true, &vf_params);
2391 if (p_cid == OSAL_NULL)
2392 goto out;
2393
2394 /* Legacy VFs have their producers in a different location, which they
2395 * calculate on their own; they clean the producer prior to this.
2396 */
2397 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2398 REG_WR(p_hwfn,
2399 GTT_BAR0_MAP_REG_MSDM_RAM +
2400 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2401 0);
2402
2403 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2404 req->bd_max_bytes,
2405 req->rxq_addr,
2406 req->cqe_pbl_addr,
2407 req->cqe_pbl_size);
2408 if (rc != ECORE_SUCCESS) {
2409 status = PFVF_STATUS_FAILURE;
2410 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2411 } else {
2412 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2413 p_queue->cids[qid_usage_idx].b_is_tx = false;
2414 status = PFVF_STATUS_SUCCESS;
2415 vf->num_active_rxqs++;
2416 }
2417
2418 out:
2419 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2420 !!(vf_legacy &
2421 ECORE_QCID_LEGACY_VF_RX_PROD));
2422 }
2423
2424 static void
ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv * p_resp,struct ecore_tunnel_info * p_tun,u16 tunn_feature_mask)2425 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2426 struct ecore_tunnel_info *p_tun,
2427 u16 tunn_feature_mask)
2428 {
2429 p_resp->tunn_feature_mask = tunn_feature_mask;
2430 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2431 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2432 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2433 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2434 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2435 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2436 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2437 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2438 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2439 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2440 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2441 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2442 }
2443
2444 static void
__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv * p_req,struct ecore_tunn_update_type * p_tun,enum ecore_tunn_mode mask,u8 tun_cls)2445 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2446 struct ecore_tunn_update_type *p_tun,
2447 enum ecore_tunn_mode mask, u8 tun_cls)
2448 {
2449 if (p_req->tun_mode_update_mask & (1 << mask)) {
2450 p_tun->b_update_mode = true;
2451
2452 if (p_req->tunn_mode & (1 << mask))
2453 p_tun->b_mode_enabled = true;
2454 }
2455
2456 p_tun->tun_cls = tun_cls;
2457 }
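/* For example, a VF that wants to enable VXLAN classification sets the
 * ECORE_MODE_VXLAN_TUNN bit in both tun_mode_update_mask and tunn_mode;
 * setting the bit only in the update mask (with the mode bit clear) asks for
 * the mode to be disabled, while leaving both bits clear keeps the current
 * PF configuration for that tunnel type untouched.
 */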
2458
2459 static void
ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv * p_req,struct ecore_tunn_update_type * p_tun,struct ecore_tunn_update_udp_port * p_port,enum ecore_tunn_mode mask,u8 tun_cls,u8 update_port,u16 port)2460 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2461 struct ecore_tunn_update_type *p_tun,
2462 struct ecore_tunn_update_udp_port *p_port,
2463 enum ecore_tunn_mode mask,
2464 u8 tun_cls, u8 update_port, u16 port)
2465 {
2466 if (update_port) {
2467 p_port->b_update_port = true;
2468 p_port->port = port;
2469 }
2470
2471 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2472 }
2473
2474 static bool
ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv * p_req)2475 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2476 {
2477 bool b_update_requested = false;
2478
2479 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2480 p_req->update_geneve_port || p_req->update_vxlan_port)
2481 b_update_requested = true;
2482
2483 return b_update_requested;
2484 }
2485
ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * p_vf)2486 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2487 struct ecore_ptt *p_ptt,
2488 struct ecore_vf_info *p_vf)
2489 {
2490 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2491 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2492 struct pfvf_update_tunn_param_tlv *p_resp;
2493 struct vfpf_update_tunn_param_tlv *p_req;
2494 enum _ecore_status_t rc = ECORE_SUCCESS;
2495 u8 status = PFVF_STATUS_SUCCESS;
2496 bool b_update_required = false;
2497 struct ecore_tunnel_info tunn;
2498 u16 tunn_feature_mask = 0;
2499 int i;
2500
2501 mbx->offset = (u8 *)mbx->reply_virt;
2502
2503 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2504 p_req = &mbx->req_virt->tunn_param_update;
2505
2506 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2507 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2508 "No tunnel update requested by VF\n");
2509 status = PFVF_STATUS_FAILURE;
2510 goto send_resp;
2511 }
2512
2513 tunn.b_update_rx_cls = p_req->update_tun_cls;
2514 tunn.b_update_tx_cls = p_req->update_tun_cls;
2515
2516 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2517 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2518 p_req->update_vxlan_port,
2519 p_req->vxlan_port);
2520 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2521 ECORE_MODE_L2GENEVE_TUNN,
2522 p_req->l2geneve_clss,
2523 p_req->update_geneve_port,
2524 p_req->geneve_port);
2525 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2526 ECORE_MODE_IPGENEVE_TUNN,
2527 p_req->ipgeneve_clss);
2528 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2529 ECORE_MODE_L2GRE_TUNN,
2530 p_req->l2gre_clss);
2531 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2532 ECORE_MODE_IPGRE_TUNN,
2533 p_req->ipgre_clss);
2534
2535 /* Even if the PF modifies the VF's request, it should
2536 * still return an error when the resulting configuration is partial
2537 * or differs from the one that was requested.
2538 */
2539 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2540 &b_update_required, &tunn);
2541
2542 if (rc != ECORE_SUCCESS)
2543 status = PFVF_STATUS_FAILURE;
2544
2545 /* Check whether the ECORE client is willing to update anything */
2546 if (b_update_required) {
2547 u16 geneve_port;
2548
2549 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2550 ECORE_SPQ_MODE_EBLOCK,
2551 OSAL_NULL);
2552 if (rc != ECORE_SUCCESS)
2553 status = PFVF_STATUS_FAILURE;
2554
2555 geneve_port = p_tun->geneve_port.port;
2556 ecore_for_each_vf(p_hwfn, i) {
2557 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2558 p_tun->vxlan_port.port,
2559 geneve_port);
2560 }
2561 }
2562
2563 send_resp:
2564 p_resp = ecore_add_tlv(&mbx->offset,
2565 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2566
2567 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2568 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2569 sizeof(struct channel_list_end_tlv));
2570
2571 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2572 }
2573
ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * p_vf,u32 cid,u8 status)2574 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2575 struct ecore_ptt *p_ptt,
2576 struct ecore_vf_info *p_vf,
2577 u32 cid,
2578 u8 status)
2579 {
2580 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2581 struct pfvf_start_queue_resp_tlv *p_tlv;
2582 bool b_legacy = false;
2583 u16 length;
2584
2585 mbx->offset = (u8 *)mbx->reply_virt;
2586
2587 /* Taking a bigger struct instead of adding a TLV to list was a
2588 * mistake, but one which we're now stuck with, as some older
2589 * clients assume the size of the previous response.
2590 */
2591 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2592 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2593 b_legacy = true;
2594
2595 if (!b_legacy)
2596 length = sizeof(*p_tlv);
2597 else
2598 length = sizeof(struct pfvf_def_resp_tlv);
2599
2600 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2601 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2602 sizeof(struct channel_list_end_tlv));
2603
2604 /* Update the TLV with the response */
2605 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2606 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2607
2608 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2609 }
2610
ecore_iov_vf_mbx_start_txq(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)2611 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2612 struct ecore_ptt *p_ptt,
2613 struct ecore_vf_info *vf)
2614 {
2615 struct ecore_queue_start_common_params params;
2616 struct ecore_queue_cid_vf_params vf_params;
2617 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2618 u8 status = PFVF_STATUS_NO_RESOURCE;
2619 struct ecore_vf_queue *p_queue;
2620 struct vfpf_start_txq_tlv *req;
2621 struct ecore_queue_cid *p_cid;
2622 struct ecore_sb_info sb_dummy;
2623 u8 qid_usage_idx, vf_legacy;
2624 u32 cid = 0;
2625 enum _ecore_status_t rc;
2626 u16 pq;
2627
2628 OSAL_MEMSET(&params, 0, sizeof(params));
2629 req = &mbx->req_virt->start_txq;
2630
2631 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2632 ECORE_IOV_VALIDATE_Q_NA) ||
2633 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2634 goto out;
2635
2636 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2637 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2638 goto out;
2639
2640 p_queue = &vf->vf_queues[req->tx_qid];
2641 if (p_queue->cids[qid_usage_idx].p_cid)
2642 goto out;
2643
2644 vf_legacy = ecore_vf_calculate_legacy(vf);
2645
2646 /* Acquire a new queue-cid */
2647 params.queue_id = p_queue->fw_tx_qid;
2648 params.vport_id = vf->vport_id;
2649 params.stats_id = vf->abs_vf_id + 0x10;
2650
2651 /* Since IGU index is passed via sb_info, construct a dummy one */
2652 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2653 sb_dummy.igu_sb_id = req->hw_sb;
2654 params.p_sb = &sb_dummy;
2655 params.sb_idx = req->sb_index;
2656
2657 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2658 vf_params.vfid = vf->relative_vf_id;
2659 vf_params.vf_qid = (u8)req->tx_qid;
2660 vf_params.vf_legacy = vf_legacy;
2661 vf_params.qid_usage_idx = qid_usage_idx;
2662
2663 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2664 &params, false, &vf_params);
2665 if (p_cid == OSAL_NULL)
2666 goto out;
2667
2668 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2669 vf->relative_vf_id);
2670 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2671 req->pbl_addr, req->pbl_size, pq);
2672 if (rc != ECORE_SUCCESS) {
2673 status = PFVF_STATUS_FAILURE;
2674 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2675 } else {
2676 status = PFVF_STATUS_SUCCESS;
2677 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2678 p_queue->cids[qid_usage_idx].b_is_tx = true;
2679 cid = p_cid->cid;
2680 }
2681
2682 out:
2683 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2684 cid, status);
2685 }
2686
ecore_iov_vf_stop_rxqs(struct ecore_hwfn * p_hwfn,struct ecore_vf_info * vf,u16 rxq_id,u8 qid_usage_idx,bool cqe_completion)2687 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2688 struct ecore_vf_info *vf,
2689 u16 rxq_id,
2690 u8 qid_usage_idx,
2691 bool cqe_completion)
2692 {
2693 struct ecore_vf_queue *p_queue;
2694 enum _ecore_status_t rc = ECORE_SUCCESS;
2695
2696 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2697 ECORE_IOV_VALIDATE_Q_NA)) {
2698 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2699 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2700 vf->relative_vf_id, rxq_id, qid_usage_idx);
2701 return ECORE_INVAL;
2702 }
2703
2704 p_queue = &vf->vf_queues[rxq_id];
2705
2706 /* We've validated the index and the existence of the active RXQ -
2707 * now we need to make sure that it's using the correct qid.
2708 */
2709 if (!p_queue->cids[qid_usage_idx].p_cid ||
2710 p_queue->cids[qid_usage_idx].b_is_tx) {
2711 struct ecore_queue_cid *p_cid;
2712
2713 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2714 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2715 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2716 vf->relative_vf_id, rxq_id, qid_usage_idx,
2717 rxq_id, p_cid->qid_usage_idx);
2718 return ECORE_INVAL;
2719 }
2720
2721 /* Now that we know we have a valid Rx-queue - close it */
2722 rc = ecore_eth_rx_queue_stop(p_hwfn,
2723 p_queue->cids[qid_usage_idx].p_cid,
2724 false, cqe_completion);
2725 if (rc != ECORE_SUCCESS)
2726 return rc;
2727
2728 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2729 vf->num_active_rxqs--;
2730
2731 return ECORE_SUCCESS;
2732 }
2733
ecore_iov_vf_stop_txqs(struct ecore_hwfn * p_hwfn,struct ecore_vf_info * vf,u16 txq_id,u8 qid_usage_idx)2734 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2735 struct ecore_vf_info *vf,
2736 u16 txq_id,
2737 u8 qid_usage_idx)
2738 {
2739 struct ecore_vf_queue *p_queue;
2740 enum _ecore_status_t rc = ECORE_SUCCESS;
2741
2742 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2743 ECORE_IOV_VALIDATE_Q_NA))
2744 return ECORE_INVAL;
2745
2746 p_queue = &vf->vf_queues[txq_id];
2747 if (!p_queue->cids[qid_usage_idx].p_cid ||
2748 !p_queue->cids[qid_usage_idx].b_is_tx)
2749 return ECORE_INVAL;
2750
2751 rc = ecore_eth_tx_queue_stop(p_hwfn,
2752 p_queue->cids[qid_usage_idx].p_cid);
2753 if (rc != ECORE_SUCCESS)
2754 return rc;
2755
2756 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2757 return ECORE_SUCCESS;
2758 }
2759
ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)2760 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2761 struct ecore_ptt *p_ptt,
2762 struct ecore_vf_info *vf)
2763 {
2764 u16 length = sizeof(struct pfvf_def_resp_tlv);
2765 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2766 u8 status = PFVF_STATUS_FAILURE;
2767 struct vfpf_stop_rxqs_tlv *req;
2768 u8 qid_usage_idx;
2769 enum _ecore_status_t rc;
2770
2771 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
2772 * would be one. Since no older ecore passed multiple queues
2773 * using this API, sanity-check the value.
2774 */
2775 req = &mbx->req_virt->stop_rxqs;
2776 if (req->num_rxqs != 1) {
2777 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2778 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2779 vf->relative_vf_id);
2780 status = PFVF_STATUS_NOT_SUPPORTED;
2781 goto out;
2782 }
2783
2784 /* Find which qid-index is associated with the queue */
2785 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2786 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2787 goto out;
2788
2789 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2790 qid_usage_idx, req->cqe_completion);
2791 if (rc == ECORE_SUCCESS)
2792 status = PFVF_STATUS_SUCCESS;
2793 out:
2794 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2795 length, status);
2796 }
2797
ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)2798 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2799 struct ecore_ptt *p_ptt,
2800 struct ecore_vf_info *vf)
2801 {
2802 u16 length = sizeof(struct pfvf_def_resp_tlv);
2803 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2804 u8 status = PFVF_STATUS_FAILURE;
2805 struct vfpf_stop_txqs_tlv *req;
2806 u8 qid_usage_idx;
2807 enum _ecore_status_t rc;
2808
2809 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
2810 * would be one. Since no older ecore passed multiple queues
2811 * using this API, sanity-check the value.
2812 */
2813 req = &mbx->req_virt->stop_txqs;
2814 if (req->num_txqs != 1) {
2815 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2816 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2817 vf->relative_vf_id);
2818 status = PFVF_STATUS_NOT_SUPPORTED;
2819 goto out;
2820 }
2821
2822 /* Find which qid-index is associated with the queue */
2823 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2824 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2825 goto out;
2826
2827 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2828 qid_usage_idx);
2829 if (rc == ECORE_SUCCESS)
2830 status = PFVF_STATUS_SUCCESS;
2831
2832 out:
2833 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2834 length, status);
2835 }
2836
ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)2837 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2838 struct ecore_ptt *p_ptt,
2839 struct ecore_vf_info *vf)
2840 {
2841 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2842 u16 length = sizeof(struct pfvf_def_resp_tlv);
2843 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2844 struct vfpf_update_rxq_tlv *req;
2845 u8 status = PFVF_STATUS_FAILURE;
2846 u8 complete_event_flg;
2847 u8 complete_cqe_flg;
2848 u8 qid_usage_idx;
2849 enum _ecore_status_t rc;
2850 u16 i;
2851
2852 req = &mbx->req_virt->update_rxq;
2853 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2854 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2855
2856 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2857 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2858 goto out;
2859
2860 /* Starting with the addition of CHANNEL_TLV_QID, this API started
2861 * expecting a single queue at a time. Validate this.
2862 */
2863 if ((vf->acquire.vfdev_info.capabilities &
2864 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2865 req->num_rxqs != 1) {
2866 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2867 "VF[%d] supports QIDs but sends multiple queues\n",
2868 vf->relative_vf_id);
2869 goto out;
2870 }
2871
2872 /* Validate inputs - for the legacy case this is still true since
2873 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2874 */
2875 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2876 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2877 ECORE_IOV_VALIDATE_Q_NA) ||
2878 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2879 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2880 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2881 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2882 vf->relative_vf_id, req->rx_qid,
2883 req->num_rxqs);
2884 goto out;
2885 }
2886 }
2887
2888 for (i = 0; i < req->num_rxqs; i++) {
2889 u16 qid = req->rx_qid + i;
2890
2891 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2892 }
2893
2894 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2895 req->num_rxqs,
2896 complete_cqe_flg,
2897 complete_event_flg,
2898 ECORE_SPQ_MODE_EBLOCK,
2899 OSAL_NULL);
2900 if (rc != ECORE_SUCCESS)
2901 goto out;
2902
2903 status = PFVF_STATUS_SUCCESS;
2904 out:
2905 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2906 length, status);
2907 }
2908
ecore_iov_search_list_tlvs(struct ecore_hwfn * p_hwfn,void * p_tlvs_list,u16 req_type)2909 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2910 void *p_tlvs_list, u16 req_type)
2911 {
2912 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2913 int len = 0;
2914
2915 do {
2916 if (!p_tlv->length) {
2917 DP_NOTICE(p_hwfn, true,
2918 "Zero length TLV found\n");
2919 return OSAL_NULL;
2920 }
2921
2922 if (p_tlv->type == req_type) {
2923 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2924 "Extended tlv type %s, length %d found\n",
2925 ecore_channel_tlvs_string[p_tlv->type],
2926 p_tlv->length);
2927 return p_tlv;
2928 }
2929
2930 len += p_tlv->length;
2931 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2932
2933 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2934 DP_NOTICE(p_hwfn, true,
2935 "TLVs has overrun the buffer size\n");
2936 return OSAL_NULL;
2937 }
2938 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2939
2940 return OSAL_NULL;
2941 }
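/* The request buffer walked above is a flat chain of TLVs, each starting
 * with a channel_tlv header (see ecore_vfpf_if.h) whose 'length' covers the
 * entire TLV, terminated by CHANNEL_TLV_LIST_END:
 *
 *	[ request tlv | ext tlv | ext tlv | ... | CHANNEL_TLV_LIST_END ]
 *
 * A caller looking for an optional extended TLV therefore does, e.g.:
 *
 *	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
 *		    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 *					       CHANNEL_TLV_VPORT_UPDATE_ACTIVATE);
 *	if (!p_act_tlv)
 *		return;		// VF didn't request this feature
 */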
2942
2943 static void
ecore_iov_vp_update_act_param(struct ecore_hwfn * p_hwfn,struct ecore_sp_vport_update_params * p_data,struct ecore_iov_vf_mbx * p_mbx,u16 * tlvs_mask)2944 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2945 struct ecore_sp_vport_update_params *p_data,
2946 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2947 {
2948 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2949 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2950
2951 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2952 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2953 tlv);
2954 if (!p_act_tlv)
2955 return;
2956
2957 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2958 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2959 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2960 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2961 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2962 }
2963
2964 static void
ecore_iov_vp_update_vlan_param(struct ecore_hwfn * p_hwfn,struct ecore_sp_vport_update_params * p_data,struct ecore_vf_info * p_vf,struct ecore_iov_vf_mbx * p_mbx,u16 * tlvs_mask)2965 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2966 struct ecore_sp_vport_update_params *p_data,
2967 struct ecore_vf_info *p_vf,
2968 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2969 {
2970 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2971 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2972
2973 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2974 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2975 tlv);
2976 if (!p_vlan_tlv)
2977 return;
2978
2979 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2980
2981 /* Ignore the VF request if we're forcing a vlan */
2982 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2983 p_data->update_inner_vlan_removal_flg = 1;
2984 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2985 }
2986
2987 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2988 }
2989
2990 static void
ecore_iov_vp_update_tx_switch(struct ecore_hwfn * p_hwfn,struct ecore_sp_vport_update_params * p_data,struct ecore_iov_vf_mbx * p_mbx,u16 * tlvs_mask)2991 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2992 struct ecore_sp_vport_update_params *p_data,
2993 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2994 {
2995 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2996 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2997
2998 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2999 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3000 tlv);
3001 if (!p_tx_switch_tlv)
3002 return;
3003
3004 #ifndef ASIC_ONLY
3005 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
3006 DP_NOTICE(p_hwfn, false, "FPGA: Ignore tx-switching configuration originating from VFs\n");
3007 return;
3008 }
3009 #endif
3010
3011 p_data->update_tx_switching_flg = 1;
3012 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
3013 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
3014 }
3015
3016 static void
ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn * p_hwfn,struct ecore_sp_vport_update_params * p_data,struct ecore_iov_vf_mbx * p_mbx,u16 * tlvs_mask)3017 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
3018 struct ecore_sp_vport_update_params *p_data,
3019 struct ecore_iov_vf_mbx *p_mbx,
3020 u16 *tlvs_mask)
3021 {
3022 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
3023 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
3024
3025 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
3026 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3027 tlv);
3028 if (!p_mcast_tlv)
3029 return;
3030
3031 p_data->update_approx_mcast_flg = 1;
3032 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
3033 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
3034 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
3035 }
3036
3037 static void
ecore_iov_vp_update_accept_flag(struct ecore_hwfn * p_hwfn,struct ecore_sp_vport_update_params * p_data,struct ecore_iov_vf_mbx * p_mbx,u16 * tlvs_mask)3038 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
3039 struct ecore_sp_vport_update_params *p_data,
3040 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3041 {
3042 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
3043 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
3044 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
3045
3046 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
3047 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3048 tlv);
3049 if (!p_accept_tlv)
3050 return;
3051
3052 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
3053 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
3054 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
3055 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
3056 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
3057 }
3058
3059 static void
ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn * p_hwfn,struct ecore_sp_vport_update_params * p_data,struct ecore_iov_vf_mbx * p_mbx,u16 * tlvs_mask)3060 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
3061 struct ecore_sp_vport_update_params *p_data,
3062 struct ecore_iov_vf_mbx *p_mbx,
3063 u16 *tlvs_mask)
3064 {
3065 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
3066 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
3067
3068 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
3069 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3070 tlv);
3071 if (!p_accept_any_vlan)
3072 return;
3073
3074 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
3075 p_data->update_accept_any_vlan_flg =
3076 p_accept_any_vlan->update_accept_any_vlan_flg;
3077 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
3078 }
3079
3080 static void
ecore_iov_vp_update_rss_param(struct ecore_hwfn * p_hwfn,struct ecore_vf_info * vf,struct ecore_sp_vport_update_params * p_data,struct ecore_rss_params * p_rss,struct ecore_iov_vf_mbx * p_mbx,u16 * tlvs_mask,u16 * tlvs_accepted)3081 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
3082 struct ecore_vf_info *vf,
3083 struct ecore_sp_vport_update_params *p_data,
3084 struct ecore_rss_params *p_rss,
3085 struct ecore_iov_vf_mbx *p_mbx,
3086 u16 *tlvs_mask, u16 *tlvs_accepted)
3087 {
3088 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
3089 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
3090 bool b_reject = false;
3091 u16 table_size;
3092 u16 i, q_idx;
3093
3094 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
3095 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3096 tlv);
3097 if (!p_rss_tlv) {
3098 p_data->rss_params = OSAL_NULL;
3099 return;
3100 }
3101
3102 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
3103
3104 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
3105 VFPF_UPDATE_RSS_CONFIG_FLAG);
3106 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
3107 VFPF_UPDATE_RSS_CAPS_FLAG);
3108 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
3109 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
3110 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
3111 VFPF_UPDATE_RSS_KEY_FLAG);
3112
3113 p_rss->rss_enable = p_rss_tlv->rss_enable;
3114 p_rss->rss_eng_id = vf->rss_eng_id;
3115 p_rss->rss_caps = p_rss_tlv->rss_caps;
3116 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
3117 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
3118 sizeof(p_rss->rss_key));
3119
3120 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
3121 (1 << p_rss_tlv->rss_table_size_log));
3122
3123 for (i = 0; i < table_size; i++) {
3124 struct ecore_queue_cid *p_cid;
3125
3126 q_idx = p_rss_tlv->rss_ind_table[i];
3127 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
3128 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3129 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3130 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3131 vf->relative_vf_id, q_idx);
3132 b_reject = true;
3133 goto out;
3134 }
3135
3136 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3137 p_rss->rss_ind_table[i] = p_cid;
3138 }
3139
3140 p_data->rss_params = p_rss;
3141 out:
3142 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3143 if (!b_reject)
3144 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3145 }
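/* E.g. a VF requesting rss_table_size_log == 7 asks for a 128-entry
 * indirection table; table_size is clamped to the PF-side array size if that
 * is smaller, and every entry is translated from a VF-relative Rx queue
 * index into the matching queue-cid pointer before the ramrod is sent.
 */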
3146
3147 static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn * p_hwfn,struct ecore_sp_vport_update_params * p_data,struct ecore_sge_tpa_params * p_sge_tpa,struct ecore_iov_vf_mbx * p_mbx,u16 * tlvs_mask)3148 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3149 struct ecore_sp_vport_update_params *p_data,
3150 struct ecore_sge_tpa_params *p_sge_tpa,
3151 struct ecore_iov_vf_mbx *p_mbx,
3152 u16 *tlvs_mask)
3153 {
3154 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3155 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3156
3157 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3158 ecore_iov_search_list_tlvs(p_hwfn,
3159 p_mbx->req_virt, tlv);
3160
3161 if (!p_sge_tpa_tlv) {
3162 p_data->sge_tpa_params = OSAL_NULL;
3163 return;
3164 }
3165
3166 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3167
3168 p_sge_tpa->update_tpa_en_flg =
3169 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3170 VFPF_UPDATE_TPA_EN_FLAG);
3171 p_sge_tpa->update_tpa_param_flg =
3172 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3173 VFPF_UPDATE_TPA_PARAM_FLAG);
3174
3175 p_sge_tpa->tpa_ipv4_en_flg =
3176 !!(p_sge_tpa_tlv->sge_tpa_flags &
3177 VFPF_TPA_IPV4_EN_FLAG);
3178 p_sge_tpa->tpa_ipv6_en_flg =
3179 !!(p_sge_tpa_tlv->sge_tpa_flags &
3180 VFPF_TPA_IPV6_EN_FLAG);
3181 p_sge_tpa->tpa_pkt_split_flg =
3182 !!(p_sge_tpa_tlv->sge_tpa_flags &
3183 VFPF_TPA_PKT_SPLIT_FLAG);
3184 p_sge_tpa->tpa_hdr_data_split_flg =
3185 !!(p_sge_tpa_tlv->sge_tpa_flags &
3186 VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3187 p_sge_tpa->tpa_gro_consistent_flg =
3188 !!(p_sge_tpa_tlv->sge_tpa_flags &
3189 VFPF_TPA_GRO_CONSIST_FLAG);
3190
3191 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3192 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3193 p_sge_tpa->tpa_min_size_to_start =
3194 p_sge_tpa_tlv->tpa_min_size_to_start;
3195 p_sge_tpa->tpa_min_size_to_cont =
3196 p_sge_tpa_tlv->tpa_min_size_to_cont;
3197 p_sge_tpa->max_buffers_per_cqe =
3198 p_sge_tpa_tlv->max_buffers_per_cqe;
3199
3200 p_data->sge_tpa_params = p_sge_tpa;
3201
3202 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
3203 }
3204
ecore_iov_vf_mbx_vport_update(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_vf_info * vf)3205 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3206 struct ecore_ptt *p_ptt,
3207 struct ecore_vf_info *vf)
3208 {
3209 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3210 struct ecore_sp_vport_update_params params;
3211 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3212 struct ecore_sge_tpa_params sge_tpa_params;
3213 u16 tlvs_mask = 0, tlvs_accepted = 0;
3214 u8 status = PFVF_STATUS_SUCCESS;
3215 u16 length;
3216 enum _ecore_status_t rc;
3217
3218 /* Validate that the VF can send such a request */
3219 if (!vf->vport_instance) {
3220 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3221 "No VPORT instance available for VF[%d], failing vport update\n",
3222 vf->abs_vf_id);
3223 status = PFVF_STATUS_FAILURE;
3224 goto out;
3225 }
3226
3227 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3228 if (p_rss_params == OSAL_NULL) {
3229 status = PFVF_STATUS_FAILURE;
3230 goto out;
3231 }
3232
3233 OSAL_MEMSET(&params, 0, sizeof(params));
3234 params.opaque_fid = vf->opaque_fid;
3235 params.vport_id = vf->vport_id;
3236 params.rss_params = OSAL_NULL;
3237
3238 /* Search for extended tlvs list and update values
3239 * from VF in struct ecore_sp_vport_update_params.
3240 */
3241 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3242 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3243 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3244 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3245 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3246 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3247 ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
3248 &sge_tpa_params, mbx, &tlvs_mask);
3249
3250 tlvs_accepted = tlvs_mask;
3251
3252 /* Some of the extended TLVs need to be validated first; in that case,
3253 * they can update the mask without updating the accepted mask [so that
3254 * the PF can communicate to the VF that it has rejected the request].
3255 */
3256 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3257 mbx, &tlvs_mask, &tlvs_accepted);
3258
3259 /* Just log a message if there isn't a single extended TLV in the buffer.
3260 * Once all features of the vport update ramrod are requested by the VF
3261 * as extended TLVs in the buffer, an error can be returned in the response
3262 * if no extended TLV is present in the buffer.
3263 */
3264 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3265 &params, &tlvs_accepted) !=
3266 ECORE_SUCCESS) {
3267 tlvs_accepted = 0;
3268 status = PFVF_STATUS_NOT_SUPPORTED;
3269 goto out;
3270 }
3271
3272 if (!tlvs_accepted) {
3273 if (tlvs_mask)
3274 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3275 "Upper-layer prevents said VF configuration\n");
3276 else
3277 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3278 "No feature tlvs found for vport update\n");
3279 status = PFVF_STATUS_NOT_SUPPORTED;
3280 goto out;
3281 }
3282
3283 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3284 OSAL_NULL);
3285
3286 if (rc)
3287 status = PFVF_STATUS_FAILURE;
3288
3289 out:
3290 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3291 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3292 tlvs_mask, tlvs_accepted);
3293 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3294 }
3295
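/* Maintain the PF-side shadow of the VF's VLAN filters so the configuration
 * can be re-applied later (e.g. after an FLR). REMOVE/FLUSH clear entries;
 * ADD/REPLACE occupy a free slot unless a forced VLAN (PVID) is active, in
 * which case only removals are honoured.
 */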
3296 static enum _ecore_status_t ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3297 struct ecore_vf_info *p_vf,
3298 struct ecore_filter_ucast *p_params)
3299 {
3300 int i;
3301
3302 /* First remove entries and then add new ones */
3303 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3304 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3305 if (p_vf->shadow_config.vlans[i].used &&
3306 p_vf->shadow_config.vlans[i].vid ==
3307 p_params->vlan) {
3308 p_vf->shadow_config.vlans[i].used = false;
3309 break;
3310 }
3311 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3312 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3313 "VF [%d] - Tries to remove a non-existing vlan\n",
3314 p_vf->relative_vf_id);
3315 return ECORE_INVAL;
3316 }
3317 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3318 p_params->opcode == ECORE_FILTER_FLUSH) {
3319 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3320 p_vf->shadow_config.vlans[i].used = false;
3321 }
3322
3323 /* In forced mode, we're willing to remove entries - but we don't add
3324 * new ones.
3325 */
3326 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3327 return ECORE_SUCCESS;
3328
3329 if (p_params->opcode == ECORE_FILTER_ADD ||
3330 p_params->opcode == ECORE_FILTER_REPLACE) {
3331 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3332 if (p_vf->shadow_config.vlans[i].used)
3333 continue;
3334
3335 p_vf->shadow_config.vlans[i].used = true;
3336 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3337 break;
3338 }
3339
3340 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3341 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3342 "VF [%d] - Tries to configure more than %d vlan filters\n",
3343 p_vf->relative_vf_id,
3344 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3345 return ECORE_INVAL;
3346 }
3347 }
3348
3349 return ECORE_SUCCESS;
3350 }
3351
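/* Maintain the PF-side shadow of the VF's unicast MAC filters. When a forced
 * MAC is published in the bulletin board the shadow is left untouched, since
 * the VF is not allowed to change its MAC configuration in that state.
 */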
3352 static enum _ecore_status_t ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3353 struct ecore_vf_info *p_vf,
3354 struct ecore_filter_ucast *p_params)
3355 {
3356 char empty_mac[ETH_ALEN];
3357 int i;
3358
3359 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3360
3361 /* If we're in forced-mode, we don't allow any change */
3362 /* TODO - this would change if we were ever to implement logic for
3363 * removing a forced MAC altogether [in which case, like for vlans,
3364 * we should be able to re-trace the previous configuration].
3365 */
3366 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3367 return ECORE_SUCCESS;
3368
3369 /* First remove entries and then add new ones */
3370 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3371 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3372 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3373 p_params->mac, ETH_ALEN)) {
3374 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3375 ETH_ALEN);
3376 break;
3377 }
3378 }
3379
3380 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3381 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3382 "MAC isn't configured\n");
3383 return ECORE_INVAL;
3384 }
3385 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3386 p_params->opcode == ECORE_FILTER_FLUSH) {
3387 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3388 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3389 }
3390
3391 /* List the new MAC address */
3392 if (p_params->opcode != ECORE_FILTER_ADD &&
3393 p_params->opcode != ECORE_FILTER_REPLACE)
3394 return ECORE_SUCCESS;
3395
3396 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3397 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3398 empty_mac, ETH_ALEN)) {
3399 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3400 p_params->mac, ETH_ALEN);
3401 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3402 "Added MAC at %d entry in shadow\n", i);
3403 break;
3404 }
3405 }
3406
3407 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3408 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3409 "No available place for MAC\n");
3410 return ECORE_INVAL;
3411 }
3412
3413 return ECORE_SUCCESS;
3414 }
3415
3416 static enum _ecore_status_t
3417 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3418 struct ecore_vf_info *p_vf,
3419 struct ecore_filter_ucast *p_params)
3420 {
3421 enum _ecore_status_t rc = ECORE_SUCCESS;
3422
3423 if (p_params->type == ECORE_FILTER_MAC) {
3424 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3425 if (rc != ECORE_SUCCESS)
3426 return rc;
3427 }
3428
3429 if (p_params->type == ECORE_FILTER_VLAN)
3430 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3431
3432 return rc;
3433 }
3434
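/* Handle the UCAST_FILTER mailbox request - update the shadow configuration,
 * enforce the forced-MAC/forced-VLAN restrictions advertised via the
 * bulletin board, let the upper layer reject the filter, and finally send
 * the unicast filter ramrod on behalf of the VF.
 */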
3435 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3436 struct ecore_ptt *p_ptt,
3437 struct ecore_vf_info *vf)
3438 {
3439 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3440 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3441 struct vfpf_ucast_filter_tlv *req;
3442 u8 status = PFVF_STATUS_SUCCESS;
3443 struct ecore_filter_ucast params;
3444 enum _ecore_status_t rc;
3445
3446 /* Prepare the unicast filter params */
3447 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3448 req = &mbx->req_virt->ucast_filter;
3449 params.opcode = (enum ecore_filter_opcode)req->opcode;
3450 params.type = (enum ecore_filter_ucast_type)req->type;
3451
3452 /* @@@TBD - We might need logic on HV side in determining this */
3453 params.is_rx_filter = 1;
3454 params.is_tx_filter = 1;
3455 params.vport_to_remove_from = vf->vport_id;
3456 params.vport_to_add_to = vf->vport_id;
3457 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3458 params.vlan = req->vlan;
3459
3460 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3461 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3462 vf->abs_vf_id, params.opcode, params.type,
3463 params.is_rx_filter ? "RX" : "",
3464 params.is_tx_filter ? "TX" : "",
3465 params.vport_to_add_to,
3466 params.mac[0], params.mac[1], params.mac[2],
3467 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3468
3469 if (!vf->vport_instance) {
3470 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3471 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3472 vf->abs_vf_id);
3473 status = PFVF_STATUS_FAILURE;
3474 goto out;
3475 }
3476
3477 /* Update the shadow copy of the VF configuration. If the shadow
3478 * indicates the action should be blocked, return success to the VF
3479 * to imitate the firmware behaviour in such a case.
3480 */
3481 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3482 ECORE_SUCCESS)
3483 goto out;
3484
3485 /* Determine if the unicast filtering is acceptable by the PF */
3486 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3487 (params.type == ECORE_FILTER_VLAN ||
3488 params.type == ECORE_FILTER_MAC_VLAN)) {
3489 /* Once a VLAN is forced or a PVID is set, do not allow
3490 * adding/replacing any further VLANs.
3491 */
3492 if (params.opcode == ECORE_FILTER_ADD ||
3493 params.opcode == ECORE_FILTER_REPLACE)
3494 status = PFVF_STATUS_FORCED;
3495 goto out;
3496 }
3497
3498 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3499 (params.type == ECORE_FILTER_MAC ||
3500 params.type == ECORE_FILTER_MAC_VLAN)) {
3501 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3502 (params.opcode != ECORE_FILTER_ADD &&
3503 params.opcode != ECORE_FILTER_REPLACE))
3504 status = PFVF_STATUS_FORCED;
3505 goto out;
3506 }
3507
3508 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3509 if (rc == ECORE_EXISTS) {
3510 goto out;
3511 } else if (rc == ECORE_INVAL) {
3512 status = PFVF_STATUS_FAILURE;
3513 goto out;
3514 }
3515
3516 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3517 ECORE_SPQ_MODE_CB, OSAL_NULL);
3518 if (rc)
3519 status = PFVF_STATUS_FAILURE;
3520
3521 out:
3522 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3523 sizeof(struct pfvf_def_resp_tlv), status);
3524 }
3525
3526 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3527 struct ecore_ptt *p_ptt,
3528 struct ecore_vf_info *vf)
3529 {
3530 int i;
3531
3532 /* Reset the SBs */
3533 for (i = 0; i < vf->num_sbs; i++)
3534 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3535 vf->igu_sbs[i],
3536 vf->opaque_fid, false);
3537
3538 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3539 sizeof(struct pfvf_def_resp_tlv),
3540 PFVF_STATUS_SUCCESS);
3541 }
3542
3543 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3544 struct ecore_ptt *p_ptt,
3545 struct ecore_vf_info *vf)
3546 {
3547 u16 length = sizeof(struct pfvf_def_resp_tlv);
3548 u8 status = PFVF_STATUS_SUCCESS;
3549
3550 /* Disable Interrupts for VF */
3551 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3552
3553 /* Reset Permission table */
3554 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3555
3556 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3557 length, status);
3558 }
3559
3560 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3561 struct ecore_ptt *p_ptt,
3562 struct ecore_vf_info *p_vf)
3563 {
3564 u16 length = sizeof(struct pfvf_def_resp_tlv);
3565 u8 status = PFVF_STATUS_SUCCESS;
3566 enum _ecore_status_t rc = ECORE_SUCCESS;
3567
3568 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3569
3570 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3571 /* Stopping the VF */
3572 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3573 p_vf->opaque_fid);
3574
3575 if (rc != ECORE_SUCCESS) {
3576 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3577 rc);
3578 status = PFVF_STATUS_FAILURE;
3579 }
3580
3581 p_vf->state = VF_STOPPED;
3582 }
3583
3584 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3585 length, status);
3586 }
3587
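/* Handle the COALESCE_READ mailbox request - read the current interrupt
 * coalescing value of the requested VF Rx/Tx queue and return it in a
 * CHANNEL_TLV_COALESCE_READ response TLV.
 */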
3588 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3589 struct ecore_ptt *p_ptt,
3590 struct ecore_vf_info *p_vf)
3591 {
3592 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3593 struct pfvf_read_coal_resp_tlv *p_resp;
3594 struct vfpf_read_coal_req_tlv *req;
3595 u8 status = PFVF_STATUS_FAILURE;
3596 struct ecore_vf_queue *p_queue;
3597 struct ecore_queue_cid *p_cid;
3598 enum _ecore_status_t rc = ECORE_SUCCESS;
3599 u16 coal = 0, qid, i;
3600 bool b_is_rx;
3601
3602 mbx->offset = (u8 *)mbx->reply_virt;
3603 req = &mbx->req_virt->read_coal_req;
3604
3605 qid = req->qid;
3606 b_is_rx = req->is_rx ? true : false;
3607
3608 if (b_is_rx) {
3609 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3610 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3611 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3612 "VF[%d]: Invalid Rx queue_id = %d\n",
3613 p_vf->abs_vf_id, qid);
3614 goto send_resp;
3615 }
3616
3617 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3618 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3619 if (rc != ECORE_SUCCESS)
3620 goto send_resp;
3621 } else {
3622 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3623 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3624 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3625 "VF[%d]: Invalid Tx queue_id = %d\n",
3626 p_vf->abs_vf_id, qid);
3627 goto send_resp;
3628 }
3629 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3630 p_queue = &p_vf->vf_queues[qid];
3631 if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3632 (!p_queue->cids[i].b_is_tx))
3633 continue;
3634
3635 p_cid = p_queue->cids[i].p_cid;
3636
3637 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3638 p_cid, &coal);
3639 if (rc != ECORE_SUCCESS)
3640 goto send_resp;
3641 break;
3642 }
3643 }
3644
3645 status = PFVF_STATUS_SUCCESS;
3646
3647 send_resp:
3648 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3649 sizeof(*p_resp));
3650 p_resp->coal = coal;
3651
3652 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3653 sizeof(struct channel_list_end_tlv));
3654
3655 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3656 }
3657
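/* Handle the COALESCE_UPDATE mailbox request - validate the queue index and
 * apply the requested Rx/Tx interrupt coalescing values. Tx coalescing is
 * currently applied to every Tx cid of the queue-zone rather than per-cid.
 */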
3658 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3659 struct ecore_ptt *p_ptt,
3660 struct ecore_vf_info *vf)
3661 {
3662 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3663 enum _ecore_status_t rc = ECORE_SUCCESS;
3664 struct vfpf_update_coalesce *req;
3665 u8 status = PFVF_STATUS_FAILURE;
3666 struct ecore_queue_cid *p_cid;
3667 u16 rx_coal, tx_coal;
3668 u16 qid;
3669 int i;
3670
3671 req = &mbx->req_virt->update_coalesce;
3672
3673 rx_coal = req->rx_coal;
3674 tx_coal = req->tx_coal;
3675 qid = req->qid;
3676
3677 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3678 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3679 rx_coal) {
3680 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3681 vf->abs_vf_id, qid);
3682 goto out;
3683 }
3684
3685 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3686 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3687 tx_coal) {
3688 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3689 vf->abs_vf_id, qid);
3690 goto out;
3691 }
3692
3693 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3694 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3695 vf->abs_vf_id, rx_coal, tx_coal, qid);
3696
3697 if (rx_coal) {
3698 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3699
3700 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3701 if (rc != ECORE_SUCCESS) {
3702 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3703 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3704 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3705 goto out;
3706 }
3707 vf->rx_coal = rx_coal;
3708 }
3709
3710 /* TODO - in future, it might be possible to pass this in a per-cid
3711 * granularity. For now, do this for all Tx queues.
3712 */
3713 if (tx_coal) {
3714 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3715
3716 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3717 if (p_queue->cids[i].p_cid == OSAL_NULL)
3718 continue;
3719
3720 if (!p_queue->cids[i].b_is_tx)
3721 continue;
3722
3723 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3724 p_queue->cids[i].p_cid);
3725 if (rc != ECORE_SUCCESS) {
3726 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3727 "VF[%d]: Unable to set tx queue coalesce\n",
3728 vf->abs_vf_id);
3729 goto out;
3730 }
3731 }
3732 vf->tx_coal = tx_coal;
3733 }
3734
3735 status = PFVF_STATUS_SUCCESS;
3736 out:
3737 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3738 sizeof(struct pfvf_def_resp_tlv), status);
3739 }
3740
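/* PF-initiated equivalent of the COALESCE_UPDATE flow above: the PF driver
 * itself sets the Rx/Tx coalescing of one of a VF's queues, acquiring a PTT
 * window internally. An illustrative call, with hypothetical coalescing
 * values and the VF's first queue, might look like:
 *
 *	rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 64, 128,
 *						      vf_id, 0);
 */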
3741 enum _ecore_status_t
3742 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3743 u16 rx_coal, u16 tx_coal,
3744 u16 vf_id, u16 qid)
3745 {
3746 struct ecore_queue_cid *p_cid;
3747 struct ecore_vf_info *vf;
3748 struct ecore_ptt *p_ptt;
3749 int i, rc = 0;
3750
3751 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3752 DP_NOTICE(p_hwfn, true,
3753 "VF[%d] - Can not set coalescing: VF is not active\n",
3754 vf_id);
3755 return ECORE_INVAL;
3756 }
3757
3758 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3759 p_ptt = ecore_ptt_acquire(p_hwfn);
3760 if (!p_ptt)
3761 return ECORE_AGAIN;
3762
3763 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3764 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3765 rx_coal) {
3766 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3767 vf->abs_vf_id, qid);
3768 goto out;
3769 }
3770
3771 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3772 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3773 tx_coal) {
3774 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3775 vf->abs_vf_id, qid);
3776 goto out;
3777 }
3778
3779 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3780 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3781 vf->abs_vf_id, rx_coal, tx_coal, qid);
3782
3783 if (rx_coal) {
3784 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3785
3786 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3787 if (rc != ECORE_SUCCESS) {
3788 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3789 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3790 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3791 goto out;
3792 }
3793 vf->rx_coal = rx_coal;
3794 }
3795
3796 /* TODO - in future, it might be possible to pass this in a per-cid
3797 * granularity. For now, do this for all Tx queues.
3798 */
3799 if (tx_coal) {
3800 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3801
3802 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3803 if (p_queue->cids[i].p_cid == OSAL_NULL)
3804 continue;
3805
3806 if (!p_queue->cids[i].b_is_tx)
3807 continue;
3808
3809 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3810 p_queue->cids[i].p_cid);
3811 if (rc != ECORE_SUCCESS) {
3812 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3813 "VF[%d]: Unable to set tx queue coalesce\n",
3814 vf->abs_vf_id);
3815 goto out;
3816 }
3817 }
3818 vf->tx_coal = tx_coal;
3819 }
3820
3821 out:
3822 ecore_ptt_release(p_hwfn, p_ptt);
3823
3824 return rc;
3825 }
3826
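/* Poll the doorbell queue block until it has no outstanding doorbells for
 * the VF being FLR-ed; pretend to the VF's concrete FID while reading the
 * usage counter, and give up after roughly a second.
 */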
3827 static enum _ecore_status_t
3828 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3829 struct ecore_vf_info *p_vf,
3830 struct ecore_ptt *p_ptt)
3831 {
3832 int cnt;
3833 u32 val;
3834
3835 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3836
3837 for (cnt = 0; cnt < 50; cnt++) {
3838 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3839 if (!val)
3840 break;
3841 OSAL_MSLEEP(20);
3842 }
3843 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3844
3845 if (cnt == 50) {
3846 DP_ERR(p_hwfn, "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3847 p_vf->abs_vf_id, val);
3848 return ECORE_TIMEOUT;
3849 }
3850
3851 return ECORE_SUCCESS;
3852 }
3853
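/* Poll the PBF block until, for every VOQ, the consumer has advanced past
 * the producer value sampled at entry - i.e. until all blocks that were
 * in flight for the FLR-ed VF have drained.
 */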
3854 static enum _ecore_status_t
3855 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3856 struct ecore_vf_info *p_vf,
3857 struct ecore_ptt *p_ptt)
3858 {
3859 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3860 int i, cnt;
3861
3862 /* Read initial consumers & producers */
3863 for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3864 u32 prod;
3865
3866 cons[i] = ecore_rd(p_hwfn, p_ptt,
3867 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3868 i * 0x40);
3869 prod = ecore_rd(p_hwfn, p_ptt,
3870 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3871 i * 0x40);
3872 distance[i] = prod - cons[i];
3873 }
3874
3875 /* Wait for consumers to pass the producers */
3876 i = 0;
3877 for (cnt = 0; cnt < 50; cnt++) {
3878 for (; i < MAX_NUM_VOQS_E4; i++) {
3879 u32 tmp;
3880
3881 tmp = ecore_rd(p_hwfn, p_ptt,
3882 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3883 i * 0x40);
3884 if (distance[i] > tmp - cons[i])
3885 break;
3886 }
3887
3888 if (i == MAX_NUM_VOQS_E4)
3889 break;
3890
3891 OSAL_MSLEEP(20);
3892 }
3893
3894 if (cnt == 50) {
3895 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3896 p_vf->abs_vf_id, i);
3897 return ECORE_TIMEOUT;
3898 }
3899
3900 return ECORE_SUCCESS;
3901 }
3902
3903 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3904 struct ecore_vf_info *p_vf,
3905 struct ecore_ptt *p_ptt)
3906 {
3907 enum _ecore_status_t rc;
3908
3909 /* TODO - add SRC and TM polling once we add storage IOV */
3910
3911 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3912 if (rc)
3913 return rc;
3914
3915 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3916 if (rc)
3917 return rc;
3918
3919 return ECORE_SUCCESS;
3920 }
3921
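/* Perform the actual FLR cleanup for a single VF: release its SW resources,
 * wait for the HW blocks to drain, run the final-cleanup ramrod, re-open the
 * VF-PF channel and mark the VF for ACK towards the management FW.
 */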
3922 static enum _ecore_status_t
3923 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3924 struct ecore_ptt *p_ptt,
3925 u16 rel_vf_id,
3926 u32 *ack_vfs)
3927 {
3928 struct ecore_vf_info *p_vf;
3929 enum _ecore_status_t rc = ECORE_SUCCESS;
3930
3931 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3932 if (!p_vf)
3933 return ECORE_SUCCESS;
3934
3935 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3936 (1ULL << (rel_vf_id % 64))) {
3937 u16 vfid = p_vf->abs_vf_id;
3938
3939 /* TODO - should we lock channel? */
3940
3941 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3942 "VF[%d] - Handling FLR\n", vfid);
3943
3944 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3945
3946 /* If VF isn't active, no need for anything but SW */
3947 if (!p_vf->b_init)
3948 goto cleanup;
3949
3950 /* TODO - what to do in case of failure? */
3951 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3952 if (rc != ECORE_SUCCESS)
3953 goto cleanup;
3954
3955 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3956 if (rc) {
3957 /* TODO - what now? What a mess... */
3958 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n",
3959 vfid);
3960 return rc;
3961 }
3962
3963 /* Workaround to make VF-PF channel ready, as FW
3964 * doesn't do that as a part of FLR.
3965 */
3966 REG_WR(p_hwfn,
3967 GTT_BAR0_MAP_REG_USDM_RAM +
3968 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3969
3970 /* VF_STOPPED has to be set only after final cleanup
3971 * but prior to re-enabling the VF.
3972 */
3973 p_vf->state = VF_STOPPED;
3974
3975 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3976 if (rc) {
3977 /* TODO - again, a mess... */
3978 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
3979 vfid);
3980 return rc;
3981 }
3982 cleanup:
3983 /* Mark VF for ack and clean pending state */
3984 if (p_vf->state == VF_RESET)
3985 p_vf->state = VF_STOPPED;
3986 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3987 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3988 ~(1ULL << (rel_vf_id % 64));
3989 p_vf->vf_mbx.b_pending_msg = false;
3990 }
3991
3992 return rc;
3993 }
3994
3995 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3996 struct ecore_ptt *p_ptt)
3997
3998 {
3999 u32 ack_vfs[VF_MAX_STATIC / 32];
4000 enum _ecore_status_t rc = ECORE_SUCCESS;
4001 u16 i;
4002
4003 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
4004
4005 /* Since BRB <-> PRS interface can't be tested as part of the flr
4006 * polling due to HW limitations, simply sleep a bit. And since
4007 * there's no need to wait per-vf, do it before looping.
4008 */
4009 OSAL_MSLEEP(100);
4010
4011 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
4012 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
4013
4014 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
4015 return rc;
4016 }
4017
4018 #ifndef LINUX_REMOVE
4019 enum _ecore_status_t
4020 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4021 struct ecore_ptt *p_ptt,
4022 u16 rel_vf_id)
4023
4024 {
4025 u32 ack_vfs[VF_MAX_STATIC / 32];
4026 enum _ecore_status_t rc = ECORE_SUCCESS;
4027
4028 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
4029
4030 /* Wait instead of polling the BRB <-> PRS interface */
4031 OSAL_MSLEEP(100);
4032
4033 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
4034
4035 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
4036 return rc;
4037 }
4038 #endif
4039
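/* Translate the disabled-VF bitmap received from the management FW into the
 * pf_iov_info->pending_flr bitmap, moving each affected VF to VF_RESET.
 * Returns true if at least one VF was marked, so the caller knows an FLR
 * cleanup pass is required.
 */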
4040 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
4041 u32 *p_disabled_vfs)
4042 {
4043 bool found = false;
4044 u16 i;
4045
4046 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
4047 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
4048 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4049 "[%08x,...,%08x]: %08x\n",
4050 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
4051
4052 if (!p_hwfn->p_dev->p_iov_info) {
4053 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
4054 return false;
4055 }
4056
4057 /* Mark VFs */
4058 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
4059 struct ecore_vf_info *p_vf;
4060 u8 vfid;
4061
4062 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
4063 if (!p_vf)
4064 continue;
4065
4066 vfid = p_vf->abs_vf_id;
4067 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
4068 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
4069 u16 rel_vf_id = p_vf->relative_vf_id;
4070
4071 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4072 "VF[%d] [rel %d] got FLR-ed\n",
4073 vfid, rel_vf_id);
4074
4075 p_vf->state = VF_RESET;
4076
4077 /* No need to lock here, since pending_flr should
4078 * only change here and before ACKing the MFW. Since
4079 * the MFW will not trigger an additional attention for
4080 * a VF FLR until the ACK, we're safe.
4081 */
4082 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
4083 found = true;
4084 }
4085 }
4086
4087 return found;
4088 }
4089
4090 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
4091 u16 vfid,
4092 struct ecore_mcp_link_params *p_params,
4093 struct ecore_mcp_link_state *p_link,
4094 struct ecore_mcp_link_capabilities *p_caps)
4095 {
4096 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
4097 struct ecore_bulletin_content *p_bulletin;
4098
4099 if (!p_vf)
4100 return;
4101
4102 p_bulletin = p_vf->bulletin.p_virt;
4103
4104 if (p_params)
4105 __ecore_vf_get_link_params(p_params, p_bulletin);
4106 if (p_link)
4107 __ecore_vf_get_link_state(p_link, p_bulletin);
4108 if (p_caps)
4109 __ecore_vf_get_link_caps(p_caps, p_bulletin);
4110 }
4111
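/* Dispatch a pending mailbox request from a VF: take the per-VF channel
 * lock, switch on the first TLV's type to the matching handler, and for
 * malicious VFs or unknown TLVs reply with a MALICIOUS/NOT_SUPPORTED status
 * when a valid reply address is available.
 */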
4112 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
4113 struct ecore_ptt *p_ptt,
4114 int vfid)
4115 {
4116 struct ecore_iov_vf_mbx *mbx;
4117 struct ecore_vf_info *p_vf;
4118
4119 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4120 if (!p_vf)
4121 return;
4122
4123 mbx = &p_vf->vf_mbx;
4124
4125 /* ecore_iov_process_mbx_request */
4126 #ifndef CONFIG_ECORE_SW_CHANNEL
4127 if (!mbx->b_pending_msg) {
4128 DP_NOTICE(p_hwfn, true,
4129 "VF[%02x]: Trying to process mailbox message when none is pending\n",
4130 p_vf->abs_vf_id);
4131 return;
4132 }
4133 mbx->b_pending_msg = false;
4134 #endif
4135
4136 mbx->first_tlv = mbx->req_virt->first_tlv;
4137
4138 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4139 "VF[%02x]: Processing mailbox message [type %04x]\n",
4140 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4141
4142 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
4143 p_vf->relative_vf_id,
4144 mbx->first_tlv.tl.type);
4145
4146 /* Lock the per vf op mutex and note the locker's identity.
4147 * The unlock will take place in mbx response.
4148 */
4149 ecore_iov_lock_vf_pf_channel(p_hwfn, p_vf,
4150 mbx->first_tlv.tl.type);
4151
4152 /* check if tlv type is known */
4153 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
4154 !p_vf->b_malicious) {
4155 /* switch on the opcode */
4156 switch (mbx->first_tlv.tl.type) {
4157 case CHANNEL_TLV_ACQUIRE:
4158 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
4159 break;
4160 case CHANNEL_TLV_VPORT_START:
4161 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
4162 break;
4163 case CHANNEL_TLV_VPORT_TEARDOWN:
4164 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
4165 break;
4166 case CHANNEL_TLV_START_RXQ:
4167 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
4168 break;
4169 case CHANNEL_TLV_START_TXQ:
4170 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
4171 break;
4172 case CHANNEL_TLV_STOP_RXQS:
4173 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
4174 break;
4175 case CHANNEL_TLV_STOP_TXQS:
4176 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
4177 break;
4178 case CHANNEL_TLV_UPDATE_RXQ:
4179 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
4180 break;
4181 case CHANNEL_TLV_VPORT_UPDATE:
4182 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
4183 break;
4184 case CHANNEL_TLV_UCAST_FILTER:
4185 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
4186 break;
4187 case CHANNEL_TLV_CLOSE:
4188 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
4189 break;
4190 case CHANNEL_TLV_INT_CLEANUP:
4191 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
4192 break;
4193 case CHANNEL_TLV_RELEASE:
4194 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
4195 break;
4196 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
4197 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
4198 break;
4199 case CHANNEL_TLV_COALESCE_UPDATE:
4200 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
4201 break;
4202 case CHANNEL_TLV_COALESCE_READ:
4203 ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
4204 break;
4205 }
4206 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
4207 /* If we've received a message from a VF we consider malicious
4208 * we ignore the message unless it's one for RELEASE, in which
4209 * case we'll give it the benefit of the doubt, allowing the
4210 * next loaded driver to start again.
4211 */
4212 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
4213 /* TODO - initiate FLR, remove malicious indication */
4214 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4215 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
4216 p_vf->abs_vf_id);
4217 } else {
4218 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4219 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
4220 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4221 }
4222
4223 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4224 mbx->first_tlv.tl.type,
4225 sizeof(struct pfvf_def_resp_tlv),
4226 PFVF_STATUS_MALICIOUS);
4227 } else {
4228 /* unknown TLV - this may belong to a VF driver from the future
4229 * - a version written after this PF driver was written, which
4230 * supports features unknown as of yet. Too bad since we don't
4231 * support them. Or this may be because someone wrote a crappy
4232 * VF driver and is sending garbage over the channel.
4233 */
4234 DP_NOTICE(p_hwfn, false,
4235 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
4236 p_vf->abs_vf_id,
4237 mbx->first_tlv.tl.type,
4238 mbx->first_tlv.tl.length,
4239 mbx->first_tlv.padding,
4240 (unsigned long long)mbx->first_tlv.reply_address);
4241
4242 /* Try replying in case reply address matches the acquisition's
4243 * posted address.
4244 */
4245 if (p_vf->acquire.first_tlv.reply_address &&
4246 (mbx->first_tlv.reply_address ==
4247 p_vf->acquire.first_tlv.reply_address))
4248 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4249 mbx->first_tlv.tl.type,
4250 sizeof(struct pfvf_def_resp_tlv),
4251 PFVF_STATUS_NOT_SUPPORTED);
4252 else
4253 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4254 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
4255 p_vf->abs_vf_id);
4256 }
4257
4258 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
4259 mbx->first_tlv.tl.type);
4260
4261 #ifdef CONFIG_ECORE_SW_CHANNEL
4262 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
4263 mbx->sw_mbx.response_offset = 0;
4264 #endif
4265 }
4266
4267 void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
4268 u64 *events)
4269 {
4270 int i;
4271
4272 OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
4273
4274 ecore_for_each_vf(p_hwfn, i) {
4275 struct ecore_vf_info *p_vf;
4276
4277 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
4278 if (p_vf->vf_mbx.b_pending_msg)
4279 events[i / 64] |= 1ULL << (i % 64);
4280 }
4281 }
4282
4283 static struct ecore_vf_info *
4284 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
4285 {
4286 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
4287
4288 if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
4289 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4290 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
4291 abs_vfid);
4292 return OSAL_NULL;
4293 }
4294
4295 return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
4296 }
4297
4298 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
4299 u16 abs_vfid,
4300 struct regpair *vf_msg)
4301 {
4302 struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
4303 abs_vfid);
4304
4305 if (!p_vf)
4306 return ECORE_SUCCESS;
4307
4308 /* Record the physical address of the request so that the handler
4309 * can later copy the message from it.
4310 */
4311 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) |
4312 vf_msg->lo;
4313
4314 p_vf->vf_mbx.b_pending_msg = true;
4315
4316 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
4317 }
4318
4319 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
4320 struct malicious_vf_eqe_data *p_data)
4321 {
4322 struct ecore_vf_info *p_vf;
4323
4324 p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4325
4326 if (!p_vf)
4327 return;
4328
4329 if (!p_vf->b_malicious) {
4330 DP_NOTICE(p_hwfn, false,
4331 "VF [%d] - Malicious behavior [%02x]\n",
4332 p_vf->abs_vf_id, p_data->err_id);
4333
4334 p_vf->b_malicious = true;
4335 } else {
4336 DP_INFO(p_hwfn,
4337 "VF [%d] - Malicious behavior [%02x]\n",
4338 p_vf->abs_vf_id, p_data->err_id);
4339 }
4340
4341 OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
4342 }
4343
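/* Event-queue callback registered for SR-IOV related EQEs - VF->PF channel
 * messages, VF FLR indications and malicious-VF notifications arriving from
 * the firmware are routed to their respective handlers here.
 */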
4344 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
4345 u8 opcode,
4346 __le16 echo,
4347 union event_ring_data *data,
4348 u8 OSAL_UNUSED fw_return_code)
4349 {
4350 switch (opcode) {
4351 case COMMON_EVENT_VF_PF_CHANNEL:
4352 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
4353 &data->vf_pf_channel.msg_addr);
4354 case COMMON_EVENT_VF_FLR:
4355 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4356 "VF-FLR is still not supported\n");
4357 return ECORE_SUCCESS;
4358 case COMMON_EVENT_MALICIOUS_VF:
4359 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4360 return ECORE_SUCCESS;
4361 default:
4362 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
4363 opcode);
4364 return ECORE_INVAL;
4365 }
4366 }
4367
4368 #ifndef LINUX_REMOVE
4369 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
4370 u16 rel_vf_id)
4371 {
4372 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
4373 (1ULL << (rel_vf_id % 64)));
4374 }
4375 #endif
4376
4377 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4378 {
4379 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
4380 u16 i;
4381
4382 if (!p_iov)
4383 goto out;
4384
4385 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4386 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
4387 return i;
4388
4389 out:
4390 return MAX_NUM_VFS_E4;
4391 }
4392
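/* Copy the VF's posted request from its own memory into the PF-side request
 * buffer using a host-to-host DMAE transaction with the VF as the source
 * FID.
 */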
4393 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
4394 struct ecore_ptt *ptt,
4395 int vfid)
4396 {
4397 struct ecore_dmae_params params;
4398 struct ecore_vf_info *vf_info;
4399
4400 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4401 if (!vf_info)
4402 return ECORE_INVAL;
4403
4404 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
4405 params.flags = ECORE_DMAE_FLAG_VF_SRC |
4406 ECORE_DMAE_FLAG_COMPLETION_DST;
4407 params.src_vfid = vf_info->abs_vf_id;
4408
4409 if (ecore_dmae_host2host(p_hwfn, ptt,
4410 vf_info->vf_mbx.pending_req,
4411 vf_info->vf_mbx.req_phys,
4412 sizeof(union vfpf_tlvs) / 4,
4413 &params)) {
4414 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4415 "Failed to copy message from VF 0x%02x\n",
4416 vfid);
4417
4418 return ECORE_IO;
4419 }
4420
4421 return ECORE_SUCCESS;
4422 }
4423
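/* Publish a forced MAC address in the VF's bulletin board. Setting a forced
 * MAC clears the VFPF_BULLETIN_MAC_ADDR hint and calls
 * ecore_iov_configure_vport_forced() so an active vport picks it up.
 */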
4424 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
4425 u8 *mac, int vfid)
4426 {
4427 struct ecore_vf_info *vf_info;
4428 u64 feature;
4429
4430 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4431 if (!vf_info) {
4432 DP_NOTICE(p_hwfn->p_dev, true, "Can not set forced MAC, invalid vfid [%d]\n",
4433 vfid);
4434 return;
4435 }
4436 if (vf_info->b_malicious) {
4437 DP_NOTICE(p_hwfn->p_dev, false, "Can't set forced MAC to malicious VF [%d]\n",
4438 vfid);
4439 return;
4440 }
4441
4442 feature = 1 << MAC_ADDR_FORCED;
4443 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
4444 mac, ETH_ALEN);
4445
4446 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4447 /* Forced MAC will disable MAC_ADDR */
4448 vf_info->bulletin.p_virt->valid_bitmap &=
4449 ~(1 << VFPF_BULLETIN_MAC_ADDR);
4450
4451 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4452 }
4453
4454 #ifndef LINUX_REMOVE
4455 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
4456 u8 *mac, int vfid)
4457 {
4458 struct ecore_vf_info *vf_info;
4459 u64 feature;
4460
4461 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4462 if (!vf_info) {
4463 DP_NOTICE(p_hwfn->p_dev, true, "Can not set MAC, invalid vfid [%d]\n",
4464 vfid);
4465 return ECORE_INVAL;
4466 }
4467 if (vf_info->b_malicious) {
4468 DP_NOTICE(p_hwfn->p_dev, false, "Can't set MAC to malicious VF [%d]\n",
4469 vfid);
4470 return ECORE_INVAL;
4471 }
4472
4473 if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
4474 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Can not set MAC, Forced MAC is configured\n");
4475 return ECORE_INVAL;
4476 }
4477
4478 feature = 1 << VFPF_BULLETIN_MAC_ADDR;
4479 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
4480 mac, ETH_ALEN);
4481
4482 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4483
4484 return ECORE_SUCCESS;
4485 }
4486
4487 enum _ecore_status_t
4488 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
4489 bool b_untagged_only,
4490 int vfid)
4491 {
4492 struct ecore_vf_info *vf_info;
4493 u64 feature;
4494
4495 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4496 if (!vf_info) {
4497 DP_NOTICE(p_hwfn->p_dev, true,
4498 "Can not set untagged default, invalid vfid [%d]\n",
4499 vfid);
4500 return ECORE_INVAL;
4501 }
4502 if (vf_info->b_malicious) {
4503 DP_NOTICE(p_hwfn->p_dev, false,
4504 "Can't set untagged default to malicious VF [%d]\n",
4505 vfid);
4506 return ECORE_INVAL;
4507 }
4508
4509 /* Since this is configurable only during vport-start, don't take it
4510 * if we're past that point.
4511 */
4512 if (vf_info->state == VF_ENABLED) {
4513 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4514 "Can't support untagged change for vfid[%d] - VF is already active\n",
4515 vfid);
4516 return ECORE_INVAL;
4517 }
4518
4519 /* Set configuration; This will later be taken into account during the
4520 * VF initialization.
4521 */
4522 feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
4523 (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
4524 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4525
4526 vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
4527 : 0;
4528
4529 return ECORE_SUCCESS;
4530 }
4531
4532 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
4533 u16 *opaque_fid)
4534 {
4535 struct ecore_vf_info *vf_info;
4536
4537 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4538 if (!vf_info)
4539 return;
4540
4541 *opaque_fid = vf_info->opaque_fid;
4542 }
4543 #endif
4544
4545 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
4546 u16 pvid, int vfid)
4547 {
4548 struct ecore_vf_info *vf_info;
4549 u64 feature;
4550
4551 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4552 if (!vf_info) {
4553 DP_NOTICE(p_hwfn->p_dev, true, "Can not set forced vlan, invalid vfid [%d]\n",
4554 vfid);
4555 return;
4556 }
4557 if (vf_info->b_malicious) {
4558 DP_NOTICE(p_hwfn->p_dev, false,
4559 "Can't set forced vlan to malicious VF [%d]\n",
4560 vfid);
4561 return;
4562 }
4563
4564 feature = 1 << VLAN_ADDR_FORCED;
4565 vf_info->bulletin.p_virt->pvid = pvid;
4566 if (pvid)
4567 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4568 else
4569 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4570
4571 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4572 }
4573
4574 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
4575 int vfid, u16 vxlan_port, u16 geneve_port)
4576 {
4577 struct ecore_vf_info *vf_info;
4578
4579 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4580 if (!vf_info) {
4581 DP_NOTICE(p_hwfn->p_dev, true,
4582 "Can not set udp ports, invalid vfid [%d]\n", vfid);
4583 return;
4584 }
4585
4586 if (vf_info->b_malicious) {
4587 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4588 "Can not set udp ports to malicious VF [%d]\n",
4589 vfid);
4590 return;
4591 }
4592
4593 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4594 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4595 }
4596
4597 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
4598 {
4599 struct ecore_vf_info *p_vf_info;
4600
4601 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4602 if (!p_vf_info)
4603 return false;
4604
4605 return !!p_vf_info->vport_instance;
4606 }
4607
4608 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
4609 {
4610 struct ecore_vf_info *p_vf_info;
4611
4612 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4613 if (!p_vf_info)
4614 return true;
4615
4616 return p_vf_info->state == VF_STOPPED;
4617 }
4618
4619 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
4620 {
4621 struct ecore_vf_info *vf_info;
4622
4623 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4624 if (!vf_info)
4625 return false;
4626
4627 return vf_info->spoof_chk;
4628 }
4629
4630 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
4631 int vfid, bool val)
4632 {
4633 struct ecore_vf_info *vf;
4634 enum _ecore_status_t rc = ECORE_INVAL;
4635
4636 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4637 DP_NOTICE(p_hwfn, true,
4638 "SR-IOV sanity check failed, can't set spoofchk\n");
4639 goto out;
4640 }
4641
4642 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4643 if (!vf)
4644 goto out;
4645
4646 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4647 /* After VF VPORT start PF will configure spoof check */
4648 vf->req_spoofchk_val = val;
4649 rc = ECORE_SUCCESS;
4650 goto out;
4651 }
4652
4653 rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
4654
4655 out:
4656 return rc;
4657 }
4658
4659 #ifndef LINUX_REMOVE
4660 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
4661 {
4662 u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
4663
4664 max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
4665 : ECORE_MAX_VF_CHAINS_PER_PF;
4666
4667 return max_chains_per_vf;
4668 }
4669
4670 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4671 u16 rel_vf_id,
4672 void **pp_req_virt_addr,
4673 u16 *p_req_virt_size)
4674 {
4675 struct ecore_vf_info *vf_info =
4676 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4677
4678 if (!vf_info)
4679 return;
4680
4681 if (pp_req_virt_addr)
4682 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
4683
4684 if (p_req_virt_size)
4685 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
4686 }
4687
4688 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4689 u16 rel_vf_id,
4690 void **pp_reply_virt_addr,
4691 u16 *p_reply_virt_size)
4692 {
4693 struct ecore_vf_info *vf_info =
4694 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4695
4696 if (!vf_info)
4697 return;
4698
4699 if (pp_reply_virt_addr)
4700 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
4701
4702 if (p_reply_virt_size)
4703 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
4704 }
4705
4706 #ifdef CONFIG_ECORE_SW_CHANNEL
4707 struct ecore_iov_sw_mbx*
4708 ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
4709 u16 rel_vf_id)
4710 {
4711 struct ecore_vf_info *vf_info =
4712 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4713
4714 if (!vf_info)
4715 return OSAL_NULL;
4716
4717 return &vf_info->vf_mbx.sw_mbx;
4718 }
4719 #endif
4720
4721 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
4722 {
4723 return (length >= sizeof(struct vfpf_first_tlv) &&
4724 (length <= sizeof(union vfpf_tlvs)));
4725 }
4726
4727 u32 ecore_iov_pfvf_msg_length(void)
4728 {
4729 return sizeof(union pfvf_tlvs);
4730 }
4731 #endif
4732
4733 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
4734 u16 rel_vf_id)
4735 {
4736 struct ecore_vf_info *p_vf;
4737
4738 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4739 if (!p_vf || !p_vf->bulletin.p_virt)
4740 return OSAL_NULL;
4741
4742 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
4743 return OSAL_NULL;
4744
4745 return p_vf->bulletin.p_virt->mac;
4746 }
4747
4748 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
4749 u16 rel_vf_id)
4750 {
4751 struct ecore_vf_info *p_vf;
4752
4753 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4754 if (!p_vf || !p_vf->bulletin.p_virt)
4755 return 0;
4756
4757 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
4758 return 0;
4759
4760 return p_vf->bulletin.p_virt->pvid;
4761 }
4762
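/* Configure the Tx rate limit of a VF's vport. The value is handed to the
 * vport rate-limiter together with the current link speed reported by the
 * leading hwfn (presumably in the same Mb/s units as the link speed).
 */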
4763 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
4764 struct ecore_ptt *p_ptt,
4765 int vfid, int val)
4766 {
4767 struct ecore_mcp_link_state *p_link;
4768 struct ecore_vf_info *vf;
4769 u8 abs_vp_id = 0;
4770 enum _ecore_status_t rc;
4771
4772 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4773
4774 if (!vf)
4775 return ECORE_INVAL;
4776
4777 rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4778 if (rc != ECORE_SUCCESS)
4779 return rc;
4780
4781 p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
4782
4783 return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
4784 p_link->speed);
4785 }
4786
4787 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
4788 int vfid, u32 rate)
4789 {
4790 struct ecore_vf_info *vf;
4791 u8 vport_id;
4792 int i;
4793
4794 for_each_hwfn(p_dev, i) {
4795 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
4796
4797 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4798 DP_NOTICE(p_hwfn, true,
4799 "SR-IOV sanity check failed, can't set min rate\n");
4800 return ECORE_INVAL;
4801 }
4802 }
4803
4804 vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
4805 vport_id = vf->vport_id;
4806
4807 return ecore_configure_vport_wfq(p_dev, vport_id, rate);
4808 }
4809
4810 #ifndef LINUX_REMOVE
4811 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
4812 struct ecore_ptt *p_ptt,
4813 int vfid,
4814 struct ecore_eth_stats *p_stats)
4815 {
4816 struct ecore_vf_info *vf;
4817
4818 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4819 if (!vf)
4820 return ECORE_INVAL;
4821
4822 if (vf->state != VF_ENABLED)
4823 return ECORE_INVAL;
4824
4825 __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
4826 vf->abs_vf_id + 0x10, false);
4827
4828 return ECORE_SUCCESS;
4829 }
4830
4831 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
4832 u16 rel_vf_id)
4833 {
4834 struct ecore_vf_info *p_vf;
4835
4836 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4837 if (!p_vf)
4838 return 0;
4839
4840 return p_vf->num_rxqs;
4841 }
4842
4843 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
4844 u16 rel_vf_id)
4845 {
4846 struct ecore_vf_info *p_vf;
4847
4848 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4849 if (!p_vf)
4850 return 0;
4851
4852 return p_vf->num_active_rxqs;
4853 }
4854
4855 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
4856 u16 rel_vf_id)
4857 {
4858 struct ecore_vf_info *p_vf;
4859
4860 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4861 if (!p_vf)
4862 return OSAL_NULL;
4863
4864 return p_vf->ctx;
4865 }
4866
4867 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
4868 u16 rel_vf_id)
4869 {
4870 struct ecore_vf_info *p_vf;
4871
4872 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4873 if (!p_vf)
4874 return 0;
4875
4876 return p_vf->num_sbs;
4877 }
4878
4879 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
4880 u16 rel_vf_id)
4881 {
4882 struct ecore_vf_info *p_vf;
4883
4884 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4885 if (!p_vf)
4886 return false;
4887
4888 return (p_vf->state == VF_FREE);
4889 }
4890
4891 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
4892 u16 rel_vf_id)
4893 {
4894 struct ecore_vf_info *p_vf;
4895
4896 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4897 if (!p_vf)
4898 return false;
4899
4900 return (p_vf->state == VF_ACQUIRED);
4901 }
4902
4903 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
4904 u16 rel_vf_id)
4905 {
4906 struct ecore_vf_info *p_vf;
4907
4908 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4909 if (!p_vf)
4910 return false;
4911
4912 return (p_vf->state == VF_ENABLED);
4913 }
4914
4915 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
4916 u16 rel_vf_id)
4917 {
4918 struct ecore_vf_info *p_vf;
4919
4920 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4921 if (!p_vf)
4922 return false;
4923
4924 return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
4925 }
4926 #endif
4927
4928 int
4929 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
4930 {
4931 struct ecore_wfq_data *vf_vp_wfq;
4932 struct ecore_vf_info *vf_info;
4933
4934 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4935 if (!vf_info)
4936 return 0;
4937
4938 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4939
4940 if (vf_vp_wfq->configured)
4941 return vf_vp_wfq->min_speed;
4942 else
4943 return 0;
4944 }
4945
4946 #ifdef CONFIG_ECORE_SW_CHANNEL
4947 void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
4948 bool b_is_hw)
4949 {
4950 struct ecore_vf_info *vf_info;
4951
4952 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4953 if (!vf_info)
4954 return;
4955
4956 vf_info->b_hw_channel = b_is_hw;
4957 }
4958 #endif
4959