1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File : ecore_l2.c 30 */ 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "bcm_osal.h" 35 36 #include "ecore.h" 37 #include "ecore_status.h" 38 #include "ecore_hsi_eth.h" 39 #include "ecore_chain.h" 40 #include "ecore_spq.h" 41 #include "ecore_init_fw_funcs.h" 42 #include "ecore_cxt.h" 43 #include "ecore_l2.h" 44 #include "ecore_sp_commands.h" 45 #include "ecore_gtt_reg_addr.h" 46 #include "ecore_iro.h" 47 #include "reg_addr.h" 48 #include "ecore_int.h" 49 #include "ecore_hw.h" 50 #include "ecore_vf.h" 51 #include "ecore_sriov.h" 52 #include "ecore_mcp.h" 53 54 #define ECORE_MAX_SGES_NUM 16 55 #define CRC32_POLY 0x1edc6f41 56 57 struct ecore_l2_info { 58 u32 queues; 59 unsigned long **pp_qid_usage; 60 61 /* The lock is meant to synchronize access to the qid usage */ 62 osal_mutex_t lock; 63 }; 64 65 enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn) 66 { 67 struct ecore_l2_info *p_l2_info; 68 unsigned long **pp_qids; 69 u32 i; 70 71 if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 72 return ECORE_SUCCESS; 73 74 p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info)); 75 if (!p_l2_info) 76 return ECORE_NOMEM; 77 p_hwfn->p_l2_info = p_l2_info; 78 79 if (IS_PF(p_hwfn->p_dev)) { 80 p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 81 } else { 82 u8 rx = 0, tx = 0; 83 84 ecore_vf_get_num_rxqs(p_hwfn, &rx); 85 ecore_vf_get_num_txqs(p_hwfn, &tx); 86 87 p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx); 88 } 89 90 pp_qids = OSAL_VZALLOC(p_hwfn->p_dev, 91 sizeof(unsigned long *) * 92 p_l2_info->queues); 93 if (pp_qids == OSAL_NULL) 94 return ECORE_NOMEM; 95 p_l2_info->pp_qid_usage = pp_qids; 96 97 for (i = 0; i < p_l2_info->queues; i++) { 98 pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev, 99 MAX_QUEUES_PER_QZONE / 8); 100 if (pp_qids[i] == OSAL_NULL) 101 return ECORE_NOMEM; 102 } 103 104 #ifdef CONFIG_ECORE_LOCK_ALLOC 105 OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock); 106 #endif 107 108 return ECORE_SUCCESS; 109 } 110 111 void ecore_l2_setup(struct ecore_hwfn *p_hwfn) 112 { 113 if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 114 return; 115 116 OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock); 117 } 118 119 void ecore_l2_free(struct ecore_hwfn *p_hwfn) 120 { 121 u32 i; 122 123 if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 124 return; 125 126 if (p_hwfn->p_l2_info == OSAL_NULL) 127 return; 128 129 if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL) 130 goto out_l2_info; 131 132 /* Free until hit first uninitialized entry */ 133 for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { 134 if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL) 135 break; 136 OSAL_VFREE(p_hwfn->p_dev, 137 p_hwfn->p_l2_info->pp_qid_usage[i]); 138 } 139 140 #ifdef CONFIG_ECORE_LOCK_ALLOC 141 /* Lock is last to initialize, if everything else was */ 142 if (i == p_hwfn->p_l2_info->queues) 143 OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock); 144 #endif 145 146 OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage); 147 148 out_l2_info: 149 OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info); 150 p_hwfn->p_l2_info = OSAL_NULL; 151 } 152 153 /* TODO - we'll need locking around these... */ 154 static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn, 155 struct ecore_queue_cid *p_cid) 156 { 157 struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info; 158 u16 queue_id = p_cid->rel.queue_id; 159 bool b_rc = true; 160 u8 first; 161 162 OSAL_MUTEX_ACQUIRE(&p_l2_info->lock); 163 164 if (queue_id > p_l2_info->queues) { 165 DP_NOTICE(p_hwfn, true, 166 "Requested to increase usage for qzone %04x out of %08x\n", 167 queue_id, p_l2_info->queues); 168 b_rc = false; 169 goto out; 170 } 171 172 first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id], 173 MAX_QUEUES_PER_QZONE); 174 if (first >= MAX_QUEUES_PER_QZONE) { 175 b_rc = false; 176 goto out; 177 } 178 179 OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]); 180 p_cid->qid_usage_idx = first; 181 182 out: 183 OSAL_MUTEX_RELEASE(&p_l2_info->lock); 184 return b_rc; 185 } 186 187 static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn, 188 struct ecore_queue_cid *p_cid) 189 { 190 OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock); 191 192 OSAL_CLEAR_BIT(p_cid->qid_usage_idx, 193 p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); 194 195 OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock); 196 } 197 198 void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn, 199 struct ecore_queue_cid *p_cid) 200 { 201 bool b_legacy_vf = !!(p_cid->vf_legacy & 202 ECORE_QCID_LEGACY_VF_CID); 203 204 /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF. 205 * For legacy vf-queues, the CID doesn't go through here. 206 */ 207 if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) 208 _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); 209 210 /* VFs maintain the index inside queue-zone on their own */ 211 if (p_cid->vfid == ECORE_QUEUE_CID_PF) 212 ecore_eth_queue_qid_usage_del(p_hwfn, p_cid); 213 214 OSAL_VFREE(p_hwfn->p_dev, p_cid); 215 } 216 217 /* The internal is only meant to be directly called by PFs initializeing CIDs 218 * for their VFs. 219 */ 220 static struct ecore_queue_cid * 221 _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, 222 u16 opaque_fid, u32 cid, 223 struct ecore_queue_start_common_params *p_params, 224 struct ecore_queue_cid_vf_params *p_vf_params) 225 { 226 struct ecore_queue_cid *p_cid; 227 enum _ecore_status_t rc; 228 229 p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid)); 230 if (p_cid == OSAL_NULL) 231 return OSAL_NULL; 232 233 p_cid->opaque_fid = opaque_fid; 234 p_cid->cid = cid; 235 p_cid->p_owner = p_hwfn; 236 237 /* Fill in parameters */ 238 p_cid->rel.vport_id = p_params->vport_id; 239 p_cid->rel.queue_id = p_params->queue_id; 240 p_cid->rel.stats_id = p_params->stats_id; 241 p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; 242 p_cid->sb_idx = p_params->sb_idx; 243 244 /* Fill-in bits related to VFs' queues if information was provided */ 245 if (p_vf_params != OSAL_NULL) { 246 p_cid->vfid = p_vf_params->vfid; 247 p_cid->vf_qid = p_vf_params->vf_qid; 248 p_cid->vf_legacy = p_vf_params->vf_legacy; 249 } else { 250 p_cid->vfid = ECORE_QUEUE_CID_PF; 251 } 252 253 /* Don't try calculating the absolute indices for VFs */ 254 if (IS_VF(p_hwfn->p_dev)) { 255 p_cid->abs = p_cid->rel; 256 257 goto out; 258 } 259 260 /* Calculate the engine-absolute indices of the resources. 261 * The would guarantee they're valid later on. 262 * In some cases [SBs] we already have the right values. 263 */ 264 rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); 265 if (rc != ECORE_SUCCESS) 266 goto fail; 267 268 rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, 269 &p_cid->abs.queue_id); 270 if (rc != ECORE_SUCCESS) 271 goto fail; 272 273 /* In case of a PF configuring its VF's queues, the stats-id is already 274 * absolute [since there's a single index that's suitable per-VF]. 275 */ 276 if (p_cid->vfid == ECORE_QUEUE_CID_PF) { 277 rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id, 278 &p_cid->abs.stats_id); 279 if (rc != ECORE_SUCCESS) 280 goto fail; 281 } else { 282 p_cid->abs.stats_id = p_cid->rel.stats_id; 283 } 284 285 out: 286 /* VF-images have provided the qid_usage_idx on their own. 287 * Otherwise, we need to allocate a unique one. 288 */ 289 if (!p_vf_params) { 290 if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid)) 291 goto fail; 292 } else { 293 p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; 294 } 295 296 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 297 "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", 298 p_cid->opaque_fid, p_cid->cid, 299 p_cid->rel.vport_id, p_cid->abs.vport_id, 300 p_cid->rel.queue_id, p_cid->qid_usage_idx, 301 p_cid->abs.queue_id, 302 p_cid->rel.stats_id, p_cid->abs.stats_id, 303 p_cid->sb_igu_id, p_cid->sb_idx); 304 305 return p_cid; 306 307 fail: 308 OSAL_VFREE(p_hwfn->p_dev, p_cid); 309 return OSAL_NULL; 310 } 311 312 struct ecore_queue_cid * 313 ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 314 struct ecore_queue_start_common_params *p_params, 315 struct ecore_queue_cid_vf_params *p_vf_params) 316 { 317 struct ecore_queue_cid *p_cid; 318 u8 vfid = ECORE_CXT_PF_CID; 319 bool b_legacy_vf = false; 320 u32 cid = 0; 321 322 /* In case of legacy VFs, The CID can be derived from the additional 323 * VF parameters - the VF assumes queue X uses CID X, so we can simply 324 * use the vf_qid for this purpose as well. 325 */ 326 if (p_vf_params) { 327 vfid = p_vf_params->vfid; 328 329 if (p_vf_params->vf_legacy & 330 ECORE_QCID_LEGACY_VF_CID) { 331 b_legacy_vf = true; 332 cid = p_vf_params->vf_qid; 333 } 334 } 335 336 /* Get a unique firmware CID for this queue, in case it's a PF. 337 * VF's don't need a CID as the queue configuration will be done 338 * by PF. 339 */ 340 if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) { 341 if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, 342 &cid, vfid) != ECORE_SUCCESS) { 343 DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n"); 344 return OSAL_NULL; 345 } 346 } 347 348 p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 349 p_params, p_vf_params); 350 if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf) 351 _ecore_cxt_release_cid(p_hwfn, cid, vfid); 352 353 return p_cid; 354 } 355 356 static struct ecore_queue_cid * 357 ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 358 struct ecore_queue_start_common_params *p_params) 359 { 360 return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL); 361 } 362 363 enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn, 364 struct ecore_sp_vport_start_params *p_params) 365 { 366 struct vport_start_ramrod_data *p_ramrod = OSAL_NULL; 367 struct ecore_spq_entry *p_ent = OSAL_NULL; 368 struct ecore_sp_init_data init_data; 369 u16 rx_mode = 0, tx_err = 0; 370 u8 abs_vport_id = 0; 371 enum _ecore_status_t rc = ECORE_NOTIMPL; 372 373 rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 374 if (rc != ECORE_SUCCESS) 375 return rc; 376 377 /* Get SPQ entry */ 378 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 379 init_data.cid = ecore_spq_get_cid(p_hwfn); 380 init_data.opaque_fid = p_params->opaque_fid; 381 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 382 383 rc = ecore_sp_init_request(p_hwfn, &p_ent, 384 ETH_RAMROD_VPORT_START, 385 PROTOCOLID_ETH, &init_data); 386 if (rc != ECORE_SUCCESS) 387 return rc; 388 389 p_ramrod = &p_ent->ramrod.vport_start; 390 p_ramrod->vport_id = abs_vport_id; 391 392 p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu); 393 p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; 394 p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; 395 p_ramrod->drop_ttl0_en = p_params->drop_ttl0; 396 p_ramrod->untagged = p_params->only_untagged; 397 p_ramrod->zero_placement_offset = p_params->zero_placement_offset; 398 399 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); 400 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); 401 402 p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode); 403 404 /* Handle requests for strict behavior on transmission errors */ 405 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE, 406 p_params->b_err_illegal_vlan_mode ? 407 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 408 SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL, 409 p_params->b_err_small_pkt ? 410 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 411 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR, 412 p_params->b_err_anti_spoof ? 413 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 414 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS, 415 p_params->b_err_illegal_inband_mode ? 416 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 417 SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG, 418 p_params->b_err_vlan_insert_with_inband ? 419 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 420 SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION, 421 p_params->b_err_big_pkt ? 422 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 423 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME, 424 p_params->b_err_ctrl_frame ? 425 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 426 p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err); 427 428 /* TPA related fields */ 429 OSAL_MEMSET(&p_ramrod->tpa_param, 0, 430 sizeof(struct eth_vport_tpa_param)); 431 p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe; 432 433 switch (p_params->tpa_mode) { 434 case ECORE_TPA_MODE_GRO: 435 p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 436 p_ramrod->tpa_param.tpa_max_size = (u16)-1; 437 p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu/2; 438 p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu/2; 439 p_ramrod->tpa_param.tpa_ipv4_en_flg = 1; 440 p_ramrod->tpa_param.tpa_ipv6_en_flg = 1; 441 p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1; 442 p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1; 443 p_ramrod->tpa_param.tpa_pkt_split_flg = 1; 444 p_ramrod->tpa_param.tpa_gro_consistent_flg = 1; 445 break; 446 default: 447 break; 448 } 449 450 p_ramrod->tx_switching_en = p_params->tx_switching; 451 #ifndef ASIC_ONLY 452 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) 453 p_ramrod->tx_switching_en = 0; 454 #endif 455 456 p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; 457 p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; 458 459 /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ 460 p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev, 461 p_params->concrete_fid); 462 463 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 464 } 465 466 enum _ecore_status_t ecore_sp_vport_start(struct ecore_hwfn *p_hwfn, 467 struct ecore_sp_vport_start_params *p_params) 468 { 469 if (IS_VF(p_hwfn->p_dev)) 470 return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id, 471 p_params->mtu, 472 p_params->remove_inner_vlan, 473 p_params->tpa_mode, 474 p_params->max_buffers_per_cqe, 475 p_params->only_untagged); 476 477 return ecore_sp_eth_vport_start(p_hwfn, p_params); 478 } 479 480 static enum _ecore_status_t 481 ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn, 482 struct vport_update_ramrod_data *p_ramrod, 483 struct ecore_rss_params *p_rss) 484 { 485 struct eth_vport_rss_config *p_config; 486 int i, table_size; 487 enum _ecore_status_t rc = ECORE_SUCCESS; 488 489 if (!p_rss) { 490 p_ramrod->common.update_rss_flg = 0; 491 return rc; 492 } 493 p_config = &p_ramrod->rss_config; 494 495 OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE != 496 ETH_RSS_IND_TABLE_ENTRIES_NUM); 497 498 rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, 499 &p_config->rss_id); 500 if (rc != ECORE_SUCCESS) 501 return rc; 502 503 p_ramrod->common.update_rss_flg = p_rss->update_rss_config; 504 p_config->update_rss_capabilities = p_rss->update_rss_capabilities; 505 p_config->update_rss_ind_table = p_rss->update_rss_ind_table; 506 p_config->update_rss_key = p_rss->update_rss_key; 507 508 p_config->rss_mode = p_rss->rss_enable ? 509 ETH_VPORT_RSS_MODE_REGULAR : 510 ETH_VPORT_RSS_MODE_DISABLED; 511 512 p_config->capabilities = 0; 513 514 SET_FIELD(p_config->capabilities, 515 ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 516 !!(p_rss->rss_caps & ECORE_RSS_IPV4)); 517 SET_FIELD(p_config->capabilities, 518 ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, 519 !!(p_rss->rss_caps & ECORE_RSS_IPV6)); 520 SET_FIELD(p_config->capabilities, 521 ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 522 !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP)); 523 SET_FIELD(p_config->capabilities, 524 ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, 525 !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP)); 526 SET_FIELD(p_config->capabilities, 527 ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, 528 !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP)); 529 SET_FIELD(p_config->capabilities, 530 ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, 531 !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP)); 532 p_config->tbl_size = p_rss->rss_table_size_log; 533 p_config->capabilities = 534 OSAL_CPU_TO_LE16(p_config->capabilities); 535 536 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 537 "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", 538 p_ramrod->common.update_rss_flg, 539 p_config->rss_mode, 540 p_config->update_rss_capabilities, 541 p_config->capabilities, 542 p_config->update_rss_ind_table, 543 p_config->update_rss_key); 544 545 table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE, 546 1 << p_config->tbl_size); 547 for (i = 0; i < table_size; i++) { 548 struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i]; 549 550 if (!p_queue) 551 return ECORE_INVAL; 552 553 p_config->indirection_table[i] = 554 OSAL_CPU_TO_LE16(p_queue->abs.queue_id); 555 } 556 557 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 558 "Configured RSS indirection table [%d entries]:\n", 559 table_size); 560 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) { 561 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 562 "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", 563 OSAL_LE16_TO_CPU(p_config->indirection_table[i]), 564 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]), 565 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]), 566 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]), 567 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]), 568 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]), 569 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]), 570 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]), 571 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]), 572 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]), 573 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]), 574 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]), 575 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]), 576 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]), 577 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]), 578 OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15])); 579 } 580 581 for (i = 0; i < 10; i++) 582 p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]); 583 584 return rc; 585 } 586 587 static void 588 ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn, 589 struct vport_update_ramrod_data *p_ramrod, 590 struct ecore_filter_accept_flags accept_flags) 591 { 592 p_ramrod->common.update_rx_mode_flg = 593 accept_flags.update_rx_mode_config; 594 p_ramrod->common.update_tx_mode_flg = 595 accept_flags.update_tx_mode_config; 596 597 #ifndef ASIC_ONLY 598 /* On B0 emulation we cannot enable Tx, since this would cause writes 599 * to PVFC HW block which isn't implemented in emulation. 600 */ 601 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 602 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 603 "Non-Asic - prevent Tx mode in vport update\n"); 604 p_ramrod->common.update_tx_mode_flg = 0; 605 } 606 #endif 607 608 /* Set Rx mode accept flags */ 609 if (p_ramrod->common.update_rx_mode_flg) { 610 u8 accept_filter = accept_flags.rx_accept_filter; 611 u16 state = 0; 612 613 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 614 !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) || 615 !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); 616 617 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, 618 !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)); 619 620 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 621 !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) || 622 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 623 624 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, 625 (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 626 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 627 628 SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, 629 !!(accept_filter & ECORE_ACCEPT_BCAST)); 630 631 p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state); 632 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 633 "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n", 634 p_ramrod->common.vport_id, state); 635 } 636 637 /* Set Tx mode accept flags */ 638 if (p_ramrod->common.update_tx_mode_flg) { 639 u8 accept_filter = accept_flags.tx_accept_filter; 640 u16 state = 0; 641 642 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, 643 !!(accept_filter & ECORE_ACCEPT_NONE)); 644 645 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, 646 !!(accept_filter & ECORE_ACCEPT_NONE)); 647 648 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, 649 (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 650 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 651 652 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 653 !!(accept_filter & ECORE_ACCEPT_BCAST)); 654 655 p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state); 656 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 657 "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n", 658 p_ramrod->common.vport_id, state); 659 } 660 } 661 662 static void 663 ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn, 664 struct vport_update_ramrod_data *p_ramrod, 665 struct ecore_sge_tpa_params *p_params) 666 { 667 struct eth_vport_tpa_param *p_tpa; 668 669 if (!p_params) { 670 p_ramrod->common.update_tpa_param_flg = 0; 671 p_ramrod->common.update_tpa_en_flg = 0; 672 p_ramrod->common.update_tpa_param_flg = 0; 673 return; 674 } 675 676 p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; 677 p_tpa = &p_ramrod->tpa_param; 678 p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; 679 p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; 680 p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; 681 p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; 682 683 p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; 684 p_tpa->max_buff_num = p_params->max_buffers_per_cqe; 685 p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; 686 p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; 687 p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; 688 p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; 689 p_tpa->tpa_max_size = p_params->tpa_max_size; 690 p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start; 691 p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont; 692 } 693 694 static void 695 ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn, 696 struct vport_update_ramrod_data *p_ramrod, 697 struct ecore_sp_vport_update_params *p_params) 698 { 699 int i; 700 701 OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0, 702 sizeof(p_ramrod->approx_mcast.bins)); 703 704 if (!p_params->update_approx_mcast_flg) 705 return; 706 707 p_ramrod->common.update_approx_mcast_flg = 1; 708 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 709 u32 *p_bins = (u32 *)p_params->bins; 710 711 p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); 712 } 713 } 714 715 enum _ecore_status_t ecore_sp_vport_update(struct ecore_hwfn *p_hwfn, 716 struct ecore_sp_vport_update_params *p_params, 717 enum spq_mode comp_mode, 718 struct ecore_spq_comp_cb *p_comp_data) 719 { 720 struct ecore_rss_params *p_rss_params = p_params->rss_params; 721 struct vport_update_ramrod_data_cmn *p_cmn; 722 struct ecore_sp_init_data init_data; 723 struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 724 struct ecore_spq_entry *p_ent = OSAL_NULL; 725 u8 abs_vport_id = 0, val; 726 enum _ecore_status_t rc = ECORE_NOTIMPL; 727 728 if (IS_VF(p_hwfn->p_dev)) { 729 rc = ecore_vf_pf_vport_update(p_hwfn, p_params); 730 return rc; 731 } 732 733 rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 734 if (rc != ECORE_SUCCESS) 735 return rc; 736 737 /* Get SPQ entry */ 738 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 739 init_data.cid = ecore_spq_get_cid(p_hwfn); 740 init_data.opaque_fid = p_params->opaque_fid; 741 init_data.comp_mode = comp_mode; 742 init_data.p_comp_data = p_comp_data; 743 744 rc = ecore_sp_init_request(p_hwfn, &p_ent, 745 ETH_RAMROD_VPORT_UPDATE, 746 PROTOCOLID_ETH, &init_data); 747 if (rc != ECORE_SUCCESS) 748 return rc; 749 750 /* Copy input params to ramrod according to FW struct */ 751 p_ramrod = &p_ent->ramrod.vport_update; 752 p_cmn = &p_ramrod->common; 753 754 p_cmn->vport_id = abs_vport_id; 755 756 p_cmn->rx_active_flg = p_params->vport_active_rx_flg; 757 p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; 758 p_cmn->tx_active_flg = p_params->vport_active_tx_flg; 759 p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; 760 761 p_cmn->accept_any_vlan = p_params->accept_any_vlan; 762 val = p_params->update_accept_any_vlan_flg; 763 p_cmn->update_accept_any_vlan_flg = val; 764 765 p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; 766 val = p_params->update_inner_vlan_removal_flg; 767 p_cmn->update_inner_vlan_removal_en_flg = val; 768 769 p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; 770 val = p_params->update_default_vlan_enable_flg; 771 p_cmn->update_default_vlan_en_flg = val; 772 773 p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan); 774 p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; 775 776 p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; 777 778 p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; 779 #ifndef ASIC_ONLY 780 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 781 if (p_ramrod->common.tx_switching_en || 782 p_ramrod->common.update_tx_switching_en_flg) { 783 DP_NOTICE(p_hwfn, false, "FPGA - why are we seeing tx-switching? Overriding it\n"); 784 p_ramrod->common.tx_switching_en = 0; 785 p_ramrod->common.update_tx_switching_en_flg = 1; 786 } 787 #endif 788 p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; 789 790 p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; 791 val = p_params->update_anti_spoofing_en_flg; 792 p_ramrod->common.update_anti_spoofing_en_flg = val; 793 794 rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); 795 if (rc != ECORE_SUCCESS) { 796 /* Return spq entry which is taken in ecore_sp_init_request()*/ 797 ecore_spq_return_entry(p_hwfn, p_ent); 798 return rc; 799 } 800 801 /* Update mcast bins for VFs, PF doesn't use this functionality */ 802 ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); 803 804 ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); 805 ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, 806 p_params->sge_tpa_params); 807 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 808 } 809 810 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, 811 u16 opaque_fid, 812 u8 vport_id) 813 { 814 struct vport_stop_ramrod_data *p_ramrod; 815 struct ecore_sp_init_data init_data; 816 struct ecore_spq_entry *p_ent; 817 u8 abs_vport_id = 0; 818 enum _ecore_status_t rc; 819 820 if (IS_VF(p_hwfn->p_dev)) 821 return ecore_vf_pf_vport_stop(p_hwfn); 822 823 rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 824 if (rc != ECORE_SUCCESS) 825 return rc; 826 827 /* Get SPQ entry */ 828 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 829 init_data.cid = ecore_spq_get_cid(p_hwfn); 830 init_data.opaque_fid = opaque_fid; 831 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 832 833 rc = ecore_sp_init_request(p_hwfn, &p_ent, 834 ETH_RAMROD_VPORT_STOP, 835 PROTOCOLID_ETH, &init_data); 836 if (rc != ECORE_SUCCESS) 837 return rc; 838 839 p_ramrod = &p_ent->ramrod.vport_stop; 840 p_ramrod->vport_id = abs_vport_id; 841 842 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 843 } 844 845 static enum _ecore_status_t 846 ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn, 847 struct ecore_filter_accept_flags *p_accept_flags) 848 { 849 struct ecore_sp_vport_update_params s_params; 850 851 OSAL_MEMSET(&s_params, 0, sizeof(s_params)); 852 OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags, 853 sizeof(struct ecore_filter_accept_flags)); 854 855 return ecore_vf_pf_vport_update(p_hwfn, &s_params); 856 } 857 858 enum _ecore_status_t ecore_filter_accept_cmd(struct ecore_dev *p_dev, 859 u8 vport, 860 struct ecore_filter_accept_flags accept_flags, 861 u8 update_accept_any_vlan, 862 u8 accept_any_vlan, 863 enum spq_mode comp_mode, 864 struct ecore_spq_comp_cb *p_comp_data) 865 { 866 struct ecore_sp_vport_update_params vport_update_params; 867 int i, rc; 868 869 /* Prepare and send the vport rx_mode change */ 870 OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params)); 871 vport_update_params.vport_id = vport; 872 vport_update_params.accept_flags = accept_flags; 873 vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; 874 vport_update_params.accept_any_vlan = accept_any_vlan; 875 876 for_each_hwfn(p_dev, i) { 877 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 878 879 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 880 881 if (IS_VF(p_dev)) { 882 rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags); 883 if (rc != ECORE_SUCCESS) 884 return rc; 885 continue; 886 } 887 888 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 889 comp_mode, p_comp_data); 890 if (rc != ECORE_SUCCESS) { 891 DP_ERR(p_dev, "Update rx_mode failed %d\n", rc); 892 return rc; 893 } 894 895 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 896 "Accept filter configured, flags = [Rx]%x [Tx]%x\n", 897 accept_flags.rx_accept_filter, 898 accept_flags.tx_accept_filter); 899 900 if (update_accept_any_vlan) 901 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 902 "accept_any_vlan=%d configured\n", 903 accept_any_vlan); 904 } 905 906 return 0; 907 } 908 909 enum _ecore_status_t 910 ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn, 911 struct ecore_queue_cid *p_cid, 912 u16 bd_max_bytes, 913 dma_addr_t bd_chain_phys_addr, 914 dma_addr_t cqe_pbl_addr, 915 u16 cqe_pbl_size) 916 { 917 struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; 918 struct ecore_spq_entry *p_ent = OSAL_NULL; 919 struct ecore_sp_init_data init_data; 920 enum _ecore_status_t rc = ECORE_NOTIMPL; 921 922 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", 923 p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id, 924 p_cid->abs.vport_id, p_cid->sb_igu_id); 925 926 /* Get SPQ entry */ 927 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 928 init_data.cid = p_cid->cid; 929 init_data.opaque_fid = p_cid->opaque_fid; 930 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 931 932 rc = ecore_sp_init_request(p_hwfn, &p_ent, 933 ETH_RAMROD_RX_QUEUE_START, 934 PROTOCOLID_ETH, &init_data); 935 if (rc != ECORE_SUCCESS) 936 return rc; 937 938 p_ramrod = &p_ent->ramrod.rx_queue_start; 939 940 p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); 941 p_ramrod->sb_index = p_cid->sb_idx; 942 p_ramrod->vport_id = p_cid->abs.vport_id; 943 p_ramrod->stats_counter_id = p_cid->abs.stats_id; 944 p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 945 p_ramrod->complete_cqe_flg = 0; 946 p_ramrod->complete_event_flg = 1; 947 948 p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes); 949 DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); 950 951 p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size); 952 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); 953 954 if (p_cid->vfid != ECORE_QUEUE_CID_PF) { 955 bool b_legacy_vf = !!(p_cid->vf_legacy & 956 ECORE_QCID_LEGACY_VF_RX_PROD); 957 958 p_ramrod->vf_rx_prod_index = p_cid->vf_qid; 959 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n", 960 b_legacy_vf ? " [legacy]" : "", 961 p_cid->vf_qid); 962 p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; 963 } 964 965 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 966 } 967 968 static enum _ecore_status_t 969 ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn, 970 struct ecore_queue_cid *p_cid, 971 u16 bd_max_bytes, 972 dma_addr_t bd_chain_phys_addr, 973 dma_addr_t cqe_pbl_addr, 974 u16 cqe_pbl_size, 975 void OSAL_IOMEM **pp_prod) 976 { 977 u32 init_prod_val = 0; 978 979 *pp_prod = (u8 OSAL_IOMEM*) 980 p_hwfn->regview + 981 GTT_BAR0_MAP_REG_MSDM_RAM + 982 MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); 983 984 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 985 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 986 (u32 *)(&init_prod_val)); 987 988 return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid, 989 bd_max_bytes, 990 bd_chain_phys_addr, 991 cqe_pbl_addr, cqe_pbl_size); 992 } 993 994 enum _ecore_status_t 995 ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, 996 u16 opaque_fid, 997 struct ecore_queue_start_common_params *p_params, 998 u16 bd_max_bytes, 999 dma_addr_t bd_chain_phys_addr, 1000 dma_addr_t cqe_pbl_addr, 1001 u16 cqe_pbl_size, 1002 struct ecore_rxq_start_ret_params *p_ret_params) 1003 { 1004 struct ecore_queue_cid *p_cid; 1005 enum _ecore_status_t rc; 1006 1007 /* Allocate a CID for the queue */ 1008 p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params); 1009 if (p_cid == OSAL_NULL) 1010 return ECORE_NOMEM; 1011 1012 if (IS_PF(p_hwfn->p_dev)) 1013 rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid, 1014 bd_max_bytes, 1015 bd_chain_phys_addr, 1016 cqe_pbl_addr, cqe_pbl_size, 1017 &p_ret_params->p_prod); 1018 else 1019 rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid, 1020 bd_max_bytes, 1021 bd_chain_phys_addr, 1022 cqe_pbl_addr, 1023 cqe_pbl_size, 1024 &p_ret_params->p_prod); 1025 1026 /* Provide the caller with a reference to as handler */ 1027 if (rc != ECORE_SUCCESS) 1028 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1029 else 1030 p_ret_params->p_handle = (void *)p_cid; 1031 1032 return rc; 1033 } 1034 1035 enum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, 1036 void **pp_rxq_handles, 1037 u8 num_rxqs, 1038 u8 complete_cqe_flg, 1039 u8 complete_event_flg, 1040 enum spq_mode comp_mode, 1041 struct ecore_spq_comp_cb *p_comp_data) 1042 { 1043 struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL; 1044 struct ecore_spq_entry *p_ent = OSAL_NULL; 1045 struct ecore_sp_init_data init_data; 1046 struct ecore_queue_cid *p_cid; 1047 enum _ecore_status_t rc = ECORE_NOTIMPL; 1048 u8 i; 1049 1050 if (IS_VF(p_hwfn->p_dev)) 1051 return ecore_vf_pf_rxqs_update(p_hwfn, 1052 (struct ecore_queue_cid **) 1053 pp_rxq_handles, 1054 num_rxqs, 1055 complete_cqe_flg, 1056 complete_event_flg); 1057 1058 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1059 init_data.comp_mode = comp_mode; 1060 init_data.p_comp_data = p_comp_data; 1061 1062 for (i = 0; i < num_rxqs; i++) { 1063 p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i]; 1064 1065 /* Get SPQ entry */ 1066 init_data.cid = p_cid->cid; 1067 init_data.opaque_fid = p_cid->opaque_fid; 1068 1069 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1070 ETH_RAMROD_RX_QUEUE_UPDATE, 1071 PROTOCOLID_ETH, &init_data); 1072 if (rc != ECORE_SUCCESS) 1073 return rc; 1074 1075 p_ramrod = &p_ent->ramrod.rx_queue_update; 1076 p_ramrod->vport_id = p_cid->abs.vport_id; 1077 1078 p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1079 p_ramrod->complete_cqe_flg = complete_cqe_flg; 1080 p_ramrod->complete_event_flg = complete_event_flg; 1081 1082 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1083 if (rc != ECORE_SUCCESS) 1084 return rc; 1085 } 1086 1087 return rc; 1088 } 1089 1090 static enum _ecore_status_t 1091 ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn, 1092 struct ecore_queue_cid *p_cid, 1093 bool b_eq_completion_only, 1094 bool b_cqe_completion) 1095 { 1096 struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL; 1097 struct ecore_spq_entry *p_ent = OSAL_NULL; 1098 struct ecore_sp_init_data init_data; 1099 enum _ecore_status_t rc; 1100 1101 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1102 init_data.cid = p_cid->cid; 1103 init_data.opaque_fid = p_cid->opaque_fid; 1104 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1105 1106 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1107 ETH_RAMROD_RX_QUEUE_STOP, 1108 PROTOCOLID_ETH, &init_data); 1109 if (rc != ECORE_SUCCESS) 1110 return rc; 1111 1112 p_ramrod = &p_ent->ramrod.rx_queue_stop; 1113 p_ramrod->vport_id = p_cid->abs.vport_id; 1114 p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1115 1116 /* Cleaning the queue requires the completion to arrive there. 1117 * In addition, VFs require the answer to come as eqe to PF. 1118 */ 1119 p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) && 1120 !b_eq_completion_only) || 1121 b_cqe_completion; 1122 p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) || 1123 b_eq_completion_only; 1124 1125 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1126 } 1127 1128 enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, 1129 void *p_rxq, 1130 bool eq_completion_only, 1131 bool cqe_completion) 1132 { 1133 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq; 1134 enum _ecore_status_t rc = ECORE_NOTIMPL; 1135 1136 if (IS_PF(p_hwfn->p_dev)) 1137 rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid, 1138 eq_completion_only, 1139 cqe_completion); 1140 else 1141 rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); 1142 1143 if (rc == ECORE_SUCCESS) 1144 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1145 return rc; 1146 } 1147 1148 enum _ecore_status_t 1149 ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn, 1150 struct ecore_queue_cid *p_cid, 1151 dma_addr_t pbl_addr, u16 pbl_size, 1152 u16 pq_id) 1153 { 1154 struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; 1155 struct ecore_spq_entry *p_ent = OSAL_NULL; 1156 struct ecore_sp_init_data init_data; 1157 enum _ecore_status_t rc = ECORE_NOTIMPL; 1158 1159 /* Get SPQ entry */ 1160 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1161 init_data.cid = p_cid->cid; 1162 init_data.opaque_fid = p_cid->opaque_fid; 1163 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1164 1165 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1166 ETH_RAMROD_TX_QUEUE_START, 1167 PROTOCOLID_ETH, &init_data); 1168 if (rc != ECORE_SUCCESS) 1169 return rc; 1170 1171 p_ramrod = &p_ent->ramrod.tx_queue_start; 1172 p_ramrod->vport_id = p_cid->abs.vport_id; 1173 1174 p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); 1175 p_ramrod->sb_index = p_cid->sb_idx; 1176 p_ramrod->stats_counter_id = p_cid->abs.stats_id; 1177 1178 p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1179 p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1180 1181 p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size); 1182 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); 1183 1184 p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id); 1185 1186 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1187 } 1188 1189 static enum _ecore_status_t 1190 ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn, 1191 struct ecore_queue_cid *p_cid, 1192 u8 tc, 1193 dma_addr_t pbl_addr, u16 pbl_size, 1194 void OSAL_IOMEM **pp_doorbell) 1195 { 1196 enum _ecore_status_t rc; 1197 1198 /* TODO - set tc in the pq_params for multi-cos */ 1199 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, 1200 pbl_addr, pbl_size, 1201 ecore_get_cm_pq_idx_mcos(p_hwfn, tc)); 1202 if (rc != ECORE_SUCCESS) 1203 return rc; 1204 1205 /* Provide the caller with the necessary return values */ 1206 *pp_doorbell = (u8 OSAL_IOMEM *) 1207 p_hwfn->doorbells + 1208 DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY); 1209 1210 return ECORE_SUCCESS; 1211 } 1212 1213 enum _ecore_status_t 1214 ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 1215 struct ecore_queue_start_common_params *p_params, 1216 u8 tc, 1217 dma_addr_t pbl_addr, u16 pbl_size, 1218 struct ecore_txq_start_ret_params *p_ret_params) 1219 { 1220 struct ecore_queue_cid *p_cid; 1221 enum _ecore_status_t rc; 1222 1223 p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params); 1224 if (p_cid == OSAL_NULL) 1225 return ECORE_INVAL; 1226 1227 if (IS_PF(p_hwfn->p_dev)) 1228 rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, 1229 pbl_addr, pbl_size, 1230 &p_ret_params->p_doorbell); 1231 else 1232 rc = ecore_vf_pf_txq_start(p_hwfn, p_cid, 1233 pbl_addr, pbl_size, 1234 &p_ret_params->p_doorbell); 1235 1236 if (rc != ECORE_SUCCESS) 1237 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1238 else 1239 p_ret_params->p_handle = (void *)p_cid; 1240 1241 return rc; 1242 } 1243 1244 static enum _ecore_status_t 1245 ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1246 struct ecore_queue_cid *p_cid) 1247 { 1248 struct ecore_spq_entry *p_ent = OSAL_NULL; 1249 struct ecore_sp_init_data init_data; 1250 enum _ecore_status_t rc; 1251 1252 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1253 init_data.cid = p_cid->cid; 1254 init_data.opaque_fid = p_cid->opaque_fid; 1255 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1256 1257 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1258 ETH_RAMROD_TX_QUEUE_STOP, 1259 PROTOCOLID_ETH, &init_data); 1260 if (rc != ECORE_SUCCESS) 1261 return rc; 1262 1263 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1264 } 1265 1266 enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1267 void *p_handle) 1268 { 1269 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 1270 enum _ecore_status_t rc; 1271 1272 if (IS_PF(p_hwfn->p_dev)) 1273 rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid); 1274 else 1275 rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid); 1276 1277 if (rc == ECORE_SUCCESS) 1278 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1279 return rc; 1280 } 1281 1282 static enum eth_filter_action ecore_filter_action(enum ecore_filter_opcode opcode) 1283 { 1284 enum eth_filter_action action = MAX_ETH_FILTER_ACTION; 1285 1286 switch (opcode) { 1287 case ECORE_FILTER_ADD: 1288 action = ETH_FILTER_ACTION_ADD; 1289 break; 1290 case ECORE_FILTER_REMOVE: 1291 action = ETH_FILTER_ACTION_REMOVE; 1292 break; 1293 case ECORE_FILTER_FLUSH: 1294 action = ETH_FILTER_ACTION_REMOVE_ALL; 1295 break; 1296 default: 1297 action = MAX_ETH_FILTER_ACTION; 1298 } 1299 1300 return action; 1301 } 1302 1303 static enum _ecore_status_t 1304 ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn, 1305 u16 opaque_fid, 1306 struct ecore_filter_ucast *p_filter_cmd, 1307 struct vport_filter_update_ramrod_data **pp_ramrod, 1308 struct ecore_spq_entry **pp_ent, 1309 enum spq_mode comp_mode, 1310 struct ecore_spq_comp_cb *p_comp_data) 1311 { 1312 u8 vport_to_add_to = 0, vport_to_remove_from = 0; 1313 struct vport_filter_update_ramrod_data *p_ramrod; 1314 struct eth_filter_cmd *p_first_filter; 1315 struct eth_filter_cmd *p_second_filter; 1316 struct ecore_sp_init_data init_data; 1317 enum eth_filter_action action; 1318 enum _ecore_status_t rc; 1319 1320 rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1321 &vport_to_remove_from); 1322 if (rc != ECORE_SUCCESS) 1323 return rc; 1324 1325 rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1326 &vport_to_add_to); 1327 if (rc != ECORE_SUCCESS) 1328 return rc; 1329 1330 /* Get SPQ entry */ 1331 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1332 init_data.cid = ecore_spq_get_cid(p_hwfn); 1333 init_data.opaque_fid = opaque_fid; 1334 init_data.comp_mode = comp_mode; 1335 init_data.p_comp_data = p_comp_data; 1336 1337 rc = ecore_sp_init_request(p_hwfn, pp_ent, 1338 ETH_RAMROD_FILTERS_UPDATE, 1339 PROTOCOLID_ETH, &init_data); 1340 if (rc != ECORE_SUCCESS) 1341 return rc; 1342 1343 *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; 1344 p_ramrod = *pp_ramrod; 1345 p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; 1346 p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0; 1347 1348 #ifndef ASIC_ONLY 1349 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1350 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1351 "Non-Asic - prevent Tx filters\n"); 1352 p_ramrod->filter_cmd_hdr.tx = 0; 1353 } 1354 1355 #endif 1356 1357 switch (p_filter_cmd->opcode) { 1358 case ECORE_FILTER_REPLACE: 1359 case ECORE_FILTER_MOVE: 1360 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; 1361 default: 1362 p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; 1363 } 1364 1365 p_first_filter = &p_ramrod->filter_cmds[0]; 1366 p_second_filter = &p_ramrod->filter_cmds[1]; 1367 1368 switch (p_filter_cmd->type) { 1369 case ECORE_FILTER_MAC: 1370 p_first_filter->type = ETH_FILTER_TYPE_MAC; break; 1371 case ECORE_FILTER_VLAN: 1372 p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; 1373 case ECORE_FILTER_MAC_VLAN: 1374 p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; 1375 case ECORE_FILTER_INNER_MAC: 1376 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; 1377 case ECORE_FILTER_INNER_VLAN: 1378 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; 1379 case ECORE_FILTER_INNER_PAIR: 1380 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; 1381 case ECORE_FILTER_INNER_MAC_VNI_PAIR: 1382 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; 1383 break; 1384 case ECORE_FILTER_MAC_VNI_PAIR: 1385 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; 1386 case ECORE_FILTER_VNI: 1387 p_first_filter->type = ETH_FILTER_TYPE_VNI; break; 1388 } 1389 1390 if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || 1391 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1392 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || 1393 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || 1394 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1395 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) 1396 ecore_set_fw_mac_addr(&p_first_filter->mac_msb, 1397 &p_first_filter->mac_mid, 1398 &p_first_filter->mac_lsb, 1399 (u8 *)p_filter_cmd->mac); 1400 1401 if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || 1402 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1403 (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || 1404 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) 1405 p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan); 1406 1407 if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1408 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || 1409 (p_first_filter->type == ETH_FILTER_TYPE_VNI)) 1410 p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni); 1411 1412 if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) { 1413 p_second_filter->type = p_first_filter->type; 1414 p_second_filter->mac_msb = p_first_filter->mac_msb; 1415 p_second_filter->mac_mid = p_first_filter->mac_mid; 1416 p_second_filter->mac_lsb = p_first_filter->mac_lsb; 1417 p_second_filter->vlan_id = p_first_filter->vlan_id; 1418 p_second_filter->vni = p_first_filter->vni; 1419 1420 p_first_filter->action = ETH_FILTER_ACTION_REMOVE; 1421 1422 p_first_filter->vport_id = vport_to_remove_from; 1423 1424 p_second_filter->action = ETH_FILTER_ACTION_ADD; 1425 p_second_filter->vport_id = vport_to_add_to; 1426 } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) { 1427 p_first_filter->vport_id = vport_to_add_to; 1428 OSAL_MEMCPY(p_second_filter, p_first_filter, 1429 sizeof(*p_second_filter)); 1430 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; 1431 p_second_filter->action = ETH_FILTER_ACTION_ADD; 1432 } else { 1433 action = ecore_filter_action(p_filter_cmd->opcode); 1434 1435 if (action == MAX_ETH_FILTER_ACTION) { 1436 DP_NOTICE(p_hwfn, true, 1437 "%d is not supported yet\n", 1438 p_filter_cmd->opcode); 1439 return ECORE_NOTIMPL; 1440 } 1441 1442 p_first_filter->action = action; 1443 p_first_filter->vport_id = 1444 (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? 1445 vport_to_remove_from : vport_to_add_to; 1446 } 1447 1448 return ECORE_SUCCESS; 1449 } 1450 1451 enum _ecore_status_t ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, 1452 u16 opaque_fid, 1453 struct ecore_filter_ucast *p_filter_cmd, 1454 enum spq_mode comp_mode, 1455 struct ecore_spq_comp_cb *p_comp_data) 1456 { 1457 struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL; 1458 struct ecore_spq_entry *p_ent = OSAL_NULL; 1459 struct eth_filter_cmd_header *p_header; 1460 enum _ecore_status_t rc; 1461 1462 rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, 1463 &p_ramrod, &p_ent, 1464 comp_mode, p_comp_data); 1465 if (rc != ECORE_SUCCESS) { 1466 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); 1467 return rc; 1468 } 1469 p_header = &p_ramrod->filter_cmd_hdr; 1470 p_header->assert_on_error = p_filter_cmd->assert_on_error; 1471 1472 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1473 if (rc != ECORE_SUCCESS) { 1474 DP_ERR(p_hwfn, 1475 "Unicast filter ADD command failed %d\n", 1476 rc); 1477 return rc; 1478 } 1479 1480 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1481 "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", 1482 (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" : 1483 ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? 1484 "REMOVE" : 1485 ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ? 1486 "MOVE" : "REPLACE")), 1487 (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" : 1488 ((p_filter_cmd->type == ECORE_FILTER_VLAN) ? 1489 "VLAN" : "MAC & VLAN"), 1490 p_ramrod->filter_cmd_hdr.cmd_cnt, 1491 p_filter_cmd->is_rx_filter, 1492 p_filter_cmd->is_tx_filter); 1493 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1494 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", 1495 p_filter_cmd->vport_to_add_to, 1496 p_filter_cmd->vport_to_remove_from, 1497 p_filter_cmd->mac[0], p_filter_cmd->mac[1], 1498 p_filter_cmd->mac[2], p_filter_cmd->mac[3], 1499 p_filter_cmd->mac[4], p_filter_cmd->mac[5], 1500 p_filter_cmd->vlan); 1501 1502 return ECORE_SUCCESS; 1503 } 1504 1505 /******************************************************************************* 1506 * Description: 1507 * Calculates crc 32 on a buffer 1508 * Note: crc32_length MUST be aligned to 8 1509 * Return: 1510 ******************************************************************************/ 1511 static u32 ecore_calc_crc32c(u8 *crc32_packet, 1512 u32 crc32_length, 1513 u32 crc32_seed, 1514 u8 complement) 1515 { 1516 u32 byte = 0, bit = 0, crc32_result = crc32_seed; 1517 u8 msb = 0, current_byte = 0; 1518 1519 if ((crc32_packet == OSAL_NULL) || 1520 (crc32_length == 0) || 1521 ((crc32_length % 8) != 0)) { 1522 return crc32_result; 1523 } 1524 1525 for (byte = 0; byte < crc32_length; byte++) { 1526 current_byte = crc32_packet[byte]; 1527 for (bit = 0; bit < 8; bit++) { 1528 msb = (u8)(crc32_result >> 31); 1529 crc32_result = crc32_result << 1; 1530 if (msb != (0x1 & (current_byte >> bit))) { 1531 crc32_result = crc32_result ^ CRC32_POLY; 1532 crc32_result |= 1; /*crc32_result[0] = 1;*/ 1533 } 1534 } 1535 } 1536 1537 return crc32_result; 1538 } 1539 1540 static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len) 1541 { 1542 u32 packet_buf[2] = {0}; 1543 1544 OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6); 1545 return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0); 1546 } 1547 1548 u8 ecore_mcast_bin_from_mac(u8 *mac) 1549 { 1550 u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, 1551 mac, ETH_ALEN); 1552 1553 return crc & 0xff; 1554 } 1555 1556 static enum _ecore_status_t 1557 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, 1558 u16 opaque_fid, 1559 struct ecore_filter_mcast *p_filter_cmd, 1560 enum spq_mode comp_mode, 1561 struct ecore_spq_comp_cb *p_comp_data) 1562 { 1563 unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1564 struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 1565 struct ecore_spq_entry *p_ent = OSAL_NULL; 1566 struct ecore_sp_init_data init_data; 1567 u8 abs_vport_id = 0; 1568 enum _ecore_status_t rc; 1569 int i; 1570 1571 if (p_filter_cmd->opcode == ECORE_FILTER_ADD) 1572 rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1573 &abs_vport_id); 1574 else 1575 rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1576 &abs_vport_id); 1577 if (rc != ECORE_SUCCESS) 1578 return rc; 1579 1580 /* Get SPQ entry */ 1581 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1582 init_data.cid = ecore_spq_get_cid(p_hwfn); 1583 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1584 init_data.comp_mode = comp_mode; 1585 init_data.p_comp_data = p_comp_data; 1586 1587 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1588 ETH_RAMROD_VPORT_UPDATE, 1589 PROTOCOLID_ETH, &init_data); 1590 if (rc != ECORE_SUCCESS) { 1591 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); 1592 return rc; 1593 } 1594 1595 p_ramrod = &p_ent->ramrod.vport_update; 1596 p_ramrod->common.update_approx_mcast_flg = 1; 1597 1598 /* explicitly clear out the entire vector */ 1599 OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 1600 0, sizeof(p_ramrod->approx_mcast.bins)); 1601 OSAL_MEMSET(bins, 0, sizeof(unsigned long) * 1602 ETH_MULTICAST_MAC_BINS_IN_REGS); 1603 /* filter ADD op is explicit set op and it removes 1604 * any existing filters for the vport. 1605 */ 1606 if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { 1607 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1608 u32 bit; 1609 1610 bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1611 OSAL_SET_BIT(bit, bins); 1612 } 1613 1614 /* Convert to correct endianity */ 1615 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 1616 struct vport_update_ramrod_mcast *p_ramrod_bins; 1617 u32 *p_bins = (u32 *)bins; 1618 1619 p_ramrod_bins = &p_ramrod->approx_mcast; 1620 p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); 1621 } 1622 } 1623 1624 p_ramrod->common.vport_id = abs_vport_id; 1625 1626 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1627 if (rc != ECORE_SUCCESS) 1628 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc); 1629 1630 return rc; 1631 } 1632 1633 enum _ecore_status_t ecore_filter_mcast_cmd(struct ecore_dev *p_dev, 1634 struct ecore_filter_mcast *p_filter_cmd, 1635 enum spq_mode comp_mode, 1636 struct ecore_spq_comp_cb *p_comp_data) 1637 { 1638 enum _ecore_status_t rc = ECORE_SUCCESS; 1639 int i; 1640 1641 /* only ADD and REMOVE operations are supported for multi-cast */ 1642 if ((p_filter_cmd->opcode != ECORE_FILTER_ADD && 1643 (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) || 1644 (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) { 1645 return ECORE_INVAL; 1646 } 1647 1648 for_each_hwfn(p_dev, i) { 1649 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1650 u16 opaque_fid; 1651 1652 if (IS_VF(p_dev)) { 1653 ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); 1654 continue; 1655 } 1656 1657 opaque_fid = p_hwfn->hw_info.opaque_fid; 1658 rc = ecore_sp_eth_filter_mcast(p_hwfn, 1659 opaque_fid, 1660 p_filter_cmd, 1661 comp_mode, 1662 p_comp_data); 1663 if (rc != ECORE_SUCCESS) 1664 break; 1665 } 1666 1667 return rc; 1668 } 1669 1670 enum _ecore_status_t ecore_filter_ucast_cmd(struct ecore_dev *p_dev, 1671 struct ecore_filter_ucast *p_filter_cmd, 1672 enum spq_mode comp_mode, 1673 struct ecore_spq_comp_cb *p_comp_data) 1674 { 1675 enum _ecore_status_t rc = ECORE_SUCCESS; 1676 int i; 1677 1678 for_each_hwfn(p_dev, i) { 1679 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1680 u16 opaque_fid; 1681 1682 if (IS_VF(p_dev)) { 1683 rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); 1684 continue; 1685 } 1686 1687 opaque_fid = p_hwfn->hw_info.opaque_fid; 1688 rc = ecore_sp_eth_filter_ucast(p_hwfn, 1689 opaque_fid, 1690 p_filter_cmd, 1691 comp_mode, 1692 p_comp_data); 1693 if (rc != ECORE_SUCCESS) 1694 break; 1695 } 1696 1697 return rc; 1698 } 1699 1700 /* Statistics related code */ 1701 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn, 1702 u32 *p_addr, u32 *p_len, 1703 u16 statistics_bin) 1704 { 1705 if (IS_PF(p_hwfn->p_dev)) { 1706 *p_addr = BAR0_MAP_REG_PSDM_RAM + 1707 PSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1708 *p_len = sizeof(struct eth_pstorm_per_queue_stat); 1709 } else { 1710 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1711 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1712 1713 *p_addr = p_resp->pfdev_info.stats_info.pstats.address; 1714 *p_len = p_resp->pfdev_info.stats_info.pstats.len; 1715 } 1716 } 1717 1718 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn, 1719 struct ecore_ptt *p_ptt, 1720 struct ecore_eth_stats *p_stats, 1721 u16 statistics_bin) 1722 { 1723 struct eth_pstorm_per_queue_stat pstats; 1724 u32 pstats_addr = 0, pstats_len = 0; 1725 1726 __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, 1727 statistics_bin); 1728 1729 OSAL_MEMSET(&pstats, 0, sizeof(pstats)); 1730 ecore_memcpy_from(p_hwfn, p_ptt, &pstats, 1731 pstats_addr, pstats_len); 1732 1733 p_stats->common.tx_ucast_bytes += 1734 HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1735 p_stats->common.tx_mcast_bytes += 1736 HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1737 p_stats->common.tx_bcast_bytes += 1738 HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1739 p_stats->common.tx_ucast_pkts += 1740 HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1741 p_stats->common.tx_mcast_pkts += 1742 HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1743 p_stats->common.tx_bcast_pkts += 1744 HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1745 p_stats->common.tx_err_drop_pkts += 1746 HILO_64_REGPAIR(pstats.error_drop_pkts); 1747 } 1748 1749 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn, 1750 struct ecore_ptt *p_ptt, 1751 struct ecore_eth_stats *p_stats, 1752 u16 statistics_bin) 1753 { 1754 struct tstorm_per_port_stat tstats; 1755 u32 tstats_addr, tstats_len; 1756 1757 if (IS_PF(p_hwfn->p_dev)) { 1758 tstats_addr = BAR0_MAP_REG_TSDM_RAM + 1759 TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); 1760 tstats_len = sizeof(struct tstorm_per_port_stat); 1761 } else { 1762 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1763 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1764 1765 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; 1766 tstats_len = p_resp->pfdev_info.stats_info.tstats.len; 1767 } 1768 1769 OSAL_MEMSET(&tstats, 0, sizeof(tstats)); 1770 ecore_memcpy_from(p_hwfn, p_ptt, &tstats, 1771 tstats_addr, tstats_len); 1772 1773 p_stats->common.mftag_filter_discards += 1774 HILO_64_REGPAIR(tstats.mftag_filter_discard); 1775 p_stats->common.mac_filter_discards += 1776 HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1777 } 1778 1779 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn, 1780 u32 *p_addr, u32 *p_len, 1781 u16 statistics_bin) 1782 { 1783 if (IS_PF(p_hwfn->p_dev)) { 1784 *p_addr = BAR0_MAP_REG_USDM_RAM + 1785 USTORM_QUEUE_STAT_OFFSET(statistics_bin); 1786 *p_len = sizeof(struct eth_ustorm_per_queue_stat); 1787 } else { 1788 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1789 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1790 1791 *p_addr = p_resp->pfdev_info.stats_info.ustats.address; 1792 *p_len = p_resp->pfdev_info.stats_info.ustats.len; 1793 } 1794 } 1795 1796 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn, 1797 struct ecore_ptt *p_ptt, 1798 struct ecore_eth_stats *p_stats, 1799 u16 statistics_bin) 1800 { 1801 struct eth_ustorm_per_queue_stat ustats; 1802 u32 ustats_addr = 0, ustats_len = 0; 1803 1804 __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, 1805 statistics_bin); 1806 1807 OSAL_MEMSET(&ustats, 0, sizeof(ustats)); 1808 ecore_memcpy_from(p_hwfn, p_ptt, &ustats, 1809 ustats_addr, ustats_len); 1810 1811 p_stats->common.rx_ucast_bytes += 1812 HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1813 p_stats->common.rx_mcast_bytes += 1814 HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1815 p_stats->common.rx_bcast_bytes += 1816 HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1817 p_stats->common.rx_ucast_pkts += 1818 HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1819 p_stats->common.rx_mcast_pkts += 1820 HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1821 p_stats->common.rx_bcast_pkts += 1822 HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1823 } 1824 1825 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn, 1826 u32 *p_addr, u32 *p_len, 1827 u16 statistics_bin) 1828 { 1829 if (IS_PF(p_hwfn->p_dev)) { 1830 *p_addr = BAR0_MAP_REG_MSDM_RAM + 1831 MSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1832 *p_len = sizeof(struct eth_mstorm_per_queue_stat); 1833 } else { 1834 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1835 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1836 1837 *p_addr = p_resp->pfdev_info.stats_info.mstats.address; 1838 *p_len = p_resp->pfdev_info.stats_info.mstats.len; 1839 } 1840 } 1841 1842 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn, 1843 struct ecore_ptt *p_ptt, 1844 struct ecore_eth_stats *p_stats, 1845 u16 statistics_bin) 1846 { 1847 struct eth_mstorm_per_queue_stat mstats; 1848 u32 mstats_addr = 0, mstats_len = 0; 1849 1850 __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, 1851 statistics_bin); 1852 1853 OSAL_MEMSET(&mstats, 0, sizeof(mstats)); 1854 ecore_memcpy_from(p_hwfn, p_ptt, &mstats, 1855 mstats_addr, mstats_len); 1856 1857 p_stats->common.no_buff_discards += 1858 HILO_64_REGPAIR(mstats.no_buff_discard); 1859 p_stats->common.packet_too_big_discard += 1860 HILO_64_REGPAIR(mstats.packet_too_big_discard); 1861 p_stats->common.ttl0_discard += 1862 HILO_64_REGPAIR(mstats.ttl0_discard); 1863 p_stats->common.tpa_coalesced_pkts += 1864 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1865 p_stats->common.tpa_coalesced_events += 1866 HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1867 p_stats->common.tpa_aborts_num += 1868 HILO_64_REGPAIR(mstats.tpa_aborts_num); 1869 p_stats->common.tpa_coalesced_bytes += 1870 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1871 } 1872 1873 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, 1874 struct ecore_ptt *p_ptt, 1875 struct ecore_eth_stats *p_stats) 1876 { 1877 struct ecore_eth_stats_common *p_common = &p_stats->common; 1878 struct port_stats port_stats; 1879 int j; 1880 1881 OSAL_MEMSET(&port_stats, 0, sizeof(port_stats)); 1882 1883 ecore_memcpy_from(p_hwfn, p_ptt, &port_stats, 1884 p_hwfn->mcp_info->port_addr + 1885 OFFSETOF(struct public_port, stats), 1886 sizeof(port_stats)); 1887 1888 p_common->rx_64_byte_packets += port_stats.eth.r64; 1889 p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; 1890 p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; 1891 p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; 1892 p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; 1893 p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; 1894 p_common->rx_crc_errors += port_stats.eth.rfcs; 1895 p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; 1896 p_common->rx_pause_frames += port_stats.eth.rxpf; 1897 p_common->rx_pfc_frames += port_stats.eth.rxpp; 1898 p_common->rx_align_errors += port_stats.eth.raln; 1899 p_common->rx_carrier_errors += port_stats.eth.rfcr; 1900 p_common->rx_oversize_packets += port_stats.eth.rovr; 1901 p_common->rx_jabbers += port_stats.eth.rjbr; 1902 p_common->rx_undersize_packets += port_stats.eth.rund; 1903 p_common->rx_fragments += port_stats.eth.rfrg; 1904 p_common->tx_64_byte_packets += port_stats.eth.t64; 1905 p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; 1906 p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; 1907 p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; 1908 p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; 1909 p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; 1910 p_common->tx_pause_frames += port_stats.eth.txpf; 1911 p_common->tx_pfc_frames += port_stats.eth.txpp; 1912 p_common->rx_mac_bytes += port_stats.eth.rbyte; 1913 p_common->rx_mac_uc_packets += port_stats.eth.rxuca; 1914 p_common->rx_mac_mc_packets += port_stats.eth.rxmca; 1915 p_common->rx_mac_bc_packets += port_stats.eth.rxbca; 1916 p_common->rx_mac_frames_ok += port_stats.eth.rxpok; 1917 p_common->tx_mac_bytes += port_stats.eth.tbyte; 1918 p_common->tx_mac_uc_packets += port_stats.eth.txuca; 1919 p_common->tx_mac_mc_packets += port_stats.eth.txmca; 1920 p_common->tx_mac_bc_packets += port_stats.eth.txbca; 1921 p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; 1922 for (j = 0; j < 8; j++) { 1923 p_common->brb_truncates += port_stats.brb.brb_truncate[j]; 1924 p_common->brb_discards += port_stats.brb.brb_discard[j]; 1925 } 1926 1927 if (ECORE_IS_BB(p_hwfn->p_dev)) { 1928 struct ecore_eth_stats_bb *p_bb = &p_stats->bb; 1929 1930 p_bb->rx_1519_to_1522_byte_packets += 1931 port_stats.eth.u0.bb0.r1522; 1932 p_bb->rx_1519_to_2047_byte_packets += 1933 port_stats.eth.u0.bb0.r2047; 1934 p_bb->rx_2048_to_4095_byte_packets += 1935 port_stats.eth.u0.bb0.r4095; 1936 p_bb->rx_4096_to_9216_byte_packets += 1937 port_stats.eth.u0.bb0.r9216; 1938 p_bb->rx_9217_to_16383_byte_packets += 1939 port_stats.eth.u0.bb0.r16383; 1940 p_bb->tx_1519_to_2047_byte_packets += 1941 port_stats.eth.u1.bb1.t2047; 1942 p_bb->tx_2048_to_4095_byte_packets += 1943 port_stats.eth.u1.bb1.t4095; 1944 p_bb->tx_4096_to_9216_byte_packets += 1945 port_stats.eth.u1.bb1.t9216; 1946 p_bb->tx_9217_to_16383_byte_packets += 1947 port_stats.eth.u1.bb1.t16383; 1948 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec; 1949 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl; 1950 } else { 1951 struct ecore_eth_stats_ah *p_ah = &p_stats->ah; 1952 1953 p_ah->rx_1519_to_max_byte_packets += 1954 port_stats.eth.u0.ah0.r1519_to_max; 1955 p_ah->tx_1519_to_max_byte_packets = 1956 port_stats.eth.u1.ah1.t1519_to_max; 1957 } 1958 } 1959 1960 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, 1961 struct ecore_ptt *p_ptt, 1962 struct ecore_eth_stats *stats, 1963 u16 statistics_bin, bool b_get_port_stats) 1964 { 1965 __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); 1966 __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); 1967 __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); 1968 __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); 1969 1970 #ifndef ASIC_ONLY 1971 /* Avoid getting PORT stats for emulation.*/ 1972 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) 1973 return; 1974 #endif 1975 1976 if (b_get_port_stats && p_hwfn->mcp_info) 1977 __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats); 1978 } 1979 1980 static void _ecore_get_vport_stats(struct ecore_dev *p_dev, 1981 struct ecore_eth_stats *stats) 1982 { 1983 u8 fw_vport = 0; 1984 int i; 1985 1986 OSAL_MEMSET(stats, 0, sizeof(*stats)); 1987 1988 for_each_hwfn(p_dev, i) { 1989 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1990 struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 1991 ecore_ptt_acquire(p_hwfn) : OSAL_NULL; 1992 1993 if (IS_PF(p_dev)) { 1994 /* The main vport index is relative first */ 1995 if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) { 1996 DP_ERR(p_hwfn, "No vport available!\n"); 1997 goto out; 1998 } 1999 } 2000 2001 if (IS_PF(p_dev) && !p_ptt) { 2002 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2003 continue; 2004 } 2005 2006 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, 2007 IS_PF(p_dev) ? true : false); 2008 2009 out: 2010 if (IS_PF(p_dev) && p_ptt) 2011 ecore_ptt_release(p_hwfn, p_ptt); 2012 } 2013 } 2014 2015 void ecore_get_vport_stats(struct ecore_dev *p_dev, 2016 struct ecore_eth_stats *stats) 2017 { 2018 u32 i; 2019 2020 if (!p_dev) { 2021 OSAL_MEMSET(stats, 0, sizeof(*stats)); 2022 return; 2023 } 2024 2025 _ecore_get_vport_stats(p_dev, stats); 2026 2027 if (!p_dev->reset_stats) 2028 return; 2029 2030 /* Reduce the statistics baseline */ 2031 for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++) 2032 ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i]; 2033 } 2034 2035 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ 2036 void ecore_reset_vport_stats(struct ecore_dev *p_dev) 2037 { 2038 int i; 2039 2040 for_each_hwfn(p_dev, i) { 2041 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2042 struct eth_mstorm_per_queue_stat mstats; 2043 struct eth_ustorm_per_queue_stat ustats; 2044 struct eth_pstorm_per_queue_stat pstats; 2045 struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 2046 ecore_ptt_acquire(p_hwfn) : OSAL_NULL; 2047 u32 addr = 0, len = 0; 2048 2049 if (IS_PF(p_dev) && !p_ptt) { 2050 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2051 continue; 2052 } 2053 2054 OSAL_MEMSET(&mstats, 0, sizeof(mstats)); 2055 __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); 2056 ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); 2057 2058 OSAL_MEMSET(&ustats, 0, sizeof(ustats)); 2059 __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); 2060 ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); 2061 2062 OSAL_MEMSET(&pstats, 0, sizeof(pstats)); 2063 __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); 2064 ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); 2065 2066 if (IS_PF(p_dev)) 2067 ecore_ptt_release(p_hwfn, p_ptt); 2068 } 2069 2070 /* PORT statistics are not necessarily reset, so we need to 2071 * read and create a baseline for future statistics. 2072 */ 2073 if (!p_dev->reset_stats) 2074 DP_INFO(p_dev, "Reset stats not allocated\n"); 2075 else 2076 _ecore_get_vport_stats(p_dev, p_dev->reset_stats); 2077 } 2078 2079 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, 2080 struct ecore_ptt *p_ptt, 2081 struct ecore_arfs_config_params *p_cfg_params) 2082 { 2083 if (p_cfg_params->arfs_enable) { 2084 ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 2085 p_cfg_params->tcp, 2086 p_cfg_params->udp, 2087 p_cfg_params->ipv4, 2088 p_cfg_params->ipv6); 2089 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2090 "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n", 2091 p_cfg_params->tcp ? "Enable" : "Disable", 2092 p_cfg_params->udp ? "Enable" : "Disable", 2093 p_cfg_params->ipv4 ? "Enable" : "Disable", 2094 p_cfg_params->ipv6 ? "Enable" : "Disable"); 2095 } else { 2096 ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2097 } 2098 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n", 2099 p_cfg_params->arfs_enable ? "Enable" : "Disable"); 2100 } 2101 2102 enum _ecore_status_t 2103 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, 2104 struct ecore_ptt *p_ptt, 2105 struct ecore_spq_comp_cb *p_cb, 2106 dma_addr_t p_addr, u16 length, 2107 u16 qid, u8 vport_id, 2108 bool b_is_add) 2109 { 2110 struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL; 2111 struct ecore_spq_entry *p_ent = OSAL_NULL; 2112 struct ecore_sp_init_data init_data; 2113 u16 abs_rx_q_id = 0; 2114 u8 abs_vport_id = 0; 2115 enum _ecore_status_t rc = ECORE_NOTIMPL; 2116 2117 rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 2118 if (rc != ECORE_SUCCESS) 2119 return rc; 2120 2121 rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); 2122 if (rc != ECORE_SUCCESS) 2123 return rc; 2124 2125 /* Get SPQ entry */ 2126 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 2127 init_data.cid = ecore_spq_get_cid(p_hwfn); 2128 2129 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 2130 2131 if (p_cb) { 2132 init_data.comp_mode = ECORE_SPQ_MODE_CB; 2133 init_data.p_comp_data = p_cb; 2134 } else { 2135 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 2136 } 2137 2138 rc = ecore_sp_init_request(p_hwfn, &p_ent, 2139 ETH_RAMROD_GFT_UPDATE_FILTER, 2140 PROTOCOLID_ETH, &init_data); 2141 if (rc != ECORE_SUCCESS) 2142 return rc; 2143 2144 p_ramrod = &p_ent->ramrod.rx_update_gft; 2145 2146 DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr); 2147 p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length); 2148 p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id); 2149 p_ramrod->vport_id = abs_vport_id; 2150 p_ramrod->filter_type = RFS_FILTER_TYPE; 2151 p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER 2152 : GFT_DELETE_FILTER; 2153 2154 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2155 "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", 2156 abs_vport_id, abs_rx_q_id, 2157 b_is_add ? "Adding" : "Removing", 2158 (unsigned long long)p_addr, length); 2159 2160 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 2161 } 2162