/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
	if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
		qed_cxt_release_cid(p_hwfn, p_cid->cid);
	vfree(p_cid);
}

/* This internal function is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      u8 vf_qid,
		      struct qed_queue_start_common_params *p_params)
{
	bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vmalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;
	memset(p_cid, 0, sizeof(*p_cid));

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->vf_qid = vf_qid;
	p_cid->rel = *p_params;

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (b_is_same) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

	/* SBs relevant information was already provided as absolute */
	p_cid->abs.sb = p_cid->rel.sb;
	p_cid->abs.sb_idx = p_cid->rel.sb_idx;

	/* This is tricky - we're actually interested in whether this is a PF
	 * entry meant for the VF.
	 */
	if (!b_is_same)
		p_cid->is_vf = true;
out:
	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}

static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
						  u16 opaque_fid, struct
						  qed_queue_start_common_params
						  *p_params)
{
	struct qed_queue_cid *p_cid;
	u32 cid = 0;

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->cdev)) {
		if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
	if (!p_cid && IS_PF(p_hwfn->cdev))
		qed_cxt_release_cid(p_hwfn, cid);

	return p_cid;
}

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

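	/* Post the VPORT_START ramrod on the slow-path queue; EBLOCK mode
	 * waits for the firmware completion before returning.
	 */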
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}

static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_params)
{
	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
	u16 abs_l2_queue = 0, capabilities = 0;
	int rc = 0, i;

	if (!p_params) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
		     ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
	rss->update_rss_capabilities = p_params->update_rss_capabilities;
	rss->update_rss_ind_table = p_params->update_rss_ind_table;
	rss->update_rss_key = p_params->update_rss_key;

	rss->rss_mode = p_params->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR :
			ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
	rss->tbl_size = p_params->rss_table_size_log;

	rss->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   rss->rss_mode, rss->update_rss_capabilities,
		   capabilities, rss->update_rss_ind_table,
		   rss->update_rss_key);

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		rc = qed_fw_l2_queue(p_hwfn,
				     (u8)p_params->rss_ind_table[i],
				     &abs_l2_queue);
		if (rc)
			return rc;

		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
			   i, rss->indirection_table[i]);
	}

	for (i = 0; i < 10; i++)
		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

	return rc;
}

static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		p_ramrod->common.update_tpa_param_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}

int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
	p_ramrod->sb_index = p_cid->abs.sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->is_vf) {
		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   !!p_cid->b_legacy_vf ? " [legacy]" : "",
			   p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to use as a handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

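/* Stop a PF-controlled Rx queue. Completion can be requested on the CQE
 * ring and/or as an event on the EQ; queues opened on behalf of a VF need
 * the EQ event so the PF can relay the answer back to the VF.
 */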
static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
	p_ramrod->sb_index = p_cid->abs.sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	union qed_qm_pq_params pq_params;
	int rc;

	memset(&pq_params, 0, sizeof(pq_params));

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
						    &pq_params));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}

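/* Start a Tx queue: allocate a queue CID, run either the PF ramrod flow or
 * the VF->PF channel, and hand back a doorbell address plus an opaque queue
 * handle.
 */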
static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
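		/* REPLACE is built from two commands - flush all existing
		 * filters of this type on the vport, then add the new one.
		 */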
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates crc 32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->mftag_filter_discards +=
		HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->mac_filter_discards +=
		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->packet_too_big_discard +=
		HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->tpa_coalesced_pkts +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->tpa_coalesced_events +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->tpa_coalesced_bytes +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_stats->rx_64_byte_packets += port_stats.eth.r64;
	p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
	p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
	p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
	p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
	p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
	p_stats->rx_crc_errors += port_stats.eth.rfcs;
	p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_stats->rx_pause_frames += port_stats.eth.rxpf;
	p_stats->rx_pfc_frames += port_stats.eth.rxpp;
	p_stats->rx_align_errors += port_stats.eth.raln;
	p_stats->rx_carrier_errors += port_stats.eth.rfcr;
	p_stats->rx_oversize_packets += port_stats.eth.rovr;
	p_stats->rx_jabbers += port_stats.eth.rjbr;
	p_stats->rx_undersize_packets += port_stats.eth.rund;
	p_stats->rx_fragments += port_stats.eth.rfrg;
	p_stats->tx_64_byte_packets += port_stats.eth.t64;
	p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
	p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
	p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
	p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
	p_stats->tx_pause_frames += port_stats.eth.txpf;
	p_stats->tx_pfc_frames += port_stats.eth.txpp;
	p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
	p_stats->tx_total_collisions += port_stats.eth.tncl;
	p_stats->rx_mac_bytes += port_stats.eth.rbyte;
	p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_stats->tx_mac_bytes += port_stats.eth.tbyte;
	p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
	p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
	p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
	p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
		p_stats->brb_discards += port_stats.brb.brb_discard[j];
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* The main vport index is relative first */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev) ? true : false);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			for_each_hwfn(cdev, i)
				info->num_queues +=
					FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
			if (cdev->int_params.fp_msix_cnt)
				info->num_queues =
					min_t(u8, info->num_queues,
					      cdev->int_params.fp_msix_cnt);
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);
	} else {
		qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
		if (cdev->num_hwfns > 1) {
			u8 queues = 0;

			qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
			info->num_queues += queues;
		}

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		memset(info->common.hw_mac, 0, ETH_ALEN);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		qed_hw_start_fastpath(p_hwfn);

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params sp_rss_params;
	int rc, i;

	if (!cdev)
		return -ENODEV;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
		params->update_accept_any_vlan_flg;

	/* RSS is a bit tricky, since the upper layer isn't familiar with
	 * hwfns; we need to re-fix the RSS values per engine for CMT.
	 */
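	/* Worked example of the CMT fix-up below (illustrative numbers): with
	 * cdev->num_hwfns == 2 and an indirection table referencing global
	 * queues 0..7 (max == 7), divisor == (7 + 2 - 1) / 2 == 4, so every
	 * entry is reduced modulo 4 and addresses one of the 4 queues local
	 * to each hwfn. If the table references at most one queue per hwfn
	 * (max + 1 <= num_hwfns), RSS is disabled instead.
	 */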
	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
		struct qed_update_vport_rss_params *rss = &params->rss_params;
		int k, max = 0;

		/* Find largest entry, since it's possible RSS needs to
		 * be disabled [in case only 1 queue per-hwfn]
		 */
		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
			max = (max > rss->rss_ind_table[k]) ?
			      max : rss->rss_ind_table[k];

		/* Either fix RSS values or disable RSS */
		if (cdev->num_hwfns < max + 1) {
			int divisor = (max + cdev->num_hwfns - 1) /
				      cdev->num_hwfns;

			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - fixing RSS values (modulo %02x)\n",
				   divisor);

			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
				rss->rss_ind_table[k] =
					rss->rss_ind_table[k] % divisor;
		} else {
			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			params->update_rss_flg = 0;
		}
	}

	/* Now translate the accepted RSS configuration into sp params */
	if (params->update_rss_flg) {
		sp_rss_params.update_rss_config = 1;
		sp_rss_params.rss_enable = 1;
		sp_rss_params.update_rss_capabilities = 1;
		sp_rss_params.update_rss_ind_table = 1;
		sp_rss_params.update_rss_key = 1;
		sp_rss_params.rss_caps = params->rss_params.rss_caps;
		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
		memcpy(sp_rss_params.rss_ind_table,
		       params->rss_params.rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
		       QED_RSS_KEY_SIZE * sizeof(u32));
		sp_params.rss_params = &sp_rss_params;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}
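/* The queue start/stop callbacks below spread a global queue index across
 * the hwfns of a CMT device: the engine is picked from rss_num
 * (% num_hwfns) and queue_id is scaled down (/ num_hwfns) to an
 * engine-local index. Illustrative example with num_hwfns == 2, when the
 * caller passes the same global index for both: global queue 5 lands on
 * hwfn 1 as its local queue 2.
 */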
static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, 0,
				    pbl_addr, pbl_size, ret_params);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	qed_hw_stop_fastpath(cdev);

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunn_update_params tunn_info;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port == 1) {
		tunn_info.update_vxlan_udp_port = 1;
		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port == 1) {
		tunn_info.update_geneve_udp_port = 1;
		tunn_info.geneve_udp_port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];

		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);

		if (rc)
			return rc;
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}
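/* A hedged caller sketch for the unicast/multicast filter path below
 * (hypothetical values; the real caller reaches these helpers through the
 * qed_eth_ops.filter_config callback):
 *
 *	struct qed_filter_params f = {
 *		.type = QED_FILTER_TYPE_UCAST,
 *		.filter.ucast = {
 *			.type = QED_FILTER_XCAST_TYPE_ADD,
 *			.mac_valid = true,
 *			.mac = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 },
 *		},
 *	};
 *
 *	rc = ops->filter_config(cdev, &f);
 */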
static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
};
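/* The exported pair below is how an L2 protocol driver binds to qed: it
 * obtains the callback table with qed_get_eth_ops() and releases it with
 * qed_put_eth_ops() when done (currently a no-op, see the TODO). A minimal
 * consumer sketch (hypothetical caller, error handling omitted):
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *	struct qed_dev_eth_info info;
 *
 *	ops->fill_dev_info(cdev, &info);
 *	ops->register_ops(cdev, &my_cb_ops, my_cookie);
 */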
const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);