1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 
 *
 */

#ifndef __ECORE_L2_API_H__
#define __ECORE_L2_API_H__

#include "ecore_status.h"
#include "ecore_sp_api.h"
#include "ecore_int_api.h"

#ifndef __EXTRACT__LINUX__
/* RSS hash-type capabilities; values are bit-flags and may be OR-ed */
enum ecore_rss_caps {
	ECORE_RSS_IPV4		= 0x1,
	ECORE_RSS_IPV6		= 0x2,
	ECORE_RSS_IPV4_TCP	= 0x4,
	ECORE_RSS_IPV6_TCP	= 0x8,
	ECORE_RSS_IPV4_UDP	= 0x10,
	ECORE_RSS_IPV6_UDP	= 0x20,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */

#define ECORE_MAX_PHC_DRIFT_PPB 291666666

/* PTP Rx time-stamping filter modes */
enum ecore_ptp_filter_type {
	ECORE_PTP_FILTER_NONE,
	ECORE_PTP_FILTER_ALL,
	ECORE_PTP_FILTER_V1_L4_EVENT,
	ECORE_PTP_FILTER_V1_L4_GEN,
	ECORE_PTP_FILTER_V2_L4_EVENT,
	ECORE_PTP_FILTER_V2_L4_GEN,
	ECORE_PTP_FILTER_V2_L2_EVENT,
	ECORE_PTP_FILTER_V2_L2_GEN,
	ECORE_PTP_FILTER_V2_EVENT,
	ECORE_PTP_FILTER_V2_GEN
};

/* PTP Tx time-stamping on/off */
enum ecore_ptp_hwtstamp_tx_type {
	ECORE_PTP_HWTSTAMP_TX_OFF,
	ECORE_PTP_HWTSTAMP_TX_ON,
};
#endif

#ifndef __EXTRACT__LINUX__
/* Common inputs for starting an Rx or Tx queue */
struct ecore_queue_start_common_params {
	/* Should always be relative to entity sending this. */
	u8 vport_id;
	u16 queue_id;

	/* Relative, but relevant only for PFs */
	u8 stats_id;

	struct ecore_sb_info *p_sb;
	u8 sb_idx;

	u8 tc;
};

/* Outputs of a successful Rx queue start */
struct ecore_rxq_start_ret_params {
	void OSAL_IOMEM *p_prod;
	/* Opaque handle later passed to ecore_eth_rx_queue_stop() */
	void *p_handle;
};

/* Outputs of a successful Tx queue start */
struct ecore_txq_start_ret_params {
	void OSAL_IOMEM *p_doorbell;
	/* Opaque handle later passed to ecore_eth_tx_queue_stop() */
	void *p_handle;
};
#endif

/* RSS configuration; 'update_*' flags select which fields are applied */
struct ecore_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps; /* presumably a mask of enum ecore_rss_caps — confirm */
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */

	/* Indirection table consists of rx queue handles */
	void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
	u32 rss_key[ECORE_RSS_KEY_SIZE];
};

/* TPA/SGE aggregation tunables; 'update_*' flags select which groups apply */
struct ecore_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;		/* applies the tpa_*_en_flg group */
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;	/* applies the remaining tpa_* fields */
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};

enum ecore_filter_opcode {
	ECORE_FILTER_ADD,
	ECORE_FILTER_REMOVE,
	ECORE_FILTER_MOVE,
	ECORE_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
	ECORE_FILTER_FLUSH,	/* Removes all filters */
};

enum ecore_filter_ucast_type {
	ECORE_FILTER_MAC,
	ECORE_FILTER_VLAN,
	ECORE_FILTER_MAC_VLAN,
	ECORE_FILTER_INNER_MAC,
	ECORE_FILTER_INNER_VLAN,
	ECORE_FILTER_INNER_PAIR,
	ECORE_FILTER_INNER_MAC_VNI_PAIR,
	ECORE_FILTER_MAC_VNI_PAIR,
	ECORE_FILTER_VNI,
};

/* A single unicast filter command; see ecore_filter_ucast_cmd() */
struct ecore_filter_ucast {
	enum ecore_filter_opcode opcode;
	enum ecore_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

/* A multicast filter command; see ecore_filter_mcast_cmd() */
struct ecore_filter_mcast {
	/* MOVE is not supported for multicast */
	enum ecore_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;
#define ECORE_MAX_MC_ADDRS 64
	unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
};

/* Rx/Tx accept-mode config; *_accept_filter hold ECORE_ACCEPT_* bit masks */
struct ecore_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define ECORE_ACCEPT_NONE		0x01
#define ECORE_ACCEPT_UCAST_MATCHED	0x02
#define ECORE_ACCEPT_UCAST_UNMATCHED	0x04
#define ECORE_ACCEPT_MCAST_MATCHED	0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED	0x10
#define ECORE_ACCEPT_BCAST		0x20
};

#ifndef __EXTRACT__LINUX__
enum ecore_filter_config_mode {
	ECORE_FILTER_CONFIG_MODE_DISABLE,
	ECORE_FILTER_CONFIG_MODE_5_TUPLE,
	ECORE_FILTER_CONFIG_MODE_L4_PORT,
	ECORE_FILTER_CONFIG_MODE_IP_DEST,
};
#endif

/* Inputs for ecore_arfs_mode_configure() */
struct ecore_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	enum ecore_filter_config_mode mode;
};

/* Add / remove / move / remove-all unicast MAC-VLAN filters.
 * FW will assert in the following cases, so driver should take care...:
 * 1. Adding a filter to a full table.
 * 2. Adding a filter which already exists on that vport.
 * 3. Removing a filter which doesn't exist.
 */
enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);

/* Add / remove / move multicast MAC filters. */
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);

/* Set "accept" filters */
enum _ecore_status_t
ecore_filter_accept_cmd(
	struct ecore_dev *p_dev,
	u8 vport,
	struct ecore_filter_accept_flags accept_flags,
	u8 update_accept_any_vlan,
	u8 accept_any_vlan,
	enum spq_mode comp_mode,
	struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
 *
 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
 * the VPort ID is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params Inputs; Relative for PF [SB being an exception]
 * @param bd_max_bytes Maximum bytes that can be placed on a BD
 * @param bd_chain_phys_addr Physical address of BDs for receive.
 * @param cqe_pbl_addr Physical address of the CQE PBL Table.
 * @param cqe_pbl_size Size of the CQE PBL Table
 * @param p_ret_params Pointer to struct to be filled with outputs.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params);

/**
 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
 * @param p_rxq			Handler of queue to close
 * @param eq_completion_only	If True completion will be on
 *				EQe, if False completion will be
 *				on EQe if p_hwfn opaque
 *				different from the RXQ opaque
 *				otherwise on CQe.
 * @param cqe_completion	If True completion will be
 *				received on CQe.
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			void *p_rxq,
			bool eq_completion_only,
			bool cqe_completion);

/**
 * @brief - TX Queue Start Ramrod
 *
 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
 * the VPort is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params
 * @param tc traffic class to use with this L2 txq
 * @param pbl_addr address of the pbl array
 * @param pbl_size number of entries in pbl
 * @param p_ret_params Pointer to fill the return parameters in.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params);

/**
 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
 * @param p_txq - handle to Tx queue needed to be closed
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_txq);

enum ecore_tpa_mode {
	ECORE_TPA_MODE_NONE,
	ECORE_TPA_MODE_RSC,
	ECORE_TPA_MODE_GRO,
	ECORE_TPA_MODE_MAX
};

/* Inputs for ecore_sp_vport_start() */
struct ecore_sp_vport_start_params {
	enum ecore_tpa_mode tpa_mode;
	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
	bool tx_switching;	/* Vport supports tx-switching */
	bool handle_ptp_pkts;	/* Handle PTP packets */
	bool only_untagged;	/* Untagged pkt control */
	bool drop_ttl0;		/* Drop packets with TTL = 0 */
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;		/* VPORT ID */
	u16 mtu;		/* VPORT MTU */
	bool zero_placement_offset;
	bool check_mac;
	bool check_ethtype;

	/* Strict behavior on transmission errors */
	bool b_err_illegal_vlan_mode;
	bool b_err_illegal_inband_mode;
	bool b_err_vlan_insert_with_inband;
	bool b_err_small_pkt;
	bool b_err_big_pkt;
	bool b_err_anti_spoof;
	bool b_err_ctrl_frame;
};

/**
 * @brief ecore_sp_vport_start -
 *
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
 * of the VPort is not enabled.
 *
 * @param p_hwfn
 * @param p_params VPORT start params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params);

/* Inputs for ecore_sp_vport_update(); each 'update_*_flg' selects whether
 * the paired value field is applied by the ramrod.
 */
struct ecore_sp_vport_update_params {
	u16 opaque_fid;
	u8 vport_id;
	u8 update_vport_active_rx_flg;
	u8 vport_active_rx_flg;
	u8 update_vport_active_tx_flg;
	u8 vport_active_tx_flg;
	u8 update_inner_vlan_removal_flg;
	u8 inner_vlan_removal_flg;
	u8 silent_vlan_removal_flg;
	u8 update_default_vlan_enable_flg;
	u8 default_vlan_enable_flg;
	u8 update_default_vlan_flg;
	u16 default_vlan;
	u8 update_tx_switching_flg;
	u8 tx_switching_flg;
	u8 update_approx_mcast_flg;
	u8 update_anti_spoofing_en_flg;
	u8 anti_spoofing_en;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;
	/* presumably approximate-multicast bins, gated by
	 * update_approx_mcast_flg — confirm exact layout against FW HSI.
	 */
	u32 bins[8];
	struct ecore_rss_params *rss_params;
	struct ecore_filter_accept_flags accept_flags;
	struct ecore_sge_tpa_params *sge_tpa_params;
};

/**
 * @brief ecore_sp_vport_update -
 *
 * This ramrod updates the parameters of the VPort. Every field can be updated
 * independently, according to flags.
 *
 * This ramrod is also used to set the VPort state to active after creation.
 * An Assert is generated if the VPort does not contain an RX queue.
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id);

enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_eth_rx_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param pp_rxq_handlers	An array of queue handlers to be updated.
 * @param num_rxqs		number of queues to update.
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      void **pp_rxq_handlers,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_eth_rx_queues_set_default -
 *
 * This ramrod sets RSS RX queue as default one.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param p_rxq_handler queue handler to be updated.
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_eth_rx_queues_set_default(struct ecore_hwfn *p_hwfn,
				   void *p_rxq_handler,
				   enum spq_mode comp_mode,
				   struct ecore_spq_comp_cb *p_comp_data);

void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats);

void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);

void ecore_reset_vport_stats(struct ecore_dev *p_dev);

/**
 * @brief ecore_arfs_mode_configure -
 *
 * Enable or disable rfs mode. It must accept at least one of tcp or udp true
 * and at least one of ipv4 or ipv6 true to enable rfs mode.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_cfg_params arfs mode configuration parameters.
 *
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params);

#ifndef __EXTRACT__LINUX__
/* Inputs for ecore_configure_rfs_ntuple_filter() */
struct ecore_ntuple_filter_params {
	/* Physically mapped address containing header of buffer to be used
	 * as filter.
	 */
	dma_addr_t addr;

	/* Length of header in bytes */
	u16 length;

	/* Relative queue-id to receive classified packet */
#define ECORE_RFS_NTUPLE_QID_RSS ((u16)-1)
	u16 qid;

	/* Identifier can either be according to vport-id or vfid */
	bool b_is_vf;
	u8 vport_id;
	u8 vf_id;

	/* true iff this filter is to be added. Else to be removed */
	bool b_is_add;
};
#endif

/**
 * @brief - ecore_configure_rfs_ntuple_filter
 *
 * This ramrod should be used to add or remove arfs hw filter
 *
 * @param p_hwfn
 * @param p_cb	Used for ECORE_SPQ_MODE_CB, where client would initialize
 *		it with cookie and callback function address, if not
 *		using this mode then client must pass NULL.
 * @param p_params
 */
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_spq_comp_cb *p_cb,
				  struct ecore_ntuple_filter_params *p_params);
#endif /* __ECORE_L2_API_H__ */