/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License available
 * at http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

struct ecore_wake_info {
	u32 wk_info;
	u32 wk_details;
	u32 wk_pkt_len;
	u8 wk_buffer[256];
};

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module
 * @param dp_level
 * @param dp_ctx
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 */
void ecore_init_struct(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_free - free device resources
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc - allocate device resources
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup - setup device resources
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

enum ecore_override_force_load {
	ECORE_OVERRIDE_FORCE_LOAD_NONE,
	ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
	ECORE_OVERRIDE_FORCE_LOAD_NEVER,
};

struct ecore_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine
	 * for the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum ecore_override_force_load override_force_load;
};

struct ecore_hw_init_params {
	/* Tunneling parameters */
	struct ecore_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for
	 * tx-switching
	 */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct ecore_drv_load_params *p_drv_load_params;
};

/**
 * @brief ecore_hw_init - initialize the HW
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);
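/*
 * Usage sketch (illustrative only, not part of this API): filling the load
 * parameters for a normal (non-crash-kernel) load and starting the HW.
 * `p_dev' and `fw_image_data' are assumed to come from the caller's probe
 * context; ECORE_INT_MODE_MSIX is the MSI-X enumerator from ecore_int_api.h.
 *
 *	struct ecore_drv_load_params load_params = { 0 };
 *	struct ecore_hw_init_params init_params = { 0 };
 *
 *	load_params.is_crash_kernel = false;
 *	load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *	load_params.avoid_eng_reset = false;
 *	load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;
 *
 *	init_params.b_hw_start = true;
 *	init_params.int_mode = ECORE_INT_MODE_MSIX;
 *	init_params.allow_npar_tx_switch = true;
 *	init_params.bin_fw_data = fw_image_data;
 *	init_params.p_drv_load_params = &load_params;
 *
 *	if (ecore_hw_init(p_dev, &init_params) != ECORE_SUCCESS)
 *		goto err;
 */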
/**
 * @brief ecore_hw_timers_stop_all - stop all HW timers
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop - stop the HW
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param p_dev
 */
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

#ifndef LINUX_REMOVE
/**
 * @brief ecore_hw_hibernate_prepare - should be called when
 *        the system is going into the hibernate state
 *
 * @param p_dev
 */
void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_hibernate_resume - should be called when the system is
 *        resuming from D3 power state and before calling ecore_hw_init.
 *
 * @param p_dev
 */
void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);

#endif

/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 */
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate probe is passed even though some wrongness
	 * has occurred; Trying to actually use [i.e., hw_init()] might have
	 * dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field would be set with the return,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;
};

/**
 * @brief ecore_hw_prepare - prepare the device for use
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);
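/*
 * Usage sketch (illustrative only): a relaxed probe flow. With
 * `b_relaxed_probe' set, a FAILED_* result still returns an error from
 * ecore_hw_prepare(), while a BAD_* result in `p_relaxed_res' lets the
 * probe pass in a degraded state. `handle_degraded_probe' is a
 * hypothetical caller-side helper, not part of this API.
 *
 *	struct ecore_hw_prepare_params params = { 0 };
 *	enum _ecore_status_t rc;
 *
 *	params.initiate_pf_flr = true;
 *	params.b_relaxed_probe = true;
 *
 *	rc = ecore_hw_prepare(p_dev, &params);
 *	if (rc != ECORE_SUCCESS)
 *		return rc; // FAILED_* result; probe has already cleaned up
 *	if (params.p_relaxed_res != ECORE_HW_PREPARE_SUCCESS)
 *		handle_degraded_probe(); // BAD_* result; usable but degraded
 */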
/**
 * @brief ecore_hw_remove - remove the device
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_set_nwuf_reg - configure a wake-up pattern register
 *
 * @param p_dev
 * @param b_enable - enable or disable the pattern
 * @param reg_idx - index of the pattern register
 * @param pattern_size - size of pattern
 * @param crc - CRC value of pattern & mask
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
					const bool b_enable,
					u32 reg_idx,
					u32 pattern_size,
					u32 crc);

/**
 * @brief ecore_get_wake_info - get magic packet buffer
 *
 * @param p_dev
 * @param wake_info - pointer to ecore_wake_info buffer
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev,
					 struct ecore_wake_info *wake_info);

/**
 * @brief ecore_wol_buffer_clear - clear magic packet buffer
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_wol_buffer_clear(struct ecore_dev *p_dev);

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);
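/*
 * Usage sketch (illustrative only): PTT windows are a limited per-hwfn
 * resource, so each successful acquire must be paired with a release on
 * every exit path. ECORE_AGAIN is the status enumerator from
 * ecore_status.h; OSAL_NULL is the OSAL NULL definition.
 *
 *	struct ecore_ptt *p_ptt;
 *
 *	p_ptt = ecore_ptt_acquire(p_hwfn);
 *	if (p_ptt == OSAL_NULL)
 *		return ECORE_AGAIN;   // no free window; caller may retry
 *
 *	... register access through p_ptt ...
 *
 *	ecore_ptt_release(p_hwfn, p_ptt);
 */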
#ifndef __EXTRACT__LINUX__
struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
};

struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};
#endif

enum ecore_dmae_address_type_t {
	ECORE_DMAE_ADDRESS_HOST_VIRT,
	ECORE_DMAE_ADDRESS_HOST_PHYS,
	ECORE_DMAE_ADDRESS_GRC
};

/* Values for the flags field below. If the ECORE_DMAE_FLAG_RW_REPL_SRC flag
 * is set, the source is a block of length DMAE_MAX_RW_SIZE and the
 * destination is larger, the source block will be duplicated as many times
 * as required to fill the destination block. This is used mostly to write
 * a zeroed buffer to a destination address using DMA.
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define ECORE_DMAE_FLAG_VF_SRC		0x00000002
#define ECORE_DMAE_FLAG_VF_DST		0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST	0x00000008

struct ecore_dmae_params {
	u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
};

/**
 * @brief ecore_dmae_host2grc - copy data from source address to
 *        dmae registers using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param flags (one of the flags defined above)
 */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr,
		    u32 size_in_dwords,
		    u32 flags);
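/*
 * Usage sketch (illustrative only): zeroing a large GRC region by
 * replicating a single zeroed source block with
 * ECORE_DMAE_FLAG_RW_REPL_SRC, as described above. `zero_buf_phys',
 * `grc_dest_addr' and `len_in_dwords' are assumptions of this sketch.
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
 *				 (u64)zero_buf_phys,  // DMA-able zeroed buffer
 *				 grc_dest_addr,
 *				 len_in_dwords,
 *				 ECORE_DMAE_FLAG_RW_REPL_SRC);
 */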
/**
 * @brief ecore_dmae_grc2host - Read data from dmae data offset
 *        to the destination address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr,
		    u32 size_in_dwords,
		    u32 flags);

/**
 * @brief ecore_dmae_host2host - copy data from a source address
 *        to a destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params
 */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords,
		     struct ecore_dmae_params *p_params);

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);
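/*
 * Usage sketch (illustrative only): allocating a PBL-mode chain with a
 * 16-bit element counter and freeing it on teardown. The enumerators come
 * from ecore_chain.h; `struct my_tx_bd' and the element count are
 * assumptions of this sketch.
 *
 *	struct ecore_chain tx_chain;
 *
 *	rc = ecore_chain_alloc(p_dev,
 *			       ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			       ECORE_CHAIN_MODE_PBL,
 *			       ECORE_CHAIN_CNT_TYPE_U16,
 *			       256,                       // num_elems
 *			       sizeof(struct my_tx_bd),   // elem_size
 *			       &tx_chain,
 *			       OSAL_NULL);                // no external PBL
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	...
 *	ecore_chain_free(p_dev, &tx_chain);
 */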
/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);

/**
 * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to add
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 *p_filter);

/**
 * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u8 *p_filter);

enum ecore_llh_port_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 * @param type - type of filters and comparing
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u16 source_port_or_eth_type,
			      u16 dest_port,
			      enum ecore_llh_port_filter_type_t type);

/**
 * @brief ecore_llh_remove_protocol_filter - removes a protocol filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 * @param type - type of filters and comparing
 */
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 source_port_or_eth_type,
				 u16 dest_port,
				 enum ecore_llh_port_filter_type_t type);
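/*
 * Usage sketch (illustrative only): adding and later removing an LLH
 * filter on a TCP destination port. The port value is arbitrary, and the
 * source-port argument is presumably ignored for DEST_PORT-only filter
 * types.
 *
 *	rc = ecore_llh_add_protocol_filter(p_hwfn, p_ptt,
 *					   0, 3260,
 *					   ECORE_LLH_FILTER_TCP_DEST_PORT);
 *	...
 *	ecore_llh_remove_protocol_filter(p_hwfn, p_ptt,
 *					 0, 3260,
 *					 ECORE_LLH_FILTER_TCP_DEST_PORT);
 */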
/**
 * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt);

/**
 * @brief ecore_llh_set_function_as_default - set function as default per port
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief Cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 id,
					 bool is_vf);

/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for an
 *        Rx and a Tx queue. Coalescing can be set up to 511 usec, with
 *        decreasing accuracy [the bigger the value the less accurate] up to
 *        an error of 3 usec for the highest values.
 *        While the API allows setting coalescing per-qid, all queues sharing
 *        a SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
 *        0x100-0x1ff], otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx coalesce value in micro seconds.
 * @param tx_coal - Tx coalesce value in micro seconds.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);

/**
 * @brief - Recalculate feature distributions based on HW resources and
 *        user inputs. Currently this affects RDMA_CNQ, PF_L2_QUE and
 *        VF_L2_QUE. As a result, this must not be called while RDMA is
 *        active or while VFs are enabled.
 *
 * @param p_hwfn
 */
void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_change_pci_hwfn - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u8 enable);

#endif