/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "v1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_INVALID_VPORT 0xffff

#define HCLGE_ROCE_VECTOR_OFFSET 96

#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

#define HCLGE_VECTOR_REG_BASE 0x20000

#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_VF_OFFSET 0x100000

#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK 0xf
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)

#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_D_PORT_BIT BIT(0)
#define HCLGE_S_PORT_BIT BIT(1)
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
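
/* Illustrative sketch, not part of the driver API: the RSS indirection
 * table has HCLGE_RSS_IND_TBL_SIZE entries, each holding a queue offset
 * within a vport's allocated RSS queues. A hypothetical helper mapping a
 * packet's RSS hash to a queue index (hclge_example_rss_queue is an
 * assumed name, not an existing function) could look like this.
 */
static inline u16 hclge_example_rss_queue(const u8 *ind_tbl, u32 hash,
					  u16 alloc_rss_size)
{
	if (!alloc_rss_size)
		return 0;

	/* the hash picks an indirection table entry; clamp the stored
	 * queue offset to the number of RSS queues actually allocated
	 */
	return ind_tbl[hash % HCLGE_RSS_IND_TBL_SIZE] % alloc_rss_size;
}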

#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
#define HCLGE_RSS_TC_SIZE_2 4
#define HCLGE_RSS_TC_SIZE_3 8
#define HCLGE_RSS_TC_SIZE_4 16
#define HCLGE_RSS_TC_SIZE_5 32
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128

#define HCLGE_TQP_RESET_TRY_TIMES 10

#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG 22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG 16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17

#define HCLGE_PHY_MDIX_CTRL_S (5)
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B (6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_MBX_IRQ,
	HCLGE_STATE_MAX
};

#define HCLGE_MPF_ENBALE 1
struct hclge_caps {
	u16 num_tqp;
	u16 num_buffer_cell;
	u32 flag;
	u16 vmdq;
};

enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_10M = 10,	/* 10 Mbps */
	HCLGE_MAC_SPEED_100M = 100,	/* 100 Mbps */
	HCLGE_MAC_SPEED_1G = 1000,	/* 1000 Mbps = 1 Gbps */
	HCLGE_MAC_SPEED_10G = 10000,	/* 10000 Mbps = 10 Gbps */
	HCLGE_MAC_SPEED_25G = 25000,	/* 25000 Mbps = 25 Gbps */
	HCLGE_MAC_SPEED_40G = 40000,	/* 40000 Mbps = 40 Gbps */
	HCLGE_MAC_SPEED_50G = 50000,	/* 50000 Mbps = 50 Gbps */
	HCLGE_MAC_SPEED_100G = 100000	/* 100000 Mbps = 100 Gbps */
};

enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

enum hclge_mta_dmac_sel_type {
	HCLGE_MAC_ADDR_47_36,
	HCLGE_MAC_ADDR_46_35,
	HCLGE_MAC_ADDR_45_34,
	HCLGE_MAC_ADDR_44_33,
};

struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u32 speed;
	int link; /* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
};

struct hclge_hw {
	void __iomem *io_base;
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
	struct hclge_caps caps;
	void *back;
};

/* TQP stats */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclge_tqp {
	struct device *dev; /* Device for DMA mapping */
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index; /* Global index in a NIC controller */

	bool alloced;
};

enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};

#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode; /* 0: sp; 1: dwrr */
	u8 tc_bit_map;
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];
};

struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode; /* 0: sp; 1: dwrr */
	u8 pgid;
	u32 bw_limit;
};

struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
};

struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg; /* It must be 1 if vNET-Base scheduling is used */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
};
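
/* Illustrative sketch, not part of the driver API: prio_tc[] maps a user
 * priority to its TC and hw_pfc_map is a per-TC bitmap, so a hypothetical
 * "is PFC enabled for this priority" check (hclge_example_prio_pfc_enabled
 * is an assumed name) could be written as follows.
 */
static inline bool hclge_example_prio_pfc_enabled(struct hclge_tm_info *tm_info,
						  u8 prio)
{
	if (prio >= HNAE3_MAX_USER_PRIO)
		return false;

	/* look up the TC for this priority, then test its bit in hw_pfc_map */
	return !!(tm_info->hw_pfc_map & BIT(tm_info->prio_tc[prio]));
}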

struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};

/* all 64bit stats, opcode id: 0x0030 */
struct hclge_64_bit_stats {
	/* query_igu_stat */
	u64 igu_rx_oversize_pkt;
	u64 igu_rx_undersize_pkt;
	u64 igu_rx_out_all_pkt;
	u64 igu_rx_uni_pkt;
	u64 igu_rx_multi_pkt;
	u64 igu_rx_broad_pkt;
	u64 rsv0;

	/* query_egu_stat */
	u64 egu_tx_out_all_pkt;
	u64 egu_tx_uni_pkt;
	u64 egu_tx_multi_pkt;
	u64 egu_tx_broad_pkt;

	/* ssu_ppp packet stats */
	u64 ssu_ppp_mac_key_num;
	u64 ssu_ppp_host_key_num;
	u64 ppp_ssu_mac_rlt_num;
	u64 ppp_ssu_host_rlt_num;

	/* ssu_tx_in_out_dfx_stats */
	u64 ssu_tx_in_num;
	u64 ssu_tx_out_num;
	/* ssu_rx_in_out_dfx_stats */
	u64 ssu_rx_in_num;
	u64 ssu_rx_out_num;
};

/* all 32bit stats, opcode id: 0x0031 */
struct hclge_32_bit_stats {
	u64 igu_rx_err_pkt;
	u64 igu_rx_no_eof_pkt;
	u64 igu_rx_no_sof_pkt;
	u64 egu_tx_1588_pkt;
	u64 egu_tx_err_pkt;
	u64 ssu_full_drop_num;
	u64 ssu_part_drop_num;
	u64 ppp_key_drop_num;
	u64 ppp_rlt_drop_num;
	u64 ssu_key_drop_num;
	u64 pkt_curr_buf_cnt;
	u64 qcn_fb_rcv_cnt;
	u64 qcn_fb_drop_cnt;
	u64 qcn_fb_invaild_cnt;
	u64 rsv0;
	u64 rx_packet_tc0_in_cnt;
	u64 rx_packet_tc1_in_cnt;
	u64 rx_packet_tc2_in_cnt;
	u64 rx_packet_tc3_in_cnt;
	u64 rx_packet_tc4_in_cnt;
	u64 rx_packet_tc5_in_cnt;
	u64 rx_packet_tc6_in_cnt;
	u64 rx_packet_tc7_in_cnt;
	u64 rx_packet_tc0_out_cnt;
	u64 rx_packet_tc1_out_cnt;
	u64 rx_packet_tc2_out_cnt;
	u64 rx_packet_tc3_out_cnt;
	u64 rx_packet_tc4_out_cnt;
	u64 rx_packet_tc5_out_cnt;
	u64 rx_packet_tc6_out_cnt;
	u64 rx_packet_tc7_out_cnt;

	/* Tx packet level statistics */
	u64 tx_packet_tc0_in_cnt;
	u64 tx_packet_tc1_in_cnt;
	u64 tx_packet_tc2_in_cnt;
	u64 tx_packet_tc3_in_cnt;
	u64 tx_packet_tc4_in_cnt;
	u64 tx_packet_tc5_in_cnt;
	u64 tx_packet_tc6_in_cnt;
	u64 tx_packet_tc7_in_cnt;
	u64 tx_packet_tc0_out_cnt;
	u64 tx_packet_tc1_out_cnt;
	u64 tx_packet_tc2_out_cnt;
	u64 tx_packet_tc3_out_cnt;
	u64 tx_packet_tc4_out_cnt;
	u64 tx_packet_tc5_out_cnt;
	u64 tx_packet_tc6_out_cnt;
	u64 tx_packet_tc7_out_cnt;

	/* packet buffer statistics */
	u64 pkt_curr_buf_tc0_cnt;
	u64 pkt_curr_buf_tc1_cnt;
	u64 pkt_curr_buf_tc2_cnt;
	u64 pkt_curr_buf_tc3_cnt;
	u64 pkt_curr_buf_tc4_cnt;
	u64 pkt_curr_buf_tc5_cnt;
	u64 pkt_curr_buf_tc6_cnt;
	u64 pkt_curr_buf_tc7_cnt;

	u64 mb_uncopy_num;
	u64 lo_pri_unicast_rlt_drop_num;
	u64 hi_pri_multicast_rlt_drop_num;
	u64 lo_pri_multicast_rlt_drop_num;
	u64 rx_oq_drop_pkt_cnt;
	u64 tx_oq_drop_pkt_cnt;
	u64 nic_l2_err_drop_pkt_cnt;
	u64 roc_l2_err_drop_pkt_cnt;
};
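
/* Illustrative sketch, not part of the driver API: counters such as the
 * ones above are typically exported (e.g. to ethtool) through tables of
 * struct hclge_comm_stats_str, where .offset records the field's position
 * inside its stats structure. A hypothetical entry generator
 * (HCLGE_EXAMPLE_32_BIT_STAT is an assumed name) might look like this,
 * used as HCLGE_EXAMPLE_32_BIT_STAT(igu_rx_err_pkt) in such a table.
 */
#define HCLGE_EXAMPLE_32_BIT_STAT(f) \
	{ .desc = #f, .offset = offsetof(struct hclge_32_bit_stats, f) }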

/* mac stats, opcode id: 0x0032 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_overrsize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_max_oct_pkt_num;
	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_overrsize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_max_oct_pkt_num;

	u64 mac_trans_fragment_pkt_num;
	u64 mac_trans_undermin_pkt_num;
	u64 mac_trans_jabber_pkt_num;
	u64 mac_trans_err_all_pkt_num;
	u64 mac_trans_from_app_good_pkt_num;
	u64 mac_trans_from_app_bad_pkt_num;
	u64 mac_rcv_fragment_pkt_num;
	u64 mac_rcv_undermin_pkt_num;
	u64 mac_rcv_jabber_pkt_num;
	u64 mac_rcv_fcs_err_pkt_num;
	u64 mac_rcv_send_app_good_pkt_num;
	u64 mac_rcv_send_app_bad_pkt_num;
};

struct hclge_hw_stats {
	struct hclge_mac_stats mac_stats;
	struct hclge_64_bit_stats all_64_bit_stats;
	struct hclge_32_bit_stats all_32_bit_stats;
};

struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_hw_stats hw_stats;
	unsigned long state;

	u32 fw_version;
	u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
	u16 num_tqps; /* Num task queue pairs of this PF */
	u16 num_req_vfs; /* Num VFs requested for this PF */

	u16 num_roce_msix; /* Num of roce vectors for this PF */
	int roce_base_vector;

	/* Base task tqp physical id of this PF */
	u16 base_tqp_pid;
	u16 alloc_rss_size; /* Allocated RSS task queue */
	u16 rss_size_max; /* HW defined max RSS task queue */

	/* Num of guaranteed filters for this PF */
	u16 fdir_pf_filter_count;
	u16 num_alloc_vport; /* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;

#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u32 base_msi_vector;
	struct msix_entry *msix_entries;
	u16 *vector_status;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list service_timer;
	struct work_struct service_task;

	bool cur_promisc;
	int num_alloc_vfs; /* Actual number of VFs allocated */

	struct hclge_tqp *htqp;
	struct hclge_vport *vport;

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_USE_MSI	0x00000001
#define HCLGE_FLAG_USE_MSIX	0x00000002
#define HCLGE_FLAG_MAIN		0x00000004
#define HCLGE_FLAG_DCB_CAPABLE	0x00000008
#define HCLGE_FLAG_DCB_ENABLE	0x00000010
	u32 flag;

	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
	u32 mps; /* Max packet size */

	enum hclge_mta_dmac_sel_type mta_mac_sel_type;
	bool enable_mta; /* Multicast filter enable */
	bool accept_mta_mc; /* Whether to accept mta filter multicast */
};

struct hclge_vport {
	u16 alloc_tqps; /* Allocated Tx/Rx queues */

	u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	u16 alloc_rss_size;

	u16 qs_offset;
	u16 bw_limit; /* VSI BW Limit (0 = disabled) */
	u8 dwrr;

	int vport_id;
	struct hclge_dev *back; /* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;
};

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable);
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
				   struct hnae3_ring_chain_node *ring_chain);
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
			     bool is_kill, u16 vlan, u8 qos, __be16 proto);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
#endif