// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ring.h"
#include "aq_nic.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_utils.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl2_utils.h"
#include "hw_atl2_llh.h"
#include "hw_atl2_internal.h"
#include "hw_atl2_llh_internal.h"

/* Forward declaration: writes one Action Resolver Table (ART) record under
 * the FW semaphore; used by the filter-setup helpers defined below it.
 */
static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
				       u32 tag, u32 mask, u32 action);

/* Capabilities shared by every ATL2-based board; per-board structures below
 * add only the media type and the supported link-rate mask.
 */
#define DEFAULT_BOARD_BASIC_CAPABILITIES \
	.is_64_dma = true,		  \
	.msix_irqs = 8U,		  \
	.irq_mask = ~0U,		  \
	.vecs = HW_ATL2_RSS_MAX,	  \
	.tcs = HW_ATL2_TC_MAX,	  \
	.rxd_alignment = 1U,		  \
	.rxd_size = HW_ATL2_RXD_SIZE,   \
	.rxds_max = HW_ATL2_MAX_RXD,    \
	.rxds_min = HW_ATL2_MIN_RXD,    \
	.txd_alignment = 1U,		  \
	.txd_size = HW_ATL2_TXD_SIZE,   \
	.txds_max = HW_ATL2_MAX_TXD,    \
	.txds_min = HW_ATL2_MIN_TXD,    \
	.txhwb_alignment = 4096U,	  \
	.tx_rings = HW_ATL2_TX_RINGS,   \
	.rx_rings = HW_ATL2_RX_RINGS,   \
	.hw_features = NETIF_F_HW_CSUM |	  \
			NETIF_F_RXCSUM |	  \
			NETIF_F_RXHASH |	  \
			NETIF_F_SG |		  \
			NETIF_F_TSO |		  \
			NETIF_F_TSO6 |		  \
			NETIF_F_LRO |		  \
			NETIF_F_NTUPLE |	  \
			NETIF_F_HW_VLAN_CTAG_FILTER | \
			NETIF_F_HW_VLAN_CTAG_RX |     \
			NETIF_F_HW_VLAN_CTAG_TX |     \
			NETIF_F_GSO_UDP_L4      |     \
			NETIF_F_GSO_PARTIAL,          \
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true,		  \
	.mtu = HW_ATL2_MTU_JUMBO,	  \
	.mac_regs_count = 72,		  \
	.hw_alive_check_addr = 0x10U,     \
	.priv_data_len = sizeof(struct hw_atl2_priv)

/* AQC113: twisted-pair board supporting 10M..10G link rates. */
const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
	DEFAULT_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M |
			  AQ_NIC_RATE_10M,
};

/* Read the global CPU semaphore guarding the FW action-resolver table.
 * Used as the poll target in hw_atl2_act_rslvr_table_set().
 */
static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
{
	return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR);
}

/* Soft-reset the NIC, clear the driver's cached per-HW private state
 * (stale after reset), and move the FW MPI state machine to RESET.
 * Returns 0 or a negative errno from the soft reset / HW error flags.
 */
static int hw_atl2_hw_reset(struct aq_hw_s *self)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	int err;

	err = hw_atl2_utils_soft_reset(self);
	if (err)
		return err;

	/* priv caches e.g. the ART base index; invalid after a reset. */
	memset(priv, 0, sizeof(*priv));

	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);

	return err;
}

/* Program the Rx queue -> traffic-class map registers. The nibble pattern
 * written depends on the current Rx traffic-class mode bit: one layout for
 * mode 0, another for mode 1 (each register covers 8 queues, one nibble
 * per queue).
 */
static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self)
{
	if (!hw_atl_rpb_rpf_rx_traf_class_mode_get(self)) {
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x11110000);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x33332222);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x55554444);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x77776666);
	} else {
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x00000000);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x11111111);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x22222222);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x33333333);
	}

	return aq_hw_err_from_flags(self);
}

/* QoS bring-up: Tx packet-scheduler rates/credits for TC0, Tx/Rx packet
 * buffer sizes with high/low flow-control thresholds (66% / 50% of the
 * buffer, converted from KB to 32-byte units), 802.1p priority -> TC
 * mapping, and the legacy ring -> TC map.
 */
static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	u32 tx_buff_size = HW_ATL2_TXBUF_MAX;
	u32 rx_buff_size = HW_ATL2_RXBUF_MAX;
	unsigned int prio = 0U;
	u32 threshold = 0U;
	u32 tc = 0U;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	tc = 0;

	/* TX Packet Scheduler Data TC0 */
	hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF0, tc);
	hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, 0x640, tc);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);

	/* Tx buf size TC0 */
	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);

	/* High watermark: 66% of the buffer, in 32-byte units. */
	threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);

	/* Low watermark: 50% of the buffer. */
	threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);

	/* QoS Rx buf size per TC */
	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);

	threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);

	threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);

	/* QoS 802.1p priority -> TC mapping: spread the 8 priorities
	 * evenly across the configured number of TCs.
	 */
	for (prio = 0; prio < 8; ++prio)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
							cfg->tcs * prio / 8);

	/* ATL2 Apply legacy ring to TC mapping */
	hw_atl2_hw_queue_to_tc_map_set(self);

	return aq_hw_err_from_flags(self);
}

/* Write the RSS indirection table (highest index first) into the new-style
 * ATL2 redirection registers.
 */
static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
			      struct aq_rss_parameters *rss_params)
{
	u8 *indirection_table = rss_params->indirection_table;
	int i;

	for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;)
		hw_atl2_new_rpf_rss_redir_set(self, 0, i, indirection_table[i]);

	return aq_hw_err_from_flags(self);
}

/* One-time Tx datapath init: TC mode, LSO TCP-flag masks for first/middle/
 * last segments, write-back IRQs on, DCA off, checksum insertion on, and
 * Tx buffer clock gating disabled.
 */
static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
{
	/* Tx TC/RSS number config */
	hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);

	/* TCP flags allowed in LSO segments: first/middle segments mask out
	 * FIN/PSH; the last segment keeps them (values are flag bitmasks).
	 */
	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

/* Populate the driver's slice of the Action Resolver Table with the default
 * Rx filtering rules: drop non-matching traffic while promisc is off
 * (L2 and VLAN "promisc off" records), and assign TC0 to traffic tagged as
 * known VLAN / unicast MAC / all-multicast / untagged. The two
 * "promisc on" records are installed disabled and are toggled later by the
 * promisc helpers below.
 */
static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u8 index;

	hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF);
	hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC,
				     HW_ATL2_MAC_UC);
	hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC);

	/* L2 promisc off: drop anything not tagged as UC or all-MC. */
	index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_UC_MASK |
				    HW_ATL2_RPF_TAG_ALLMC_MASK,
				    HW_ATL2_ACTION_DROP);

	/* VLAN promisc off: drop frames with no VLAN/untag tag match. */
	index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_VLAN_MASK |
				    HW_ATL2_RPF_TAG_UNTAG_MASK,
				    HW_ATL2_ACTION_DROP);

	index = priv->art_base_index + HW_ATL2_RPF_VLAN_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_VLAN,
				    HW_ATL2_RPF_TAG_VLAN_MASK,
				    HW_ATL2_ACTION_ASSIGN_TC(0));

	index = priv->art_base_index + HW_ATL2_RPF_MAC_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_UC,
				    HW_ATL2_RPF_TAG_UC_MASK,
				    HW_ATL2_ACTION_ASSIGN_TC(0));

	index = priv->art_base_index + HW_ATL2_RPF_ALLMC_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_ALLMC,
				    HW_ATL2_RPF_TAG_ALLMC_MASK,
				    HW_ATL2_ACTION_ASSIGN_TC(0));

	index = priv->art_base_index + HW_ATL2_RPF_UNTAG_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_UNTAG_MASK,
				    HW_ATL2_RPF_TAG_UNTAG_MASK,
				    HW_ATL2_ACTION_ASSIGN_TC(0));

	/* Promisc-on records start disabled; enabled on demand later. */
	index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_VLAN_MASK,
				    HW_ATL2_ACTION_DISABLE);

	index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_ON_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_UC_MASK,
				    HW_ATL2_ACTION_DISABLE);
}

/* Toggle VLAN promiscuity via the ART: when VLAN promisc is requested, or
 * L2 promisc is already enabled in HW, the "VLAN promisc off" drop record
 * is disabled; otherwise it drops unmatched VLAN/untagged traffic.
 */
static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
						  bool promisc)
{
	u16 off_action = (!promisc &&
			  !hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
				HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u8 index;

	index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_VLAN_MASK |
				    HW_ATL2_RPF_TAG_UNTAG_MASK, off_action);
}

/* Toggle L2 promiscuity via the ART, then propagate the (possibly wider)
 * promisc state to the VLAN record as well: VLAN promisc stays on if it
 * was already enabled in HW even when L2 promisc is being turned off.
 */
static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
{
	u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	bool vlan_promisc_enable;
	u8 index;

	index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_UC_MASK |
				    HW_ATL2_RPF_TAG_ALLMC_MASK,
				    off_action);

	/* turn VLAN promisc mode too */
	vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self);
	hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc |
					      vlan_promisc_enable);
}

/* Write one ART record (tag/mask/action) at @location.
 *
 * Polls the FW semaphore until it reads 1 (1 us period, 10 ms timeout);
 * returns -ETIMEDOUT from the poll helper if it never becomes available.
 * After writing the record, writes 1 back to the semaphore register —
 * presumably the release handshake (semantics live in FW; not visible
 * here).
 */
static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
				       u32 tag, u32 mask, u32 action)
{
	u32 val;
	int err;

	err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get,
					self, val, val == 1,
					1, 10000U);
	if (err)
		return err;

	hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask,
					 action);

	hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR);

	return err;
}

/* One-time Rx datapath init: TC mode, Rx flow control, RSS hash type and
 * ring selection, unicast/multicast/VLAN filter defaults, the new ART
 * filter rules, write-back IRQs, broadcast handling, and DCA off.
 */
static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					  HW_ATL_RSS_ENABLED_3INDEX_BITS :
					  HW_ATL_RSS_DISABLED);

	/* Multicast filters: only filter 0 enabled at init. */
	for (i = HW_ATL2_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U);

	/* Vlan filters: outer 802.1ad (S-tag), inner 802.1Q (C-tag). */
	hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD);
	hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q);

	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	/* Always accept untagged packets */
	hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
	hw_atl_rpf_vlan_untagged_act_set(self, 1U);

	hw_atl2_hw_init_new_rx_filters(self);

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

/* Full HW bring-up: fetch the ART section assigned to this function by FW,
 * init launchtime, Tx/Rx paths, MAC address, link speed, FW state, QoS,
 * RSS, the new-style RPF, statistics, and finally the interrupt controller
 * and offloads. Returns 0 or a negative errno.
 */
static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
	/* Global interrupt-control register values, indexed by IRQ type and
	 * by whether more than one vector is in use (column 0: single
	 * vector, column 1: multi-vector).
	 */
	static u32 aq_hw_atl2_igcr_table_[4][2] = {
		[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
		[AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
		[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
		[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
	};

	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
	u8 base_index, count;
	int err;

	err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index,
							  &count);
	if (err)
		return err;

	/* FW reports the base in sections; each section appears to span
	 * 8 ART records — hence the scale factor. TODO confirm vs FW docs.
	 */
	priv->art_base_index = 8 * base_index;

	hw_atl2_init_launchtime(self);

	hw_atl2_hw_init_tx_path(self);
	hw_atl2_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl2_hw_qos_set(self);
	hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	hw_atl2_rpf_new_enable_set(self, 1);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type]
						 [(aq_nic_cfg->vecs > 1U) ?
						  1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Interrupts */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL2_ERR_INT << 0x18) |
				    (1U << 0x1F)) |
				   ((HW_ATL2_ERR_INT << 0x10) |
				    (1U << 0x17)), 0U);

	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}

/* Rx ring init: ATL2 reuses the B0 implementation unchanged. */
static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self,
				   struct aq_ring_s *aq_ring,
				   struct aq_ring_param_s *aq_ring_param)
{
	return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param);
}

/* Tx ring init: ATL2 reuses the B0 implementation unchanged. */
static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self,
				   struct aq_ring_s *aq_ring,
				   struct aq_ring_param_s *aq_ring_param)
{
	return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

/* Apply the netdev packet-filter flags: promisc is handled through the
 * ATL2 ART records, everything else through the common B0 path.
 */
static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self,
					unsigned int packet_filter)
{
	hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC));

	return hw_atl_b0_hw_packet_filter_set(self, packet_filter);
}

#undef IS_FILTER_ENABLED

/* Program @count multicast MAC addresses into the unicast filter slots
 * starting at HW_ATL2_MAC_MIN. Each address is split into the high 2 bytes
 * (msw) and low 4 bytes (lsw); the filter is disabled while reprogramming
 * and re-enabled only if the MC list is enabled in the config.
 * Returns -EBADRQC if @count exceeds the available slots.
 */
static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self,
					 u8 ar_mac
					 [AQ_HW_MULTICAST_ADDRESS_MAX]
					 [ETH_ALEN],
					 u32 count)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;

	if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (cfg->mc_list_count = 0U;
	     cfg->mc_list_count < count;
	     ++cfg->mc_list_count) {
		u32 i = cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
							HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
							HW_ATL2_MAC_MIN + i);

		hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled),
					   HW_ATL2_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

/* Configure interrupt moderation for all rings.
 *
 * itr_tx/itr_rx pack the moderation timers: min timer in bits 8..15, max
 * timer in bits 16..23 (bit 1 set enables moderation). In ON mode the
 * user-requested us values are halved (HW units are 2 us) and clamped; in
 * AUTO mode per-link-speed lookup tables are used and the resulting
 * values are reflected back into the user-visible tx_itr/rx_itr. In OFF
 * mode write-back IRQs fire immediately and moderation is disabled.
 */
static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_tx = 2U;
	u32 itr_rx = 2U;

	switch (self->aq_nic_cfg->itr) {
	case AQ_CFG_INTERRUPT_MODERATION_ON:
	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
			/* HW timers are in 2us units */
			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
			int tx_min_timer = tx_max_timer / 2;

			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
			int rx_min_timer = rx_max_timer / 2;

			/* Clamp each field to its register limit. */
			tx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
					   tx_max_timer);
			tx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
					   tx_min_timer);
			rx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
					   rx_max_timer);
			rx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
					   rx_min_timer);

			itr_tx |= tx_min_timer << 0x8U;
			itr_tx |= tx_max_timer << 0x10U;
			itr_rx |= rx_min_timer << 0x8U;
			itr_rx |= rx_max_timer << 0x10U;
		} else {
			/* {min, max} timer pairs per link speed index. */
			static unsigned int hw_atl2_timers_table_tx_[][2] = {
				{0xfU, 0xffU}, /* 10Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
				{0xfU, 0x1ffU}, /* 2.5Gbit */
				{0xfU, 0x1ffU}, /* 1Gbit */
				{0xfU, 0x1ffU}, /* 100Mbit */
			};
			static unsigned int hw_atl2_timers_table_rx_[][2] = {
				{0x6U, 0x38U},/* 10Gbit */
				{0xCU, 0x70U},/* 5Gbit */
				{0xCU, 0x70U},/* 5Gbit 5GS */
				{0x18U, 0xE0U},/* 2.5Gbit */
				{0x30U, 0x80U},/* 1Gbit */
				{0x4U, 0x50U},/* 100Mbit */
			};
			unsigned int mbps = self->aq_link_status.mbps;
			unsigned int speed_index;

			speed_index = hw_atl_utils_mbps_2_speed_index(mbps);

			/* Update user visible ITR settings */
			self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_
							[speed_index][1] * 2;
			self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_
							[speed_index][1] * 2;

			itr_tx |= hw_atl2_timers_table_tx_
						[speed_index][0] << 0x8U;
			itr_tx |= hw_atl2_timers_table_tx_
						[speed_index][1] << 0x10U;

			itr_rx |= hw_atl2_timers_table_rx_
						[speed_index][0] << 0x8U;
			itr_rx |= hw_atl2_timers_table_rx_
						[speed_index][1] << 0x10U;
		}
		break;
	case AQ_CFG_INTERRUPT_MODERATION_OFF:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
		itr_tx = 0U;
		itr_rx = 0U;
		break;
	}

	for (i = HW_ATL2_RINGS_MAX; i--;) {
		hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
	}

	return aq_hw_err_from_flags(self);
}

/* Stop the HW by masking all ATL2 interrupts. */
static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
	hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);

	return 0;
}

/* Return the driver-maintained statistics snapshot. */
static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
{
	return &self->curr_stats;
}

/* Program the user VLAN filters. Each enabled filter gets a VLAN-ID match
 * and, if it steers to a specific queue (queue != 0xFF), a per-filter tag
 * (i + 2) plus an ART record mapping that tag to the queue-assign action;
 * otherwise the filter shares the default tag 1. Disabled filters have
 * their ART record cleared. VLAN promisc is forced on here — the ART
 * records do the actual per-VLAN filtering.
 */
static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
			       struct aq_rx_filter_vlan *aq_vlans)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u32 queue;
	u8 index;
	int i;

	hw_atl_rpf_vlan_prom_mode_en_set(self, 1U);

	for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) {
		queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue);

		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
		index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i;
		hw_atl2_act_rslvr_table_set(self, index, 0, 0,
					    HW_ATL2_ACTION_DISABLE);
		if (aq_vlans[i].enable) {
			hw_atl_rpf_vlan_id_flr_set(self,
						   aq_vlans[i].vlan_id, i);
			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);

			if (aq_vlans[i].queue != 0xFF) {
				hw_atl_rpf_vlan_rxq_flr_set(self,
							    aq_vlans[i].queue,
							    i);
				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);

				hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i);

				index = priv->art_base_index +
					HW_ATL2_RPF_VLAN_USER_INDEX + i;
				hw_atl2_act_rslvr_table_set(self, index,
					(i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET,
					HW_ATL2_RPF_TAG_VLAN_MASK, queue);
			} else {
				hw_atl2_rpf_vlan_flr_tag_set(self, 1, i);
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

/* Enable/disable VLAN filtering as a whole. */
static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
	/* set promisc in case of disabling the vlan filter */
	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
	hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable);

	return aq_hw_err_from_flags(self);
}

/* ATL2 hardware-ops table: ATL2-specific paths above, B0 paths reused
 * where the hardware behaves identically.
 */
const struct aq_hw_ops hw_atl2_ops = {
	.hw_soft_reset        = hw_atl2_utils_soft_reset,
	.hw_prepare           = hw_atl2_utils_initfw,
	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
	.hw_init              = hw_atl2_hw_init,
	.hw_reset             = hw_atl2_hw_reset,
	.hw_start             = hw_atl_b0_hw_start,
	.hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop              = hw_atl2_hw_stop,

	.hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable           = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable          = hw_atl_b0_hw_irq_disable,
	.hw_irq_read             = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init             = hw_atl2_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl2_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl2_hw_packet_filter_set,
	.hw_filter_vlan_set          = hw_atl2_hw_vlan_set,
	.hw_filter_vlan_ctrl         = hw_atl2_hw_vlan_ctrl,
	.hw_multicast_list_set       = hw_atl2_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl2_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
	.hw_get_hw_stats             = hw_atl2_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl2_utils_get_fw_version,
	.hw_set_offload              = hw_atl_b0_hw_offload_set,
};