// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg at runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for the
	 * link status IRQ. If not, we'll learn the link state
	 * from the slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_force_promisc = true;
}

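/* A worked note on the vector sizing in aq_nic_cfg_start() above, with
 * illustrative values: given 6 online CPUs and an AQ_CFG_VECS_DEF of 8,
 * cfg->vecs is first clamped to 6 and then rounded down to 4, so the
 * RSS indirection entries computed as (i & (num_rss_queues - 1)) always
 * index a power-of-two set of rings.
 */
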
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		pr_info("%s: link change old %d new %d\n",
			AQ_CFG_DRV_NAME, self->link_status.mbps,
			self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		/* The driver has to update the RX block's flow control
		 * settings on any link event, so query the FW for the
		 * flow control mode it negotiated.
		 */
		if (self->aq_fw_ops->get_flow_control)
			self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}
	return 0;
}

static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));
	return IRQ_HANDLED;
}

static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

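/* Brings the net_device to a registerable state: resolve the firmware
 * ops, read the permanent MAC address (optionally overridden at build
 * time via AQ_CFG_MAC_ADDR_PERMANENT), allocate one aq_vec per
 * configured vector, and only then call register_netdev(), with the
 * carrier and TX queues kept down until a link-up event.
 */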
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
	if (err)
		goto err_exit;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
						 self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
			aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

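/* Bring-up sequence used below: restore the multicast list and packet
 * filter, start every vector and then the hardware, arm the service
 * timer, and finally either start the polling timer or request the
 * per-vector and (if reserved) link-state IRQs before enabling
 * interrupts and waking the TX queues.
 */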
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    self->aq_nic_cfg.link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

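/* Maps an skb onto TX descriptors and returns the number consumed, or
 * 0 if a DMA mapping failed (the mapping_error path unwinds anything
 * already mapped). A GSO packet gets an extra leading context
 * descriptor (is_txc), and fragments longer than AQ_CFG_TX_FRAME_MAX
 * are split. Illustrative example: a GSO skb with a linear head and
 * one fragment of twice AQ_CFG_TX_FRAME_MAX consumes 1 context +
 * 1 head + 2 data descriptors, with is_eop set only on the last.
 */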
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->flags = 0U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;
		dx_buff->eop_index = 0xffffU;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		++ret;
	}

	dx_buff->flags = 0U;
	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_txc && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

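/* Transmit entry point. An skb with more than AQ_CFG_SKB_FRAGS_MAX
 * fragments is freed and dropped (err stays NETDEV_TX_OK, so the stack
 * does not retry it); a stopped queue or a failed mapping returns
 * NETDEV_TX_BUSY instead.
 */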
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	unsigned int packet_filter = self->packet_filter;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_ALLMULTI;
	} else {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_MULTICAST;
		self->mc_list.count = i;
		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						       self->mc_list.ar,
						       self->mc_list.count);
	}
	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

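/* Fills the ethtool statistics buffer: firmware MAC counters first,
 * then each vector's software counters appended by
 * aq_vec_get_sw_stats(). The ordering is expected to stay in sync with
 * the stat strings table in aq_ethtool.c.
 */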
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	unsigned int i = 0U;
	unsigned int count = 0U;
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
		aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct net_device *ndev = self->ndev;
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

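/* Reports link modes to ethtool. The "supported" set is derived from
 * the static aq_hw_caps->link_speed_msk, while "advertising" uses the
 * runtime cfg->link_speed_msk, so a speed disabled through ethtool
 * drops out of the advertised set but remains supported.
 */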
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

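/* Teardown: deinitialize every vector and notify the firmware; when
 * the device is leaving D0 or wake-on-LAN is configured, the firmware
 * set_power op is also invoked with the target power state and the
 * current MAC address.
 */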
void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (likely(self->aq_fw_ops->deinit)) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self);

err_exit:
	rtnl_unlock();
}