Lines matching "+full:txs +full:- +full:delta" in drivers/net/ethernet/cisco/enic/enic_main.c
(Cisco VIC Ethernet NIC driver).  The matched lines are non-contiguous and are grouped
below by their enclosing function.

/* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. */
/* in mod_range[] (adaptive RX coalescing ranges) */
        {0, 0}, /* 0 - 4 Gbps */
        {0, 3}, /* 4 - 10 Gbps */

/* in enic_init_affinity_hint() */
        int numa_node = dev_to_node(&enic->pdev->dev);

        for (i = 0; i < enic->intr_count; i++) {
                    (cpumask_available(enic->msix[i].affinity_mask) &&
                     !cpumask_empty(enic->msix[i].affinity_mask)))
                if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
                                enic->msix[i].affinity_mask);

/* in enic_free_affinity_hint() */
        for (i = 0; i < enic->intr_count; i++) {
                free_cpumask_var(enic->msix[i].affinity_mask);

/* in enic_set_affinity_hint() */
        for (i = 0; i < enic->intr_count; i++) {
                    !cpumask_available(enic->msix[i].affinity_mask) ||
                    cpumask_empty(enic->msix[i].affinity_mask))
                err = irq_update_affinity_hint(enic->msix_entry[i].vector,
                                               enic->msix[i].affinity_mask);
                        netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
        for (i = 0; i < enic->wq_count; i++) {
                if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
                    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
                        netif_set_xps_queue(enic->netdev,
                                            enic->msix[wq_intr].affinity_mask,

/* in enic_unset_affinity_hint() */
        for (i = 0; i < enic->intr_count; i++)
                irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);

/* in enic_udp_tunnel_set_port() */
        spin_lock_bh(&enic->devcmd_lock);

        err = vnic_dev_overlay_offload_cfg(enic->vdev,
                                           ntohs(ti->port));
        err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
                                            enic->vxlan.patch_level);
        enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);

        spin_unlock_bh(&enic->devcmd_lock);

/* in enic_udp_tunnel_unset_port() */
        spin_lock_bh(&enic->devcmd_lock);

        err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
        enic->vxlan.vxlan_udp_port_number = 0;

        spin_unlock_bh(&enic->devcmd_lock);

/* in enic_features_check() */
        if (!skb->encapsulation)
                if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
                proto = ipv6_hdr(skb)->nexthdr;
                proto = ip_hdr(skb)->protocol;
        switch (eth->h_proto) {
                if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
        port = be16_to_cpu(udph->dest);
        if (port != enic->vxlan.vxlan_udp_port_number)
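/*
 * Illustrative sketch (userspace, not driver code): the net effect of
 * the enic_features_check() fragments above.  When a checksum/TSO
 * offloaded skb is encapsulated but the outer/inner protocol or the
 * UDP destination port does not match the VXLAN port configured into
 * the firmware, the offload feature bits are masked so the stack falls
 * back to software.  All names below are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define F_CSUM (1u << 0)                /* stands in for NETIF_F_CSUM_MASK */
#define F_GSO  (1u << 1)                /* stands in for NETIF_F_GSO_MASK  */

static uint32_t features_check_sketch(uint32_t features, int encapsulated,
                                      int proto_ok, uint16_t dst_port,
                                      uint16_t offloaded_port)
{
        if (!encapsulated)
                return features;        /* nothing to restrict */
        if (!proto_ok || dst_port != offloaded_port)
                return features & ~(F_CSUM | F_GSO);    /* sw fallback */
        return features;
}

int main(void)
{
        /* port mismatch (4789 vs 8472): prints 0x0, offloads dropped */
        printf("0x%x\n", features_check_sketch(F_CSUM | F_GSO, 1, 1,
                                               4789, 8472));
        return 0;
}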
/* in enic_is_dynamic() */
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;

/* in enic_sriov_enabled() */
        return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;

/* in enic_is_sriov_vf() */
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;

/* in enic_is_valid_vf() */
        return vf >= 0 && vf < enic->num_vfs;

/* in enic_log_q_error() */
        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i].vwq);
                        netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
        for (i = 0; i < enic->rq_count; i++) {
                error_status = vnic_rq_error_status(&enic->rq[i].vrq);
                        netdev_err(enic->netdev, "RQ[%d] error_status %d\n",

/* in enic_msglvl_check() */
        u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

        if (msg_enable != enic->msg_enable) {
                netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
                            enic->msg_enable, msg_enable);
                enic->msg_enable = msg_enable;

/* in enic_mtu_check() */
        u32 mtu = vnic_dev_mtu(enic->vdev);
        struct net_device *netdev = enic->netdev;

        if (mtu && mtu != enic->port_mtu) {
                enic->port_mtu = mtu;
                if (mtu != netdev->mtu)
                        schedule_work(&enic->change_mtu_work);
                if (mtu < netdev->mtu)
                            netdev->mtu, mtu);

/* in enic_set_rx_coal_setting() */
        int index = -1;
        struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
         * 3. Update it in enic->rx_coalesce_setting
        speed = vnic_dev_port_speed(enic->vdev);
        rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
        rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
        rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
        for (index = 0; index < enic->rq_count; index++)
                enic->cq[index].cur_rx_coal_timeval =
                        enic->config.intr_timer_usec;
        rx_coal->use_adaptive_rx_coalesce = 1;
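/*
 * Illustrative sketch (userspace): one way the mod_range[] row used
 * above can be selected from the link speed, matching the 0-4, 4-10
 * and 10-40 Gbps bucket comments.  The exact boundary constants and
 * the Mbps unit are assumptions.
 */
#include <stdio.h>

static int speed_to_index(unsigned int speed_mbps)
{
        if (speed_mbps > 10000)
                return 2;       /* 10 - 40 Gbps row */
        if (speed_mbps > 4000)
                return 1;       /* 4 - 10 Gbps row */
        return 0;               /* 0 - 4 Gbps row */
}

int main(void)
{
        printf("25G -> row %d\n", speed_to_index(25000));       /* row 2 */
        return 0;
}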
/* in enic_link_check() */
        int link_status = vnic_dev_link_status(enic->vdev);
        int carrier_ok = netif_carrier_ok(enic->netdev);

                netdev_info(enic->netdev, "Link UP\n");
                netif_carrier_on(enic->netdev);
                netdev_info(enic->netdev, "Link DOWN\n");
                netif_carrier_off(enic->netdev);

/* in enic_isr_legacy() */
        vnic_intr_mask(&enic->intr[io_intr]);
        pba = vnic_intr_legacy_pba(enic->legacy_pba);
                vnic_intr_unmask(&enic->intr[io_intr]);
                vnic_intr_return_all_credits(&enic->intr[notify_intr]);
                vnic_intr_return_all_credits(&enic->intr[err_intr]);
                schedule_work(&enic->reset);
                napi_schedule_irqoff(&enic->napi[0]);
                vnic_intr_unmask(&enic->intr[io_intr]);

/* in enic_isr_msi() */
         * is not providing per-vector masking, so the OS will not
        napi_schedule_irqoff(&enic->napi[0]);

/* in enic_isr_msix_err() */
        vnic_intr_return_all_credits(&enic->intr[intr]);
        schedule_work(&enic->reset);

/* in enic_isr_msix_notify() */
        vnic_intr_return_all_credits(&enic->intr[intr]);

/* in enic_queue_wq_skb_cont() */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= skb_frag_size(frag);
                dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
                        return -ENOMEM;

/* in enic_queue_wq_skb_vlan() */
        unsigned int len_left = skb->len - head_len;

        dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
                return -ENOMEM;
        enic->wq[wq->index].stats.csum_none++;
        enic->wq[wq->index].stats.packets++;
        enic->wq[wq->index].stats.bytes += skb->len;

/* in enic_queue_wq_skb_csum_l4() */
        unsigned int len_left = skb->len - head_len;
        unsigned int csum_offset = hdr_len + skb->csum_offset;

        dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
                return -ENOMEM;
        enic->wq[wq->index].stats.csum_partial++;
        enic->wq[wq->index].stats.packets++;
        enic->wq[wq->index].stats.bytes += skb->len;

/* in enic_preload_tcp_csum_encap() */
        switch (eth->h_proto) {
                inner_ip_hdr(skb)->check = 0;
                inner_tcp_hdr(skb)->check =
                        ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
                                           inner_ip_hdr(skb)->daddr, 0,
                inner_tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
                                         &inner_ipv6_hdr(skb)->daddr, 0,

/* in enic_preload_tcp_csum() */
        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                        ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
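/*
 * Illustrative sketch (userspace, not kernel code): the seed the two
 * preload helpers above write into tcp->check before TSO.  With a zero
 * length field, ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0) is
 * the *un-inverted* folded pseudo-header sum, which the hardware then
 * extends with each segment's length and payload.  Byte-order details
 * are glossed over here.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t tso_csum_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
        uint64_t sum = (saddr >> 16) + (saddr & 0xffff) +
                       (daddr >> 16) + (daddr & 0xffff) + proto;

        while (sum >> 16)                       /* fold the carries */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;   /* note: not inverted like a final csum */
}

int main(void)
{
        printf("seed = 0x%04x\n", tso_csum_seed(0xc0a80001, 0xc0a80002, 6));
        return 0;
}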
/* in enic_queue_wq_skb_tso() */
        unsigned int len_left = skb->len - frag_len_left;

        if (skb->encapsulation) {
                enic->wq[wq->index].stats.encap_tso++;
                enic->wq[wq->index].stats.tso++;
        dma_addr = dma_map_single(&enic->pdev->dev,
                                  skb->data + offset, len,
                return -ENOMEM;
        frag_len_left -= len;
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= skb_frag_size(frag);
                dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
                        return -ENOMEM;
                frag_len_left -= len;
        len = skb->len - hdr_len;
        enic->wq[wq->index].stats.packets += pkts;
        enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len));

/* in enic_queue_wq_skb_encap() */
        unsigned int len_left = skb->len - head_len;

        dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
                return -ENOMEM;
        enic->wq[wq->index].stats.encap_csum++;
        enic->wq[wq->index].stats.packets++;
        enic->wq[wq->index].stats.bytes += skb->len;

/* in enic_queue_wq_skb() */
        unsigned int mss = skb_shinfo(skb)->gso_size;

                enic->wq[wq->index].stats.add_vlan++;
        } else if (enic->loop_enable) {
                vlan_tag = enic->loop_tag;
        else if (skb->encapsulation)
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                buf = wq->to_use->prev;
                while (!buf->os_buf && (buf->next != wq->to_clean)) {
                        wq->ring.desc_avail++;
                        buf = buf->prev;
                wq->to_use = buf->next;

/* in enic_hard_start_xmit() */
        txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
        wq = &enic->wq[txq_map].vwq;

        if (skb->len <= 0) {
                enic->wq[wq->index].stats.null_pkt++;

        /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
        if (skb_shinfo(skb)->gso_size == 0 &&
            skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
                enic->wq[wq->index].stats.skb_linear_fail++;

        spin_lock(&enic->wq[txq_map].lock);
            skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
                spin_unlock(&enic->wq[txq_map].lock);
                enic->wq[wq->index].stats.desc_full_awake++;
                enic->wq[wq->index].stats.stopped++;
        spin_unlock(&enic->wq[txq_map].lock);

/* in enic_get_stats() */
        if (err == -ENOMEM)

        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
        net_stats->tx_errors = stats->tx.tx_errors;
        net_stats->tx_dropped = stats->tx.tx_drops;

        net_stats->rx_packets = stats->rx.rx_frames_ok;
        net_stats->rx_bytes = stats->rx.rx_bytes_ok;
        net_stats->rx_errors = stats->rx.rx_errors;
        net_stats->multicast = stats->rx.rx_multicast_frames_ok;

        for (i = 0; i < enic->rq_count; i++) {
                struct enic_rq_stats *rqs = &enic->rq[i].stats;

                if (!enic->rq[i].vrq.ctrl)
                pkt_truncated += rqs->pkt_truncated;
                bad_fcs += rqs->bad_fcs;
        net_stats->rx_over_errors = pkt_truncated;
        net_stats->rx_crc_errors = bad_fcs;
        net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

/* in enic_mc_sync() */
        if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
                return -ENOSPC;
        enic->mc_count++;

/* in enic_mc_unsync() */
        enic->mc_count--;

/* in enic_uc_sync() */
        if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
                return -ENOSPC;
        enic->uc_count++;

/* in enic_uc_unsync() */
        enic->uc_count--;

/* in enic_reset_addr_lists() */
        struct net_device *netdev = enic->netdev;

        enic->mc_count = 0;
        enic->uc_count = 0;
        enic->flags = 0;

/* in enic_set_mac_addr() */
                return -EADDRNOTAVAIL;
                return -EADDRNOTAVAIL;

/* in enic_set_mac_address_dynamic() */
        char *addr = saddr->sa_data;

        if (netif_running(enic->netdev)) {
        if (netif_running(enic->netdev)) {

/* in enic_set_mac_address() */
        char *addr = saddr->sa_data;

/* in enic_set_rx_mode() */
        int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
        int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
        int promisc = (netdev->flags & IFF_PROMISC) ||
        int allmulti = (netdev->flags & IFF_ALLMULTI) ||
        unsigned int flags = netdev->flags |

        if (enic->flags != flags) {
                enic->flags = flags;

/* in enic_tx_timeout() */
        schedule_work(&enic->tx_hang_reset);

/* in enic_set_vf_mac() */
                memcpy(pp->vf_mac, mac, ETH_ALEN);
        return -EINVAL;

/* in enic_set_vf_port() */
                return -EOPNOTSUPP;
        memcpy(&prev_pp, pp, sizeof(*enic->pp));
        memset(pp, 0, sizeof(*enic->pp));

        pp->set |= ENIC_SET_REQUEST;
        pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
                        return -EINVAL;
                pp->set |= ENIC_SET_NAME;
                memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
                        return -EINVAL;
                pp->set |= ENIC_SET_INSTANCE;
                memcpy(pp->instance_uuid,
                        return -EINVAL;
                pp->set |= ENIC_SET_HOST;
                memcpy(pp->host_uuid,
                memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);
                if (is_zero_ether_addr(netdev->dev_addr))
                /* SR-IOV VF: get mac from adapter */
                                   vnic_dev_get_mac_addr, pp->mac_addr);
        pp->set |= ENIC_PORT_REQUEST_APPLIED;
        if (pp->request == PORT_REQUEST_DISASSOCIATE) {
                eth_zero_addr(pp->mac_addr);
                eth_zero_addr(pp->vf_mac);

/* in enic_get_vf_port() */
        if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
                return -ENODATA;

        err = enic_process_get_pp_request(enic, vf, pp->request, &response);
        if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
            ((pp->set & ENIC_SET_NAME) &&
             nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
            ((pp->set & ENIC_SET_INSTANCE) &&
                     pp->instance_uuid)) ||
            ((pp->set & ENIC_SET_HOST) &&
             nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
        return -EMSGSIZE;

/* in enic_set_int_moderation() */
        unsigned int intr = enic_msix_rq_intr(enic, rq->index);
        struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
        u32 timer = cq->tobe_rx_coal_timeval;

        if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
                vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
                cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;

/* in enic_calc_int_moderation() */
        struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
        struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
        struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
        u64 delta;

        delta = ktime_us_delta(now, cq->prev_ts);
        if (delta < ENIC_AIC_TS_BREAK)
        cq->prev_ts = now;

        traffic = pkt_size_counter->large_pkt_bytes_cnt +
                  pkt_size_counter->small_pkt_bytes_cnt;
         * traffic *= (10^6 / delta)  => bps
         * Combining, traffic *= (8 / delta)
        traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

        range_start = (pkt_size_counter->small_pkt_bytes_cnt >
                       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
                      rx_coal->small_pkt_range_start :
                      rx_coal->large_pkt_range_start;
        timer = range_start + ((rx_coal->range_end - range_start) *
        cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

        pkt_size_counter->large_pkt_bytes_cnt = 0;
        pkt_size_counter->small_pkt_bytes_cnt = 0;
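/*
 * Illustrative sketch (userspace): the interpolation behind
 * enic_calc_int_moderation() above.  "traffic" is bytes seen over the
 * last delta microseconds scaled by 8/delta (see the comment
 * fragments), the coalescing timer is a linear blend between
 * range_start and range_end, and the result is averaged with the
 * previous value to damp oscillation.  AIC_MAX_SPEED is a hypothetical
 * stand-in for the driver's clamp constant.
 */
#include <stdint.h>
#include <stdio.h>

#define AIC_MAX_SPEED 40000u    /* assumed top of the traffic scale */

static uint32_t blend_timer(uint32_t range_start, uint32_t range_end,
                            uint64_t traffic, uint32_t prev_timer)
{
        uint32_t timer;

        if (traffic > AIC_MAX_SPEED)
                traffic = AIC_MAX_SPEED;
        timer = range_start +
                (uint32_t)(((range_end - range_start) * traffic) /
                           AIC_MAX_SPEED);
        return (timer + prev_timer) >> 1;       /* moving average */
}

int main(void)
{
        /* half of scale, range 3..125 usec, previous timer 60 usec */
        printf("%u usec\n", blend_timer(3, 125, 20000, 60));    /* 62 */
        return 0;
}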
/* in enic_poll() */
        struct net_device *netdev = napi->dev;

        vnic_intr_return_credits(&enic->intr[intr],
        err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
        if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                enic_calc_int_moderation(enic, &enic->rq[0].vrq);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[0].vrq);
                vnic_intr_unmask(&enic->intr[intr]);
                enic->rq[0].stats.napi_complete++;
                enic->rq[0].stats.napi_repoll++;
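/*
 * Illustrative sketch: the standard NAPI contract that enic_poll()
 * above (and enic_poll_msix_rq() below) follows.  Only a poll that
 * consumes less than its budget may complete NAPI and unmask the
 * interrupt; returning the full budget keeps the kernel polling with
 * the interrupt masked - hence the napi_complete/napi_repoll counters.
 */
static int napi_poll_sketch(int work_done, int budget, int *unmask_irq)
{
        if (work_done < budget) {
                *unmask_irq = 1;        /* napi_complete_done() path */
                return work_done;
        }
        *unmask_irq = 0;                /* stay in polling mode */
        return budget;
}

int main(void)
{
        int unmask;

        return napi_poll_sketch(7, 64, &unmask) == 7 && unmask ? 0 : 1;
}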
/* in enic_free_rx_cpu_rmap() */
        free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
        enic->netdev->rx_cpu_rmap = NULL;

/* in enic_set_rx_cpu_rmap() */
        if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
                enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
                if (unlikely(!enic->netdev->rx_cpu_rmap))
                for (i = 0; i < enic->rq_count; i++) {
                        res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
                                               enic->msix_entry[i].vector);

/* in enic_poll_msix_wq() */
        struct net_device *netdev = napi->dev;
        unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
        struct vnic_wq *wq = &enic->wq[wq_index].vwq;

        wq_irq = wq->index;
        vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
                vnic_intr_unmask(&enic->intr[intr]);

/* in enic_poll_msix_rq() */
        struct net_device *netdev = napi->dev;
        unsigned int rq = (napi - &enic->napi[0]);

        vnic_intr_return_credits(&enic->intr[intr],
        err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
        if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[rq].vrq);
                vnic_intr_unmask(&enic->intr[intr]);
                enic->rq[rq].stats.napi_complete++;
                enic->rq[rq].stats.napi_repoll++;

/* in enic_notify_timer() */
        mod_timer(&enic->notify_timer,

/* in enic_free_intr() */
        struct net_device *netdev = enic->netdev;

        switch (vnic_dev_get_intr_mode(enic->vdev)) {
                free_irq(enic->pdev->irq, netdev);
                free_irq(enic->pdev->irq, enic);
                for (i = 0; i < enic->intr_count; i++)
                        if (enic->msix[i].requested)
                                free_irq(enic->msix_entry[i].vector,
                                         enic->msix[i].devid);

/* in enic_request_intr() */
        struct net_device *netdev = enic->netdev;

        switch (vnic_dev_get_intr_mode(enic->vdev)) {
                err = request_irq(enic->pdev->irq, enic_isr_legacy,
                                  IRQF_SHARED, netdev->name, netdev);
                err = request_irq(enic->pdev->irq, enic_isr_msi,
                                  0, netdev->name, enic);
                for (i = 0; i < enic->rq_count; i++) {
                        snprintf(enic->msix[intr].devname,
                                 sizeof(enic->msix[intr].devname),
                                 "%s-rx-%u", netdev->name, i);
                        enic->msix[intr].isr = enic_isr_msix;
                        enic->msix[intr].devid = &enic->napi[i];
                for (i = 0; i < enic->wq_count; i++) {
                        snprintf(enic->msix[intr].devname,
                                 sizeof(enic->msix[intr].devname),
                                 "%s-tx-%u", netdev->name, i);
                        enic->msix[intr].isr = enic_isr_msix;
                        enic->msix[intr].devid = &enic->napi[wq];
                snprintf(enic->msix[intr].devname,
                         sizeof(enic->msix[intr].devname),
                         "%s-err", netdev->name);
                enic->msix[intr].isr = enic_isr_msix_err;
                enic->msix[intr].devid = enic;
                snprintf(enic->msix[intr].devname,
                         sizeof(enic->msix[intr].devname),
                         "%s-notify", netdev->name);
                enic->msix[intr].isr = enic_isr_msix_notify;
                enic->msix[intr].devid = enic;
                for (i = 0; i < enic->intr_count; i++)
                        enic->msix[i].requested = 0;
                for (i = 0; i < enic->intr_count; i++) {
                        err = request_irq(enic->msix_entry[i].vector,
                                          enic->msix[i].isr, 0,
                                          enic->msix[i].devname,
                                          enic->msix[i].devid);
                        enic->msix[i].requested = 1;
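/*
 * Illustrative sketch (userspace): the MSI-X vector layout implied by
 * the request loop above - one "%s-rx-%u" vector per RQ first, then
 * one "%s-tx-%u" vector per WQ, then the "%s-err" and "%s-notify"
 * vectors.  Helper names are hypothetical.
 */
#include <stdio.h>

static unsigned int rx_vec(unsigned int i) { return i; }
static unsigned int tx_vec(unsigned int rq_count, unsigned int i)
{
        return rq_count + i;
}

int main(void)
{
        unsigned int rq_count = 4, wq_count = 4;
        unsigned int err_vec = rq_count + wq_count;
        unsigned int notify_vec = err_vec + 1;

        printf("rx0=%u tx0=%u err=%u notify=%u\n",
               rx_vec(0), tx_vec(rq_count, 0), err_vec, notify_vec);
        return 0;
}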
/* in enic_synchronize_irqs() */
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
                synchronize_irq(enic->pdev->irq);
                for (i = 0; i < enic->intr_count; i++)
                        synchronize_irq(enic->msix_entry[i].vector);

/* in enic_dev_notify_set() */
        spin_lock_bh(&enic->devcmd_lock);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
                err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR);
                err = vnic_dev_notify_set(enic->vdev,
                err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
        spin_unlock_bh(&enic->devcmd_lock);

/* in enic_notify_timer_start() */
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
                mod_timer(&enic->notify_timer, jiffies);
                /* Using intr for notification for INTx/MSI-X */

/* in enic_open() */
        unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
                .pool_size = enic->config.rq_desc_count,
                .nid = dev_to_node(&enic->pdev->dev),
                .dev = &enic->pdev->dev,

        for (i = 0; i < enic->rq_count; i++) {
                pp_params.napi = &enic->napi[i];
                enic->rq[i].pool = page_pool_create(&pp_params);
                if (IS_ERR(enic->rq[i].pool)) {
                        err = PTR_ERR(enic->rq[i].pool);
                        enic->rq[i].pool = NULL;
                vnic_rq_enable(&enic->rq[i].vrq);
                vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
                if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
                        err = -ENOMEM;
        for (i = 0; i < enic->wq_count; i++)
                vnic_wq_enable(&enic->wq[i].vwq);
        for (i = 0; i < enic->rq_count; i++)
                napi_enable(&enic->napi[i]);
        if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
                for (i = 0; i < enic->wq_count; i++)
                        napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
        for (i = 0; i < enic->intr_count; i++)
                vnic_intr_unmask(&enic->intr[i]);
        for (i = 0; i < enic->rq_count; i++) {
                ret = vnic_rq_disable(&enic->rq[i].vrq);
                        vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
                page_pool_destroy(enic->rq[i].pool);
                enic->rq[i].pool = NULL;

/* in enic_stop() */
        for (i = 0; i < enic->intr_count; i++) {
                vnic_intr_mask(&enic->intr[i]);
                (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
        timer_delete_sync(&enic->notify_timer);
        for (i = 0; i < enic->rq_count; i++)
                napi_disable(&enic->napi[i]);
        if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
                for (i = 0; i < enic->wq_count; i++)
                        napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
        for (i = 0; i < enic->wq_count; i++) {
                err = vnic_wq_disable(&enic->wq[i].vwq);
        for (i = 0; i < enic->rq_count; i++) {
                err = vnic_rq_disable(&enic->rq[i].vrq);
        for (i = 0; i < enic->wq_count; i++)
                vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
        for (i = 0; i < enic->rq_count; i++) {
                vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
                page_pool_destroy(enic->rq[i].pool);
                enic->rq[i].pool = NULL;
        for (i = 0; i < enic->cq_count; i++)
                vnic_cq_clean(&enic->cq[i]);
        for (i = 0; i < enic->intr_count; i++)
                vnic_intr_clean(&enic->intr[i]);

/* in _enic_change_mtu() */
        WRITE_ONCE(netdev->mtu, new_mtu);

/* in enic_change_mtu() */
                return -EOPNOTSUPP;
        if (new_mtu > enic->port_mtu)
                            new_mtu, enic->port_mtu);

/* in enic_change_mtu_work() */
        struct net_device *netdev = enic->netdev;
        int new_mtu = vnic_dev_mtu(enic->vdev);

        netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);

/* in enic_poll_controller() */
        struct vnic_dev *vdev = enic->vdev;

        for (i = 0; i < enic->rq_count; i++) {
                enic_isr_msix(enic->msix_entry[intr].vector,
                              &enic->napi[i]);
        for (i = 0; i < enic->wq_count; i++) {
                enic_isr_msix(enic->msix_entry[intr].vector,
                              &enic->napi[enic_cq_wq(enic, i)]);
        enic_isr_msi(enic->pdev->irq, enic);
        enic_isr_legacy(enic->pdev->irq, netdev);

/* in enic_dev_wait() */
        return -ETIMEDOUT;

/* in enic_dev_open() */
        err = enic_dev_wait(enic->vdev, vnic_dev_open,

/* in enic_dev_soft_reset() */
        err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
                netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",

/* in enic_dev_hang_reset() */
        err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
                netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",

/* in __enic_set_rsskey() */
        rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
                return -ENOMEM;
                        rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
        spin_lock_bh(&enic->devcmd_lock);
        spin_unlock_bh(&enic->devcmd_lock);
        dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),

/* in enic_set_rsskey() */
        netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

/* in enic_set_rsscpu() */
        rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
                return -ENOMEM;
                (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
        spin_lock_bh(&enic->devcmd_lock);
        spin_unlock_bh(&enic->devcmd_lock);
        dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
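/*
 * Illustrative sketch (userspace): the RSS indirection fill performed
 * by enic_set_rsscpu() above.  Each byte of the table names the RQ
 * that receives a given hash bucket, assigned round-robin across the
 * queues ("i % rq_count", packed four bytes per word as
 * "cpu[i/4].b[i%4]").  The table size here is shortened.
 */
#include <stdio.h>

int main(void)
{
        unsigned char table[16];        /* real table is larger */
        unsigned int i, rq_count = 3;

        for (i = 0; i < sizeof(table); i++)
                table[i] = i % rq_count;        /* RQ for hash bucket i */
        for (i = 0; i < sizeof(table); i++)
                printf("%u ", table[i]);        /* 0 1 2 0 1 2 ... */
        printf("\n");
        return 0;
}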
/* in enic_set_niccfg() */
        spin_lock_bh(&enic->devcmd_lock);
        spin_unlock_bh(&enic->devcmd_lock);

/* in enic_set_rss_nic_cfg() */
        u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

        spin_lock_bh(&enic->devcmd_lock);
        res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
        spin_unlock_bh(&enic->devcmd_lock);

/* in enic_set_api_busy() */
        spin_lock(&enic->enic_api_lock);
        enic->enic_api_busy = busy;
        spin_unlock(&enic->enic_api_lock);

/* in enic_reset() */
        if (!netif_running(enic->netdev))
        enic_stop(enic->netdev);
        enic_open(enic->netdev);
        call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

/* in enic_tx_hang_reset() */
        enic_stop(enic->netdev);
        enic_open(enic->netdev);
        call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

/* in enic_set_intr_mode() */
        /* Set interrupt mode (INTx, MSI, MSI-X) depending
         * Try MSI-X first
        if (enic->config.intr_mode < 1 &&
            enic->intr_avail >= ENIC_MSIX_MIN_INTR) {
                for (i = 0; i < enic->intr_avail; i++)
                        enic->msix_entry[i].entry = i;
                num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry,
                                                 enic->intr_avail);
                        vnic_dev_set_intr_mode(enic->vdev,
                        enic->intr_avail = num_intr;
        if (enic->config.intr_mode < 2 &&
            enic->intr_avail >= 1 &&
            !pci_enable_msi(enic->pdev)) {
                enic->intr_avail = 1;
                vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
        if (enic->config.intr_mode < 3 &&
            enic->intr_avail >= 3) {
                enic->intr_avail = 3;
                vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
        vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
        return -EINVAL;
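/*
 * Illustrative sketch: the fallback order in enic_set_intr_mode()
 * above.  enic->config.intr_mode acts as a ceiling - 0 allows MSI-X,
 * 1 starts at MSI, 2 starts at INTx - and each mode also needs enough
 * vectors (INTx needs three: I/O, error, notify).  The MSI-X minimum
 * used here is an assumption.
 */
enum mode { MODE_MSIX, MODE_MSI, MODE_INTX, MODE_UNKNOWN };

static enum mode pick_intr_mode(int cfg_ceiling, unsigned int avail)
{
        if (cfg_ceiling < 1 && avail >= 2)      /* ~ENIC_MSIX_MIN_INTR */
                return MODE_MSIX;
        if (cfg_ceiling < 2 && avail >= 1)
                return MODE_MSI;
        if (cfg_ceiling < 3 && avail >= 3)
                return MODE_INTX;
        return MODE_UNKNOWN;                    /* probe fails: -EINVAL */
}

int main(void)
{
        /* ceiling 0 but only one vector available -> falls back to MSI */
        return pick_intr_mode(0, 1) == MODE_MSI ? 0 : 1;
}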
/* in enic_clear_intr_mode() */
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
                pci_disable_msix(enic->pdev);
                pci_disable_msi(enic->pdev);
        vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

/* in enic_adjust_resources() */
        if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) {
                        enic->rq_avail, enic->wq_avail,
                        enic->cq_avail);
                return -ENOSPC;
        enic->rq_avail = 1;
        enic->wq_avail = 1;
        enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
        enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
        enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
        enic->rq_avail = 1;
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
                enic->rq_count = 1;
                enic->wq_count = 1;
                enic->cq_count = 2;
                enic->intr_count = enic->intr_avail;
                wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
                rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
                max_queues = min(enic->cq_avail,
                                 enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
                        enic->rq_count = rq_avail;
                        enic->wq_count = wq_avail;
                                enic->rq_count = min(rq_avail, max_queues / 2);
                                enic->wq_count = max_queues - enic->rq_count;
                                enic->wq_count = min(wq_avail, max_queues / 2);
                                enic->rq_count = max_queues - enic->wq_count;
                enic->cq_count = enic->rq_count + enic->wq_count;
                enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR;
        return -EINVAL;
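/*
 * Illustrative sketch (userspace): the MSI-X queue sizing in
 * enic_adjust_resources() above.  Two vectors stay reserved for the
 * error and notify interrupts; when the wanted RQs + WQs exceed the
 * remaining completion queues/vectors, the smaller side is capped at
 * half and the larger side takes the rest.  The exact split condition
 * is an assumption inferred from the surrounding fragments.
 */
#include <stdio.h>

static void split_queues(unsigned int rq_avail, unsigned int wq_avail,
                         unsigned int cq_avail, unsigned int intr_avail,
                         unsigned int *rq, unsigned int *wq)
{
        unsigned int reserved = 2;              /* err + notify */
        unsigned int budget = intr_avail - reserved;
        unsigned int max_queues = cq_avail < budget ? cq_avail : budget;

        if (rq_avail + wq_avail <= max_queues) {
                *rq = rq_avail;                 /* everything fits */
                *wq = wq_avail;
        } else if (rq_avail < wq_avail) {
                *rq = rq_avail < max_queues / 2 ? rq_avail : max_queues / 2;
                *wq = max_queues - *rq;
        } else {
                *wq = wq_avail < max_queues / 2 ? wq_avail : max_queues / 2;
                *rq = max_queues - *wq;
        }
}

int main(void)
{
        unsigned int rq, wq;

        split_queues(8, 8, 10, 12, &rq, &wq);
        printf("rq=%u wq=%u\n", rq, wq);        /* rq=5 wq=5 */
        return 0;
}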
/* in enic_get_queue_stats_rx() */
        struct enic_rq_stats *rqstats = &enic->rq[idx].stats;

        rxs->bytes = rqstats->bytes;
        rxs->packets = rqstats->packets;
        rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated;
        rxs->hw_drop_overruns = rqstats->pkt_truncated;
        rxs->csum_unnecessary = rqstats->csum_unnecessary +
                                rqstats->csum_unnecessary_encap;
        rxs->alloc_fail = rqstats->pp_alloc_fail;

/* in enic_get_queue_stats_tx() */
                                   struct netdev_queue_stats_tx *txs)
        struct enic_wq_stats *wqstats = &enic->wq[idx].stats;

        txs->bytes = wqstats->bytes;
        txs->packets = wqstats->packets;
        txs->csum_none = wqstats->csum_none;
        txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum +
                          wqstats->tso;
        txs->hw_gso_packets = wqstats->tso;
        txs->stop = wqstats->stopped;
        txs->wake = wqstats->wake;

/* in enic_get_base_stats() */
                                struct netdev_queue_stats_tx *txs)
        rxs->bytes = 0;
        rxs->packets = 0;
        rxs->hw_drops = 0;
        rxs->hw_drop_overruns = 0;
        rxs->csum_unnecessary = 0;
        rxs->alloc_fail = 0;
        txs->bytes = 0;
        txs->packets = 0;
        txs->csum_none = 0;
        txs->needs_csum = 0;
        txs->hw_gso_packets = 0;
        txs->stop = 0;
        txs->wake = 0;

/* in enic_free_enic_resources() */
        kfree(enic->wq);
        enic->wq = NULL;

        kfree(enic->rq);
        enic->rq = NULL;

        kfree(enic->cq);
        enic->cq = NULL;

        kfree(enic->napi);
        enic->napi = NULL;

        kfree(enic->msix_entry);
        enic->msix_entry = NULL;

        kfree(enic->msix);
        enic->msix = NULL;

        kfree(enic->intr);
        enic->intr = NULL;

/* in enic_alloc_enic_resources() */
        enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL);
        if (!enic->wq)
        enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL);
        if (!enic->rq)
        enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL);
        if (!enic->cq)
        enic->napi = kcalloc(enic->wq_avail + enic->rq_avail,
        if (!enic->napi)
        enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry),
        if (!enic->msix_entry)
        enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry),
        if (!enic->msix)
        enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr),
        if (!enic->intr)
        return -ENOMEM;

/* in enic_dev_deinit() */
        for (i = 0; i < enic->rq_count; i++)
                __netif_napi_del(&enic->napi[i]);
        if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
                for (i = 0; i < enic->wq_count; i++)
                        __netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

/* in enic_dev_init() */
        struct net_device *netdev = enic->netdev;

        vnic_dev_intr_coal_timer_info_default(enic->vdev);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
                netif_napi_add(netdev, &enic->napi[0], enic_poll);
                for (i = 0; i < enic->rq_count; i++) {
                        netif_napi_add(netdev, &enic->napi[i],
                for (i = 0; i < enic->wq_count; i++)
                                       &enic->napi[enic_cq_wq(enic, i)],

/* in enic_iounmap() */
        for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
                if (enic->bar[i].vaddr)
                        iounmap(enic->bar[i].vaddr);

/* in enic_probe() */
        struct device *dev = &pdev->dev;
                return -ENOMEM;
        SET_NETDEV_DEV(netdev, &pdev->dev);
        enic->netdev = netdev;
        enic->pdev = pdev;

         * limitation for the device. Try 47-bit first, and
         * fail to 32-bit.
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

        /* Map vNIC resources from BAR0-5
        for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
                enic->bar[i].len = pci_resource_len(pdev, i);
                enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
                if (!enic->bar[i].vaddr) {
                        dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
                        err = -ENODEV;
                enic->bar[i].bus_addr = pci_resource_start(pdev, i);

        enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
                                       ARRAY_SIZE(enic->bar));
        if (!enic->vdev) {
                err = -ENODEV;
        err = vnic_devcmd_init(enic->vdev);
                           &enic->num_vfs);
        if (enic->num_vfs) {
                err = pci_enable_sriov(pdev, enic->num_vfs);
                enic->priv_flags |= ENIC_SRIOV_ENABLED;
                num_pps = enic->num_vfs;

        enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
        if (!enic->pp) {
                err = -ENOMEM;

        spin_lock_init(&enic->devcmd_lock);
        spin_lock_init(&enic->enic_api_lock);

        /* Issue device init to initialize the vnic-to-switch link.
         * to wait here for the vnic-to-switch link initialization
        err = vnic_dev_init(enic->vdev, 0);

        netif_set_real_num_tx_queues(netdev, enic->wq_count);
        netif_set_real_num_rx_queues(netdev, enic->rq_count);

        timer_setup(&enic->notify_timer, enic_notify_timer, 0);

        INIT_WORK(&enic->reset, enic_reset);
        INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
        INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

        for (i = 0; i < enic->wq_count; i++)
                spin_lock_init(&enic->wq[i].lock);

        enic->port_mtu = enic->config.mtu;
        err = enic_set_mac_addr(netdev, enic->mac_addr);
        enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
        enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

                netdev->netdev_ops = &enic_netdev_dynamic_ops;
                netdev->netdev_ops = &enic_netdev_ops;
        netdev->stat_ops = &enic_netdev_stat_ops;

        netdev->watchdog_timeo = 2 * HZ;

        netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
                netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
                enic->loop_enable = 1;
                enic->loop_tag = enic->config.loop_tag;
                dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
        netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
                netdev->hw_features |= NETIF_F_TSO |
                netdev->hw_features |= NETIF_F_RXHASH;
                netdev->hw_features |= NETIF_F_RXCSUM;
                netdev->hw_enc_features |= NETIF_F_RXCSUM |
                netdev->hw_features |= netdev->hw_enc_features;

        err = vnic_dev_get_supported_feature_ver(enic->vdev,
        enic->vxlan.flags = (u8)a1;
        patch_level = patch_level ? patch_level - 1 : 0;
        enic->vxlan.patch_level = patch_level;

        if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
            enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
                netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
                if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
                        netdev->udp_tunnel_nic_info = &enic_udp_tunnels;

        netdev->features |= netdev->hw_features;
        netdev->vlan_features |= netdev->features;
                netdev->hw_features |= NETIF_F_NTUPLE;
                netdev->features |= NETIF_F_HIGHDMA;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        /* MTU range: 68 - 9000 */
        netdev->min_mtu = ENIC_MIN_MTU;
        netdev->max_mtu = ENIC_MAX_MTU;
        netdev->mtu = enic->port_mtu;

        vnic_dev_close(enic->vdev);
        kfree(enic->pp);
                enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
        vnic_dev_unregister(enic->vdev);

/* in enic_remove() */
        cancel_work_sync(&enic->reset);
        cancel_work_sync(&enic->change_mtu_work);
        vnic_dev_close(enic->vdev);
                enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
        kfree(enic->pp);
        vnic_dev_unregister(enic->vdev);