2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2010 Exar Corp.
47 * Default is '2' - which means disable in promisc mode
48 * and enable in non-promiscuous mode.
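/*
 * [Sketch, not part of this listing: in s2io this comment documents the
 * vlan_tag_strip module parameter. A tri-state load-time knob of this
 * shape is declared roughly as below; name and perms are illustrative.]
 */
static unsigned int vlan_tag_strip = 2;	/* 0 = off, 1 = on, 2 = follow promisc state */
module_param(vlan_tag_strip, uint, 0444);
MODULE_PARM_DESC(vlan_tag_strip, "VLAN stripping: 0 disable, 1 enable, 2 automatic");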
60 #include <linux/dma-mapping.h>
67 #include <linux/delay.h>
78 #include <linux/io-64-nonatomic-lo-hi.h>
89 #include "s2io-regs.h"
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) && in RXD_IS_UP2DT()
105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK)); in RXD_IS_UP2DT()
125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state); in is_s2io_card_up()
344 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr); in do_s2io_copy_mac_addr()
345 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8); in do_s2io_copy_mac_addr()
346 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16); in do_s2io_copy_mac_addr()
347 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24); in do_s2io_copy_mac_addr()
348 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32); in do_s2io_copy_mac_addr()
349 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40); in do_s2io_copy_mac_addr()
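/*
 * [Sketch: the six stores above unpack a 48-bit MAC held in the low
 * bytes of a u64, most significant octet first, e.g. mac_addr ==
 * 0x0022334455aaULL yields 00:22:33:44:55:aa. Stand-alone equivalent:]
 */
static void mac_from_u64_ex(u8 mac[6], u64 mac_addr)
{
	int i;

	for (i = 5; i >= 0; i--) {
		mac[i] = (u8)mac_addr;	/* lowest byte -> last octet */
		mac_addr >>= 8;
	}
}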
453 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
455 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
457 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
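/*
 * [Note: the three initializers above rely on GCC's designated range
 * syntax, which assigns one value to every element in [first ... last].
 * Minimal illustration with made-up sizes:]
 */
static const unsigned int example_cnt[8] = { [0 ... 7] = 128 };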
501 if (!sp->config.multiq) { in s2io_stop_all_tx_queue()
504 for (i = 0; i < sp->config.tx_fifo_num; i++) in s2io_stop_all_tx_queue()
505 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP; in s2io_stop_all_tx_queue()
507 netif_tx_stop_all_queues(sp->dev); in s2io_stop_all_tx_queue()
512 if (!sp->config.multiq) in s2io_stop_tx_queue()
513 sp->mac_control.fifos[fifo_no].queue_state = in s2io_stop_tx_queue()
516 netif_tx_stop_all_queues(sp->dev); in s2io_stop_tx_queue()
521 if (!sp->config.multiq) { in s2io_start_all_tx_queue()
524 for (i = 0; i < sp->config.tx_fifo_num; i++) in s2io_start_all_tx_queue()
525 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START; in s2io_start_all_tx_queue()
527 netif_tx_start_all_queues(sp->dev); in s2io_start_all_tx_queue()
532 if (!sp->config.multiq) { in s2io_wake_all_tx_queue()
535 for (i = 0; i < sp->config.tx_fifo_num; i++) in s2io_wake_all_tx_queue()
536 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START; in s2io_wake_all_tx_queue()
538 netif_tx_wake_all_queues(sp->dev); in s2io_wake_all_tx_queue()
546 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no)) in s2io_wake_tx_queue()
547 netif_wake_subqueue(fifo->dev, fifo->fifo_no); in s2io_wake_tx_queue()
548 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) { in s2io_wake_tx_queue()
549 if (netif_queue_stopped(fifo->dev)) { in s2io_wake_tx_queue()
550 fifo->queue_state = FIFO_QUEUE_START; in s2io_wake_tx_queue()
551 netif_wake_queue(fifo->dev); in s2io_wake_tx_queue()
557 * init_shared_mem - Allocation and Initialization of Memory
572 struct net_device *dev = nic->dev; in init_shared_mem()
575 struct config_param *config = &nic->config; in init_shared_mem()
576 struct mac_info *mac_control = &nic->mac_control; in init_shared_mem()
581 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
582 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
584 size += tx_cfg->fifo_len; in init_shared_mem()
590 return -EINVAL; in init_shared_mem()
594 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
595 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
597 size = tx_cfg->fifo_len; in init_shared_mem()
602 DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - " in init_shared_mem()
605 return -EINVAL; in init_shared_mem()
609 lst_size = (sizeof(struct TxD) * config->max_txds); in init_shared_mem()
612 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
613 struct fifo_info *fifo = &mac_control->fifos[i]; in init_shared_mem()
614 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
615 int fifo_len = tx_cfg->fifo_len; in init_shared_mem()
618 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL); in init_shared_mem()
619 if (!fifo->list_info) { in init_shared_mem()
621 return -ENOMEM; in init_shared_mem()
625 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
626 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len, in init_shared_mem()
628 struct fifo_info *fifo = &mac_control->fifos[i]; in init_shared_mem()
629 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
631 fifo->tx_curr_put_info.offset = 0; in init_shared_mem()
632 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1; in init_shared_mem()
633 fifo->tx_curr_get_info.offset = 0; in init_shared_mem()
634 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1; in init_shared_mem()
635 fifo->fifo_no = i; in init_shared_mem()
636 fifo->nic = nic; in init_shared_mem()
637 fifo->max_txds = MAX_SKB_FRAGS + 2; in init_shared_mem()
638 fifo->dev = dev; in init_shared_mem()
644 tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE, in init_shared_mem()
649 return -ENOMEM; in init_shared_mem()
657 mac_control->zerodma_virt_addr = tmp_v; in init_shared_mem()
661 dev->name, tmp_v); in init_shared_mem()
662 tmp_v = dma_alloc_coherent(&nic->pdev->dev, in init_shared_mem()
668 return -ENOMEM; in init_shared_mem()
674 if (l == tx_cfg->fifo_len) in init_shared_mem()
676 fifo->list_info[l].list_virt_addr = in init_shared_mem()
678 fifo->list_info[l].list_phy_addr = in init_shared_mem()
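/*
 * [Sizing sketch for the descriptor-list pages above, assuming the
 * usual shape of these macros (not copied from s2io.h): lst_size bytes
 * per TxD list, lst_per_page lists per DMA page, page count rounded up.]
 */
#define LST_PER_PAGE_EX(lst_size)	(PAGE_SIZE / (lst_size))
#define TXD_MEM_PAGE_CNT_EX(len, per)	(((len) + (per) - 1) / (per))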
685 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
686 struct fifo_info *fifo = &mac_control->fifos[i]; in init_shared_mem()
687 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
689 size = tx_cfg->fifo_len; in init_shared_mem()
690 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL); in init_shared_mem()
691 if (!fifo->ufo_in_band_v) in init_shared_mem()
692 return -ENOMEM; in init_shared_mem()
698 for (i = 0; i < config->rx_ring_num; i++) { in init_shared_mem()
699 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in init_shared_mem()
700 struct ring_info *ring = &mac_control->rings[i]; in init_shared_mem()
702 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) { in init_shared_mem()
705 dev->name, i); in init_shared_mem()
708 size += rx_cfg->num_rxd; in init_shared_mem()
709 ring->block_count = rx_cfg->num_rxd / in init_shared_mem()
710 (rxd_count[nic->rxd_mode] + 1); in init_shared_mem()
711 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count; in init_shared_mem()
713 if (nic->rxd_mode == RXD_MODE_1) in init_shared_mem()
718 for (i = 0; i < config->rx_ring_num; i++) { in init_shared_mem()
719 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in init_shared_mem()
720 struct ring_info *ring = &mac_control->rings[i]; in init_shared_mem()
722 ring->rx_curr_get_info.block_index = 0; in init_shared_mem()
723 ring->rx_curr_get_info.offset = 0; in init_shared_mem()
724 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1; in init_shared_mem()
725 ring->rx_curr_put_info.block_index = 0; in init_shared_mem()
726 ring->rx_curr_put_info.offset = 0; in init_shared_mem()
727 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1; in init_shared_mem()
728 ring->nic = nic; in init_shared_mem()
729 ring->ring_no = i; in init_shared_mem()
731 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1); in init_shared_mem()
737 rx_blocks = &ring->rx_blocks[j]; in init_shared_mem()
739 tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size, in init_shared_mem()
748 rx_blocks->block_virt_addr = tmp_v_addr; in init_shared_mem()
749 return -ENOMEM; in init_shared_mem()
754 rxd_count[nic->rxd_mode]; in init_shared_mem()
755 rx_blocks->block_virt_addr = tmp_v_addr; in init_shared_mem()
756 rx_blocks->block_dma_addr = tmp_p_addr; in init_shared_mem()
757 rx_blocks->rxds = kmalloc(size, GFP_KERNEL); in init_shared_mem()
758 if (!rx_blocks->rxds) in init_shared_mem()
759 return -ENOMEM; in init_shared_mem()
761 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) { in init_shared_mem()
762 rx_blocks->rxds[l].virt_addr = in init_shared_mem()
763 rx_blocks->block_virt_addr + in init_shared_mem()
764 (rxd_size[nic->rxd_mode] * l); in init_shared_mem()
765 rx_blocks->rxds[l].dma_addr = in init_shared_mem()
766 rx_blocks->block_dma_addr + in init_shared_mem()
767 (rxd_size[nic->rxd_mode] * l); in init_shared_mem()
773 tmp_v_addr = ring->rx_blocks[j].block_virt_addr; in init_shared_mem()
774 tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr; in init_shared_mem()
775 tmp_p_addr = ring->rx_blocks[j].block_dma_addr; in init_shared_mem()
776 tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr; in init_shared_mem()
779 pre_rxd_blk->reserved_2_pNext_RxD_block = in init_shared_mem()
781 pre_rxd_blk->pNext_RxD_Blk_physical = in init_shared_mem()
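/*
 * [Sketch of the block chaining above: each Rx block's trailing
 * pseudo-RxD records the virtual and DMA address of the next block,
 * and next = (j + 1) % blk_cnt wraps the last block back to the first,
 * closing the ring. Hypothetical mirror of the linkage:]
 */
struct rx_blk_ex {
	void		*virt;
	dma_addr_t	dma;
	u64		next_virt;	/* -> next block, CPU address */
	u64		next_dma;	/* -> next block, bus address */
};

static void link_rx_blocks_ex(struct rx_blk_ex *blk, int blk_cnt)
{
	int j;

	for (j = 0; j < blk_cnt; j++) {
		int next = (j + 1) % blk_cnt;	/* wrap to block 0 */

		blk[j].next_virt = (u64)(unsigned long)blk[next].virt;
		blk[j].next_dma  = (u64)blk[next].dma;
	}
}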
785 if (nic->rxd_mode == RXD_MODE_3B) { in init_shared_mem()
790 for (i = 0; i < config->rx_ring_num; i++) { in init_shared_mem()
791 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in init_shared_mem()
792 struct ring_info *ring = &mac_control->rings[i]; in init_shared_mem()
794 blk_cnt = rx_cfg->num_rxd / in init_shared_mem()
795 (rxd_count[nic->rxd_mode] + 1); in init_shared_mem()
797 ring->ba = kmalloc(size, GFP_KERNEL); in init_shared_mem()
798 if (!ring->ba) in init_shared_mem()
799 return -ENOMEM; in init_shared_mem()
805 (rxd_count[nic->rxd_mode] + 1); in init_shared_mem()
806 ring->ba[j] = kmalloc(size, GFP_KERNEL); in init_shared_mem()
807 if (!ring->ba[j]) in init_shared_mem()
808 return -ENOMEM; in init_shared_mem()
810 while (k != rxd_count[nic->rxd_mode]) { in init_shared_mem()
811 ba = &ring->ba[j][k]; in init_shared_mem()
813 ba->ba_0_org = kmalloc(size, GFP_KERNEL); in init_shared_mem()
814 if (!ba->ba_0_org) in init_shared_mem()
815 return -ENOMEM; in init_shared_mem()
817 tmp = (unsigned long)ba->ba_0_org; in init_shared_mem()
820 ba->ba_0 = (void *)tmp; in init_shared_mem()
823 ba->ba_1_org = kmalloc(size, GFP_KERNEL); in init_shared_mem()
824 if (!ba->ba_1_org) in init_shared_mem()
825 return -ENOMEM; in init_shared_mem()
827 tmp = (unsigned long)ba->ba_1_org; in init_shared_mem()
830 ba->ba_1 = (void *)tmp; in init_shared_mem()
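/*
 * [Sketch: ba_0/ba_1 above are aligned views into the kmalloc'd
 * ba_0_org/ba_1_org buffers; the elided lines round the raw pointer up
 * with the classic add-then-mask idiom, assuming ALIGN_SIZE is of the
 * form 2^n - 1:]
 */
static void *align_ptr_ex(void *org, unsigned long align_mask)
{
	unsigned long tmp = (unsigned long)org;

	tmp += align_mask;	/* step past the next boundary... */
	tmp &= ~align_mask;	/* ...then round down onto it */
	return (void *)tmp;
}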
839 mac_control->stats_mem = in init_shared_mem()
840 dma_alloc_coherent(&nic->pdev->dev, size, in init_shared_mem()
841 &mac_control->stats_mem_phy, GFP_KERNEL); in init_shared_mem()
843 if (!mac_control->stats_mem) { in init_shared_mem()
849 return -ENOMEM; in init_shared_mem()
852 mac_control->stats_mem_sz = size; in init_shared_mem()
854 tmp_v_addr = mac_control->stats_mem; in init_shared_mem()
855 mac_control->stats_info = tmp_v_addr; in init_shared_mem()
858 dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr); in init_shared_mem()
859 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated; in init_shared_mem()
864 * free_shared_mem - Free the allocated Memory
886 dev = nic->dev; in free_shared_mem()
888 config = &nic->config; in free_shared_mem()
889 mac_control = &nic->mac_control; in free_shared_mem()
890 stats = mac_control->stats_info; in free_shared_mem()
891 swstats = &stats->sw_stat; in free_shared_mem()
893 lst_size = sizeof(struct TxD) * config->max_txds; in free_shared_mem()
896 for (i = 0; i < config->tx_fifo_num; i++) { in free_shared_mem()
897 struct fifo_info *fifo = &mac_control->fifos[i]; in free_shared_mem()
898 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in free_shared_mem()
900 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page); in free_shared_mem()
905 if (!fifo->list_info) in free_shared_mem()
908 fli = &fifo->list_info[mem_blks]; in free_shared_mem()
909 if (!fli->list_virt_addr) in free_shared_mem()
911 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, in free_shared_mem()
912 fli->list_virt_addr, in free_shared_mem()
913 fli->list_phy_addr); in free_shared_mem()
914 swstats->mem_freed += PAGE_SIZE; in free_shared_mem()
919 if (mac_control->zerodma_virt_addr) { in free_shared_mem()
920 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, in free_shared_mem()
921 mac_control->zerodma_virt_addr, in free_shared_mem()
926 dev->name, mac_control->zerodma_virt_addr); in free_shared_mem()
927 swstats->mem_freed += PAGE_SIZE; in free_shared_mem()
929 kfree(fifo->list_info); in free_shared_mem()
930 swstats->mem_freed += tx_cfg->fifo_len * in free_shared_mem()
935 for (i = 0; i < config->rx_ring_num; i++) { in free_shared_mem()
936 struct ring_info *ring = &mac_control->rings[i]; in free_shared_mem()
938 blk_cnt = ring->block_count; in free_shared_mem()
940 tmp_v_addr = ring->rx_blocks[j].block_virt_addr; in free_shared_mem()
941 tmp_p_addr = ring->rx_blocks[j].block_dma_addr; in free_shared_mem()
944 dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr, in free_shared_mem()
946 swstats->mem_freed += size; in free_shared_mem()
947 kfree(ring->rx_blocks[j].rxds); in free_shared_mem()
948 swstats->mem_freed += sizeof(struct rxd_info) * in free_shared_mem()
949 rxd_count[nic->rxd_mode]; in free_shared_mem()
953 if (nic->rxd_mode == RXD_MODE_3B) { in free_shared_mem()
955 for (i = 0; i < config->rx_ring_num; i++) { in free_shared_mem()
956 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in free_shared_mem()
957 struct ring_info *ring = &mac_control->rings[i]; in free_shared_mem()
959 blk_cnt = rx_cfg->num_rxd / in free_shared_mem()
960 (rxd_count[nic->rxd_mode] + 1); in free_shared_mem()
963 if (!ring->ba[j]) in free_shared_mem()
965 while (k != rxd_count[nic->rxd_mode]) { in free_shared_mem()
966 struct buffAdd *ba = &ring->ba[j][k]; in free_shared_mem()
967 kfree(ba->ba_0_org); in free_shared_mem()
968 swstats->mem_freed += in free_shared_mem()
970 kfree(ba->ba_1_org); in free_shared_mem()
971 swstats->mem_freed += in free_shared_mem()
975 kfree(ring->ba[j]); in free_shared_mem()
976 swstats->mem_freed += sizeof(struct buffAdd) * in free_shared_mem()
977 (rxd_count[nic->rxd_mode] + 1); in free_shared_mem()
979 kfree(ring->ba); in free_shared_mem()
980 swstats->mem_freed += sizeof(struct buffAdd *) * in free_shared_mem()
985 for (i = 0; i < nic->config.tx_fifo_num; i++) { in free_shared_mem()
986 struct fifo_info *fifo = &mac_control->fifos[i]; in free_shared_mem()
987 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in free_shared_mem()
989 if (fifo->ufo_in_band_v) { in free_shared_mem()
990 swstats->mem_freed += tx_cfg->fifo_len * in free_shared_mem()
992 kfree(fifo->ufo_in_band_v); in free_shared_mem()
996 if (mac_control->stats_mem) { in free_shared_mem()
997 swstats->mem_freed += mac_control->stats_mem_sz; in free_shared_mem()
998 dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz, in free_shared_mem()
999 mac_control->stats_mem, in free_shared_mem()
1000 mac_control->stats_mem_phy); in free_shared_mem()
1005 * s2io_verify_pci_mode - verify the PCI bus mode the adapter is operating in
1010 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_verify_pci_mode()
1014 val64 = readq(&bar0->pci_mode); in s2io_verify_pci_mode()
1018 return -1; /* Unknown PCI mode */ in s2io_verify_pci_mode()
1028 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { in s2io_on_nec_bridge()
1029 if (tdev->bus == s2io_pdev->bus->parent) { in s2io_on_nec_bridge()
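/*
 * [Condensed sketch of the probe above: walk every PCI device and
 * report whether the NEC bridge sits directly upstream of the adapter.
 * pci_get_device() holds a reference, hence the pci_dev_put().]
 */
static int on_nec_bridge_ex(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;

	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID &&
		    tdev->bus == s2io_pdev->bus->parent) {
			pci_dev_put(tdev);
			return 1;
		}
	}
	return 0;
}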
1040 * s2io_print_pci_mode - print the PCI bus mode in which the adapter is operating
1044 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_print_pci_mode()
1047 struct config_param *config = &nic->config; in s2io_print_pci_mode()
1050 val64 = readq(&bar0->pci_mode); in s2io_print_pci_mode()
1054 return -1; /* Unknown PCI mode */ in s2io_print_pci_mode()
1056 config->bus_speed = bus_speed[mode]; in s2io_print_pci_mode()
1058 if (s2io_on_nec_bridge(nic->pdev)) { in s2io_print_pci_mode()
1059 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n", in s2io_print_pci_mode()
1060 nic->dev->name); in s2io_print_pci_mode()
1091 mode = -1; in s2io_print_pci_mode()
1095 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode); in s2io_print_pci_mode()
1101 * init_tti - Initialization of the transmit traffic interrupt scheme
1109 * '-1' on failure
1114 struct XENA_dev_config __iomem *bar0 = nic->bar0; in init_tti()
1117 struct config_param *config = &nic->config; in init_tti()
1119 for (i = 0; i < config->tx_fifo_num; i++) { in init_tti()
1125 if (nic->device_type == XFRAME_II_DEVICE) { in init_tti()
1126 int count = (nic->config.bus_speed * 125)/2; in init_tti()
1138 writeq(val64, &bar0->tti_data1_mem); in init_tti()
1140 if (nic->config.intr_type == MSI_X) { in init_tti()
1146 if ((nic->config.tx_steering_type == in init_tti()
1148 (config->tx_fifo_num > 1) && in init_tti()
1149 (i >= nic->udp_fifo_idx) && in init_tti()
1150 (i < (nic->udp_fifo_idx + in init_tti()
1151 nic->total_udp_fifos))) in init_tti()
1163 writeq(val64, &bar0->tti_data2_mem); in init_tti()
1168 writeq(val64, &bar0->tti_command_mem); in init_tti()
1170 if (wait_for_cmd_complete(&bar0->tti_command_mem, in init_tti()
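/*
 * [Worked example for the Xframe-II timer value above: the TTI timer
 * is scaled by bus speed, so on a 266 MHz PCI-X bus
 * count = (266 * 125) / 2 = 16625.]
 */
static inline int tti_timer_count_ex(int bus_speed_mhz)
{
	return (bus_speed_mhz * 125) / 2;
}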
1180 * init_nic - Initialization of hardware
1185 * '-1' on failure (endian settings incorrect).
1190 struct XENA_dev_config __iomem *bar0 = nic->bar0; in init_nic()
1191 struct net_device *dev = nic->dev; in init_nic()
1199 struct config_param *config = &nic->config; in init_nic()
1200 struct mac_info *mac_control = &nic->mac_control; in init_nic()
1205 return -EIO; in init_nic()
1211 if (nic->device_type & XFRAME_II_DEVICE) { in init_nic()
1213 writeq(val64, &bar0->sw_reset); in init_nic()
1215 val64 = readq(&bar0->sw_reset); in init_nic()
1220 writeq(val64, &bar0->sw_reset); in init_nic()
1222 val64 = readq(&bar0->sw_reset); in init_nic()
1227 if (nic->device_type == XFRAME_II_DEVICE) { in init_nic()
1229 val64 = readq(&bar0->adapter_status); in init_nic()
1235 return -ENODEV; in init_nic()
1239 add = &bar0->mac_cfg; in init_nic()
1240 val64 = readq(&bar0->mac_cfg); in init_nic()
1242 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1244 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1248 val64 = readq(&bar0->mac_int_mask); in init_nic()
1249 val64 = readq(&bar0->mc_int_mask); in init_nic()
1250 val64 = readq(&bar0->xgxs_int_mask); in init_nic()
1253 val64 = dev->mtu; in init_nic()
1254 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); in init_nic()
1256 if (nic->device_type & XFRAME_II_DEVICE) { in init_nic()
1259 &bar0->dtx_control, UF); in init_nic()
1267 &bar0->dtx_control, UF); in init_nic()
1268 val64 = readq(&bar0->dtx_control); in init_nic()
1275 writeq(val64, &bar0->tx_fifo_partition_0); in init_nic()
1276 writeq(val64, &bar0->tx_fifo_partition_1); in init_nic()
1277 writeq(val64, &bar0->tx_fifo_partition_2); in init_nic()
1278 writeq(val64, &bar0->tx_fifo_partition_3); in init_nic()
1280 for (i = 0, j = 0; i < config->tx_fifo_num; i++) { in init_nic()
1281 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_nic()
1283 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) | in init_nic()
1284 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3); in init_nic()
1286 if (i == (config->tx_fifo_num - 1)) { in init_nic()
1293 writeq(val64, &bar0->tx_fifo_partition_0); in init_nic()
1298 writeq(val64, &bar0->tx_fifo_partition_1); in init_nic()
1303 writeq(val64, &bar0->tx_fifo_partition_2); in init_nic()
1308 writeq(val64, &bar0->tx_fifo_partition_3); in init_nic()
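/*
 * [Packing sketch for the partition registers above, assuming vBIT()
 * places 'val' as an 'sz'-bit field at big-endian bit offset 'loc' of
 * a 64-bit register. Each register carries two FIFOs (j = 0, 1), with
 * (fifo_len - 1) at bits 19/51 and the 3-bit priority at bits 5/37:]
 */
#define vBIT_EX(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))

static u64 pack_two_fifos_ex(u32 len0, u32 prio0, u32 len1, u32 prio1)
{
	return vBIT_EX(len0 - 1, 19, 13) | vBIT_EX(prio0, 5, 3) |
	       vBIT_EX(len1 - 1, 51, 13) | vBIT_EX(prio1, 37, 3);
}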
1320 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. in init_nic()
1322 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4)) in init_nic()
1323 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable); in init_nic()
1325 val64 = readq(&bar0->tx_fifo_partition_0); in init_nic()
1327 &bar0->tx_fifo_partition_0, (unsigned long long)val64); in init_nic()
1333 val64 = readq(&bar0->tx_pa_cfg); in init_nic()
1338 writeq(val64, &bar0->tx_pa_cfg); in init_nic()
1342 for (i = 0; i < config->rx_ring_num; i++) { in init_nic()
1343 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in init_nic()
1345 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3); in init_nic()
1347 writeq(val64, &bar0->rx_queue_priority); in init_nic()
1354 if (nic->device_type & XFRAME_II_DEVICE) in init_nic()
1359 for (i = 0; i < config->rx_ring_num; i++) { in init_nic()
1362 mem_share = (mem_size / config->rx_ring_num + in init_nic()
1363 mem_size % config->rx_ring_num); in init_nic()
1367 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1371 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1375 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1379 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1383 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1387 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1391 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1396 writeq(val64, &bar0->rx_queue_cfg); in init_nic()
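/*
 * [Equivalent of the per-ring memory split above: the Rx buffer space
 * is divided evenly across the rings, and ring 0 additionally absorbs
 * the division remainder:]
 */
static unsigned int rx_mem_share_ex(unsigned int mem_size,
				    unsigned int ring, unsigned int n_rings)
{
	unsigned int share = mem_size / n_rings;

	if (ring == 0)
		share += mem_size % n_rings;	/* remainder goes to ring 0 */
	return share;
}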
1402 switch (config->tx_fifo_num) { in init_nic()
1405 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1406 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1407 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1408 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1409 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1413 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1414 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1415 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1416 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1418 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1422 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1424 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1426 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1428 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1430 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1434 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1435 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1436 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1437 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1439 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1443 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1445 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1447 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1449 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1451 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1455 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1457 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1459 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1461 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1463 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1467 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1469 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1471 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1473 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1475 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1479 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1480 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1481 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1482 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1484 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1489 val64 = readq(&bar0->tx_fifo_partition_0); in init_nic()
1491 writeq(val64, &bar0->tx_fifo_partition_0); in init_nic()
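/*
 * [Rough sketch of what the big switch above programs: the five
 * tx_w_round_robin registers form a service calendar of FIFO numbers,
 * one per byte slot, filled with the pattern 0..N-1 repeated. This is
 * an approximation; the driver's literal constants differ in the
 * trailing slots where the calendar length is not a multiple of the
 * FIFO count:]
 */
static void build_tx_calendar_ex(u8 slots[40], int n_fifos)
{
	int i;

	for (i = 0; i < 40; i++)
		slots[i] = i % n_fifos;
}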
1497 switch (config->rx_ring_num) { in init_nic()
1500 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1501 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1502 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1503 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1504 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1507 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1511 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1512 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1513 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1514 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1516 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1519 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1523 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1525 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1527 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1529 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1531 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1534 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1538 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1539 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1540 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1541 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1543 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1546 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1550 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1552 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1554 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1556 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1558 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1561 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1565 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1567 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1569 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1571 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1573 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1576 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1580 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1582 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1584 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1586 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1588 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1591 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1595 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1596 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1597 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1598 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1600 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1603 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1610 writeq(val64, &bar0->rts_frm_len_n[i]); in init_nic()
1613 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22); in init_nic()
1614 for (i = 0 ; i < config->rx_ring_num ; i++) in init_nic()
1615 writeq(val64, &bar0->rts_frm_len_n[i]); in init_nic()
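/*
 * [Example for the frame-length programming above: the rings accept
 * frames up to MTU + 22 bytes, i.e. 14 (Ethernet header) + 4 (VLAN
 * tag) + 4 (FCS) of overhead, so the default MTU of 1500 gives 1522.]
 */
static inline u32 max_rx_frm_len_ex(u32 mtu)
{
	return mtu + 22;
}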
1620 for (i = 0; i < config->rx_ring_num; i++) { in init_nic()
1629 &bar0->rts_frm_len_n[i]); in init_nic()
1638 dev->name, i); in init_nic()
1639 return -ENODEV; in init_nic()
1644 writeq(mac_control->stats_mem_phy, &bar0->stat_addr); in init_nic()
1646 if (nic->device_type == XFRAME_II_DEVICE) { in init_nic()
1648 writeq(val64, &bar0->stat_byte_cnt); in init_nic()
1657 writeq(val64, &bar0->mac_link_util); in init_nic()
1665 if (SUCCESS != init_tti(nic, nic->last_link_state, true)) in init_nic()
1666 return -ENODEV; in init_nic()
1669 if (nic->device_type == XFRAME_II_DEVICE) { in init_nic()
1674 int count = (nic->config.bus_speed * 125)/4; in init_nic()
1683 writeq(val64, &bar0->rti_data1_mem); in init_nic()
1687 if (nic->config.intr_type == MSI_X) in init_nic()
1693 writeq(val64, &bar0->rti_data2_mem); in init_nic()
1695 for (i = 0; i < config->rx_ring_num; i++) { in init_nic()
1699 writeq(val64, &bar0->rti_command_mem); in init_nic()
1710 val64 = readq(&bar0->rti_command_mem); in init_nic()
1716 dev->name); in init_nic()
1717 return -ENODEV; in init_nic()
1728 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3); in init_nic()
1729 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7); in init_nic()
1732 add = &bar0->mac_cfg; in init_nic()
1733 val64 = readq(&bar0->mac_cfg); in init_nic()
1735 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1737 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1739 val64 = readq(&bar0->mac_cfg); in init_nic()
1742 add = &bar0->mac_cfg; in init_nic()
1743 val64 = readq(&bar0->mac_cfg); in init_nic()
1745 if (nic->device_type == XFRAME_II_DEVICE) in init_nic()
1746 writeq(val64, &bar0->mac_cfg); in init_nic()
1748 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1750 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1758 val64 = readq(&bar0->rmac_pause_cfg); in init_nic()
1760 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time); in init_nic()
1761 writeq(val64, &bar0->rmac_pause_cfg); in init_nic()
1772 nic->mac_control.mc_pause_threshold_q0q3) in init_nic()
1775 writeq(val64, &bar0->mc_pause_thresh_q0q3); in init_nic()
1780 nic->mac_control.mc_pause_threshold_q4q7) in init_nic()
1783 writeq(val64, &bar0->mc_pause_thresh_q4q7); in init_nic()
1789 val64 = readq(&bar0->pic_control); in init_nic()
1791 writeq(val64, &bar0->pic_control); in init_nic()
1793 if (nic->config.bus_speed == 266) { in init_nic()
1794 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout); in init_nic()
1795 writeq(0x0, &bar0->read_retry_delay); in init_nic()
1796 writeq(0x0, &bar0->write_retry_delay); in init_nic()
1803 if (nic->device_type == XFRAME_II_DEVICE) { in init_nic()
1806 writeq(val64, &bar0->misc_control); in init_nic()
1807 val64 = readq(&bar0->pic_control2); in init_nic()
1809 writeq(val64, &bar0->pic_control2); in init_nic()
1811 if (strstr(nic->product_name, "CX4")) { in init_nic()
1813 writeq(val64, &bar0->tmac_avg_ipg); in init_nic()
1823 if (nic->device_type == XFRAME_II_DEVICE) in s2io_link_fault_indication()
1830 * do_s2io_write_bits - update alarm bits in alarm register
1853 struct XENA_dev_config __iomem *bar0 = nic->bar0; in en_dis_err_alarms()
1857 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask); in en_dis_err_alarms()
1864 TXDMA_SM_INT, flag, &bar0->txdma_int_mask); in en_dis_err_alarms()
1869 &bar0->pfc_err_mask); in en_dis_err_alarms()
1873 TDA_PCIX_ERR, flag, &bar0->tda_err_mask); in en_dis_err_alarms()
1881 flag, &bar0->pcc_err_mask); in en_dis_err_alarms()
1884 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask); in en_dis_err_alarms()
1889 flag, &bar0->lso_err_mask); in en_dis_err_alarms()
1892 flag, &bar0->tpa_err_mask); in en_dis_err_alarms()
1894 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask); in en_dis_err_alarms()
1900 &bar0->mac_int_mask); in en_dis_err_alarms()
1904 flag, &bar0->mac_tmac_err_mask); in en_dis_err_alarms()
1910 &bar0->xgxs_int_mask); in en_dis_err_alarms()
1913 flag, &bar0->xgxs_txgxs_err_mask); in en_dis_err_alarms()
1920 flag, &bar0->rxdma_int_mask); in en_dis_err_alarms()
1924 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask); in en_dis_err_alarms()
1928 &bar0->prc_pcix_err_mask); in en_dis_err_alarms()
1931 &bar0->rpa_err_mask); in en_dis_err_alarms()
1937 flag, &bar0->rda_err_mask); in en_dis_err_alarms()
1940 flag, &bar0->rti_err_mask); in en_dis_err_alarms()
1946 &bar0->mac_int_mask); in en_dis_err_alarms()
1953 flag, &bar0->mac_rmac_err_mask); in en_dis_err_alarms()
1959 &bar0->xgxs_int_mask); in en_dis_err_alarms()
1961 &bar0->xgxs_rxgxs_err_mask); in en_dis_err_alarms()
1967 flag, &bar0->mc_int_mask); in en_dis_err_alarms()
1970 &bar0->mc_err_mask); in en_dis_err_alarms()
1972 nic->general_int_mask = gen_int_mask; in en_dis_err_alarms()
1975 nic->general_int_mask = 0; in en_dis_err_alarms()
1979 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1991 struct XENA_dev_config __iomem *bar0 = nic->bar0; in en_dis_able_nic_intrs()
1994 intr_mask = nic->general_int_mask; in en_dis_able_nic_intrs()
2011 &bar0->pic_int_mask); in en_dis_able_nic_intrs()
2013 &bar0->gpio_int_mask); in en_dis_able_nic_intrs()
2015 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); in en_dis_able_nic_intrs()
2021 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); in en_dis_able_nic_intrs()
2033 writeq(0x0, &bar0->tx_traffic_mask); in en_dis_able_nic_intrs()
2039 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); in en_dis_able_nic_intrs()
2048 writeq(0x0, &bar0->rx_traffic_mask); in en_dis_able_nic_intrs()
2054 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); in en_dis_able_nic_intrs()
2058 temp64 = readq(&bar0->general_int_mask); in en_dis_able_nic_intrs()
2063 writeq(temp64, &bar0->general_int_mask); in en_dis_able_nic_intrs()
2065 nic->general_int_mask = readq(&bar0->general_int_mask); in en_dis_able_nic_intrs()
2069 * verify_pcc_quiescent - Checks for PCC quiescent state
2079 struct XENA_dev_config __iomem *bar0 = sp->bar0; in verify_pcc_quiescent()
2080 u64 val64 = readq(&bar0->adapter_status); in verify_pcc_quiescent()
2082 herc = (sp->device_type == XFRAME_II_DEVICE); in verify_pcc_quiescent()
2085 if ((!herc && (sp->pdev->revision >= 4)) || herc) { in verify_pcc_quiescent()
2093 if ((!herc && (sp->pdev->revision >= 4)) || herc) { in verify_pcc_quiescent()
2107 * verify_xena_quiescence - Checks whether the H/W is ready
2121 struct XENA_dev_config __iomem *bar0 = sp->bar0; in verify_xena_quiescence()
2122 u64 val64 = readq(&bar0->adapter_status); in verify_xena_quiescence()
2164 sp->device_type == XFRAME_II_DEVICE && in verify_xena_quiescence()
2178 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2187 struct XENA_dev_config __iomem *bar0 = sp->bar0; in fix_mac_address()
2191 writeq(fix_mac[i++], &bar0->gpio_control); in fix_mac_address()
2193 (void) readq(&bar0->gpio_control); in fix_mac_address()
2198 * start_nic - Turns the device on
2207 * SUCCESS on success and -1 on failure.
2212 struct XENA_dev_config __iomem *bar0 = nic->bar0; in start_nic()
2213 struct net_device *dev = nic->dev; in start_nic()
2216 struct config_param *config = &nic->config; in start_nic()
2217 struct mac_info *mac_control = &nic->mac_control; in start_nic()
2220 for (i = 0; i < config->rx_ring_num; i++) { in start_nic()
2221 struct ring_info *ring = &mac_control->rings[i]; in start_nic()
2223 writeq((u64)ring->rx_blocks[0].block_dma_addr, in start_nic()
2224 &bar0->prc_rxd0_n[i]); in start_nic()
2226 val64 = readq(&bar0->prc_ctrl_n[i]); in start_nic()
2227 if (nic->rxd_mode == RXD_MODE_1) in start_nic()
2231 if (nic->device_type == XFRAME_II_DEVICE) in start_nic()
2235 writeq(val64, &bar0->prc_ctrl_n[i]); in start_nic()
2238 if (nic->rxd_mode == RXD_MODE_3B) { in start_nic()
2240 val64 = readq(&bar0->rx_pa_cfg); in start_nic()
2242 writeq(val64, &bar0->rx_pa_cfg); in start_nic()
2246 val64 = readq(&bar0->rx_pa_cfg); in start_nic()
2248 writeq(val64, &bar0->rx_pa_cfg); in start_nic()
2249 nic->vlan_strip_flag = 0; in start_nic()
2253 * Enabling MC-RLDRAM. After enabling the device, we timeout in start_nic()
2257 val64 = readq(&bar0->mc_rldram_mrs); in start_nic()
2259 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); in start_nic()
2260 val64 = readq(&bar0->mc_rldram_mrs); in start_nic()
2262 msleep(100); /* Delay by around 100 ms. */ in start_nic()
2265 val64 = readq(&bar0->adapter_control); in start_nic()
2267 writeq(val64, &bar0->adapter_control); in start_nic()
2273 val64 = readq(&bar0->adapter_status); in start_nic()
2277 dev->name, (unsigned long long)val64); in start_nic()
2290 val64 = readq(&bar0->adapter_control); in start_nic()
2292 writeq(val64, &bar0->adapter_control); in start_nic()
2299 schedule_work(&nic->set_link_task); in start_nic()
2301 /* SXE-002: Initialize link and activity LED */ in start_nic()
2302 subid = nic->pdev->subsystem_device; in start_nic()
2304 (nic->device_type == XFRAME_I_DEVICE)) { in start_nic()
2305 val64 = readq(&bar0->gpio_control); in start_nic()
2307 writeq(val64, &bar0->gpio_control); in start_nic()
2315 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2323 struct s2io_nic *nic = fifo_data->nic; in s2io_txdl_getskb()
2329 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) { in s2io_txdl_getskb()
2330 dma_unmap_single(&nic->pdev->dev, in s2io_txdl_getskb()
2331 (dma_addr_t)txds->Buffer_Pointer, in s2io_txdl_getskb()
2336 skb = (struct sk_buff *)((unsigned long)txds->Host_Control); in s2io_txdl_getskb()
2338 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); in s2io_txdl_getskb()
2341 dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer, in s2io_txdl_getskb()
2343 frg_cnt = skb_shinfo(skb)->nr_frags; in s2io_txdl_getskb()
2347 const skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; in s2io_txdl_getskb()
2348 if (!txds->Buffer_Pointer) in s2io_txdl_getskb()
2350 dma_unmap_page(&nic->pdev->dev, in s2io_txdl_getskb()
2351 (dma_addr_t)txds->Buffer_Pointer, in s2io_txdl_getskb()
2355 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); in s2io_txdl_getskb()
2360 * free_tx_buffers - Free all queued Tx buffers
2369 struct net_device *dev = nic->dev; in free_tx_buffers()
2374 struct config_param *config = &nic->config; in free_tx_buffers()
2375 struct mac_info *mac_control = &nic->mac_control; in free_tx_buffers()
2376 struct stat_block *stats = mac_control->stats_info; in free_tx_buffers()
2377 struct swStat *swstats = &stats->sw_stat; in free_tx_buffers()
2379 for (i = 0; i < config->tx_fifo_num; i++) { in free_tx_buffers()
2380 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in free_tx_buffers()
2381 struct fifo_info *fifo = &mac_control->fifos[i]; in free_tx_buffers()
2384 spin_lock_irqsave(&fifo->tx_lock, flags); in free_tx_buffers()
2385 for (j = 0; j < tx_cfg->fifo_len; j++) { in free_tx_buffers()
2386 txdp = fifo->list_info[j].list_virt_addr; in free_tx_buffers()
2387 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); in free_tx_buffers()
2389 swstats->mem_freed += skb->truesize; in free_tx_buffers()
2396 dev->name, cnt, i); in free_tx_buffers()
2397 fifo->tx_curr_get_info.offset = 0; in free_tx_buffers()
2398 fifo->tx_curr_put_info.offset = 0; in free_tx_buffers()
2399 spin_unlock_irqrestore(&fifo->tx_lock, flags); in free_tx_buffers()
2404 * stop_nic - To stop the nic
2415 struct XENA_dev_config __iomem *bar0 = nic->bar0; in stop_nic()
2426 val64 = readq(&bar0->adapter_control); in stop_nic()
2428 writeq(val64, &bar0->adapter_control); in stop_nic()
2432 * fill_rx_buffers - Allocates the Rx side skbs
2452 * SUCCESS on success or an appropriate -ve value on failure.
2468 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat; in fill_rx_buffers()
2470 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left; in fill_rx_buffers()
2472 block_no1 = ring->rx_curr_get_info.block_index; in fill_rx_buffers()
2474 block_no = ring->rx_curr_put_info.block_index; in fill_rx_buffers()
2476 off = ring->rx_curr_put_info.offset; in fill_rx_buffers()
2478 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr; in fill_rx_buffers()
2481 (off == ring->rx_curr_get_info.offset) && in fill_rx_buffers()
2482 (rxdp->Host_Control)) { in fill_rx_buffers()
2484 ring->dev->name); in fill_rx_buffers()
2487 if (off && (off == ring->rxd_count)) { in fill_rx_buffers()
2488 ring->rx_curr_put_info.block_index++; in fill_rx_buffers()
2489 if (ring->rx_curr_put_info.block_index == in fill_rx_buffers()
2490 ring->block_count) in fill_rx_buffers()
2491 ring->rx_curr_put_info.block_index = 0; in fill_rx_buffers()
2492 block_no = ring->rx_curr_put_info.block_index; in fill_rx_buffers()
2494 ring->rx_curr_put_info.offset = off; in fill_rx_buffers()
2495 rxdp = ring->rx_blocks[block_no].block_virt_addr; in fill_rx_buffers()
2497 ring->dev->name, rxdp); in fill_rx_buffers()
2501 if ((rxdp->Control_1 & RXD_OWN_XENA) && in fill_rx_buffers()
2502 ((ring->rxd_mode == RXD_MODE_3B) && in fill_rx_buffers()
2503 (rxdp->Control_2 & s2BIT(0)))) { in fill_rx_buffers()
2504 ring->rx_curr_put_info.offset = off; in fill_rx_buffers()
2508 size = ring->mtu + in fill_rx_buffers()
2511 if (ring->rxd_mode == RXD_MODE_1) in fill_rx_buffers()
2514 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4; in fill_rx_buffers()
2517 skb = netdev_alloc_skb(nic->dev, size); in fill_rx_buffers()
2520 ring->dev->name); in fill_rx_buffers()
2523 first_rxdp->Control_1 |= RXD_OWN_XENA; in fill_rx_buffers()
2525 swstats->mem_alloc_fail_cnt++; in fill_rx_buffers()
2527 return -ENOMEM ; in fill_rx_buffers()
2529 swstats->mem_allocated += skb->truesize; in fill_rx_buffers()
2531 if (ring->rxd_mode == RXD_MODE_1) { in fill_rx_buffers()
2532 /* 1 buffer mode - normal operation mode */ in fill_rx_buffers()
2536 rxdp1->Buffer0_ptr = in fill_rx_buffers()
2537 dma_map_single(&ring->pdev->dev, skb->data, in fill_rx_buffers()
2538 size - NET_IP_ALIGN, in fill_rx_buffers()
2540 if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr)) in fill_rx_buffers()
2543 rxdp->Control_2 = in fill_rx_buffers()
2544 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); in fill_rx_buffers()
2545 rxdp->Host_Control = (unsigned long)skb; in fill_rx_buffers()
2546 } else if (ring->rxd_mode == RXD_MODE_3B) { in fill_rx_buffers()
2548 * 2 buffer mode - in fill_rx_buffers()
2555 Buffer0_ptr = rxdp3->Buffer0_ptr; in fill_rx_buffers()
2556 Buffer1_ptr = rxdp3->Buffer1_ptr; in fill_rx_buffers()
2559 rxdp3->Buffer0_ptr = Buffer0_ptr; in fill_rx_buffers()
2560 rxdp3->Buffer1_ptr = Buffer1_ptr; in fill_rx_buffers()
2562 ba = &ring->ba[block_no][off]; in fill_rx_buffers()
2564 tmp = (u64)(unsigned long)skb->data; in fill_rx_buffers()
2567 skb->data = (void *) (unsigned long)tmp; in fill_rx_buffers()
2571 rxdp3->Buffer0_ptr = in fill_rx_buffers()
2572 dma_map_single(&ring->pdev->dev, in fill_rx_buffers()
2573 ba->ba_0, BUF0_LEN, in fill_rx_buffers()
2575 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr)) in fill_rx_buffers()
2578 dma_sync_single_for_device(&ring->pdev->dev, in fill_rx_buffers()
2579 (dma_addr_t)rxdp3->Buffer0_ptr, in fill_rx_buffers()
2583 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); in fill_rx_buffers()
2584 if (ring->rxd_mode == RXD_MODE_3B) { in fill_rx_buffers()
2591 rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev, in fill_rx_buffers()
2592 skb->data, in fill_rx_buffers()
2593 ring->mtu + 4, in fill_rx_buffers()
2596 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr)) in fill_rx_buffers()
2600 rxdp3->Buffer1_ptr = in fill_rx_buffers()
2601 dma_map_single(&ring->pdev->dev, in fill_rx_buffers()
2602 ba->ba_1, in fill_rx_buffers()
2606 if (dma_mapping_error(&nic->pdev->dev, in fill_rx_buffers()
2607 rxdp3->Buffer1_ptr)) { in fill_rx_buffers()
2608 dma_unmap_single(&ring->pdev->dev, in fill_rx_buffers()
2610 skb->data, in fill_rx_buffers()
2611 ring->mtu + 4, in fill_rx_buffers()
2616 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); in fill_rx_buffers()
2617 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 in fill_rx_buffers()
2618 (ring->mtu + 4); in fill_rx_buffers()
2620 rxdp->Control_2 |= s2BIT(0); in fill_rx_buffers()
2621 rxdp->Host_Control = (unsigned long) (skb); in fill_rx_buffers()
2623 if (alloc_tab & ((1 << rxsync_frequency) - 1)) in fill_rx_buffers()
2624 rxdp->Control_1 |= RXD_OWN_XENA; in fill_rx_buffers()
2626 if (off == (ring->rxd_count + 1)) in fill_rx_buffers()
2628 ring->rx_curr_put_info.offset = off; in fill_rx_buffers()
2630 rxdp->Control_2 |= SET_RXD_MARKER; in fill_rx_buffers()
2631 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { in fill_rx_buffers()
2634 first_rxdp->Control_1 |= RXD_OWN_XENA; in fill_rx_buffers()
2638 ring->rx_bufs_left += 1; in fill_rx_buffers()
2649 first_rxdp->Control_1 |= RXD_OWN_XENA; in fill_rx_buffers()
2655 swstats->pci_map_fail_cnt++; in fill_rx_buffers()
2656 swstats->mem_freed += skb->truesize; in fill_rx_buffers()
2658 return -ENOMEM; in fill_rx_buffers()
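/*
 * [Summary sketch of the two descriptor layouts filled above:
 * RXD_MODE_1 maps a single buffer covering the whole frame, while
 * RXD_MODE_3B splits it across three buffers. Field meanings as used
 * in the code; the mirror struct itself is hypothetical:]
 */
struct rxd3_layout_ex {
	u64 buf0;	/* BUF0_LEN-byte header area (ba->ba_0) */
	u64 buf1;	/* 1-byte dummy buffer (ba->ba_1) */
	u64 buf2;	/* mtu + 4 bytes of packet data (skb->data) */
};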
2663 struct net_device *dev = sp->dev; in free_rxd_blk()
2669 struct mac_info *mac_control = &sp->mac_control; in free_rxd_blk()
2670 struct stat_block *stats = mac_control->stats_info; in free_rxd_blk()
2671 struct swStat *swstats = &stats->sw_stat; in free_rxd_blk()
2673 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { in free_rxd_blk()
2674 rxdp = mac_control->rings[ring_no]. in free_rxd_blk()
2676 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); in free_rxd_blk()
2679 if (sp->rxd_mode == RXD_MODE_1) { in free_rxd_blk()
2681 dma_unmap_single(&sp->pdev->dev, in free_rxd_blk()
2682 (dma_addr_t)rxdp1->Buffer0_ptr, in free_rxd_blk()
2683 dev->mtu + in free_rxd_blk()
2688 } else if (sp->rxd_mode == RXD_MODE_3B) { in free_rxd_blk()
2690 dma_unmap_single(&sp->pdev->dev, in free_rxd_blk()
2691 (dma_addr_t)rxdp3->Buffer0_ptr, in free_rxd_blk()
2693 dma_unmap_single(&sp->pdev->dev, in free_rxd_blk()
2694 (dma_addr_t)rxdp3->Buffer1_ptr, in free_rxd_blk()
2696 dma_unmap_single(&sp->pdev->dev, in free_rxd_blk()
2697 (dma_addr_t)rxdp3->Buffer2_ptr, in free_rxd_blk()
2698 dev->mtu + 4, DMA_FROM_DEVICE); in free_rxd_blk()
2701 swstats->mem_freed += skb->truesize; in free_rxd_blk()
2703 mac_control->rings[ring_no].rx_bufs_left -= 1; in free_rxd_blk()
2708 * free_rx_buffers - Frees all Rx buffers
2718 struct net_device *dev = sp->dev; in free_rx_buffers()
2720 struct config_param *config = &sp->config; in free_rx_buffers()
2721 struct mac_info *mac_control = &sp->mac_control; in free_rx_buffers()
2723 for (i = 0; i < config->rx_ring_num; i++) { in free_rx_buffers()
2724 struct ring_info *ring = &mac_control->rings[i]; in free_rx_buffers()
2729 ring->rx_curr_put_info.block_index = 0; in free_rx_buffers()
2730 ring->rx_curr_get_info.block_index = 0; in free_rx_buffers()
2731 ring->rx_curr_put_info.offset = 0; in free_rx_buffers()
2732 ring->rx_curr_get_info.offset = 0; in free_rx_buffers()
2733 ring->rx_bufs_left = 0; in free_rx_buffers()
2735 dev->name, buf_cnt, i); in free_rx_buffers()
2741 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { in s2io_chk_rx_buffers()
2743 ring->dev->name); in s2io_chk_rx_buffers()
2749 * s2io_poll_msix - Rx interrupt handler for NAPI support
2764 struct net_device *dev = ring->dev; in s2io_poll_msix()
2769 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_poll_msix()
2780 /* Re-enable MSI-X Rx vector */ in s2io_poll_msix()
2781 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; in s2io_poll_msix()
2782 addr += 7 - ring->ring_no; in s2io_poll_msix()
2783 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf; in s2io_poll_msix()
2795 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_poll_inta()
2797 struct config_param *config = &nic->config; in s2io_poll_inta()
2798 struct mac_info *mac_control = &nic->mac_control; in s2io_poll_inta()
2803 for (i = 0; i < config->rx_ring_num; i++) { in s2io_poll_inta()
2804 struct ring_info *ring = &mac_control->rings[i]; in s2io_poll_inta()
2808 budget -= ring_pkts_processed; in s2io_poll_inta()
2815 writeq(0, &bar0->rx_traffic_mask); in s2io_poll_inta()
2816 readl(&bar0->rx_traffic_mask); in s2io_poll_inta()
2823 * s2io_netpoll - netpoll event handler entry point
2828 * specific in-kernel networking tasks, such as remote consoles and kernel
2834 const int irq = nic->pdev->irq; in s2io_netpoll()
2835 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_netpoll()
2838 struct config_param *config = &nic->config; in s2io_netpoll()
2839 struct mac_info *mac_control = &nic->mac_control; in s2io_netpoll()
2841 if (pci_channel_offline(nic->pdev)) in s2io_netpoll()
2846 writeq(val64, &bar0->rx_traffic_int); in s2io_netpoll()
2847 writeq(val64, &bar0->tx_traffic_int); in s2io_netpoll()
2853 for (i = 0; i < config->tx_fifo_num; i++) in s2io_netpoll()
2854 tx_intr_handler(&mac_control->fifos[i]); in s2io_netpoll()
2857 for (i = 0; i < config->rx_ring_num; i++) { in s2io_netpoll()
2858 struct ring_info *ring = &mac_control->rings[i]; in s2io_netpoll()
2863 for (i = 0; i < config->rx_ring_num; i++) { in s2io_netpoll()
2864 struct ring_info *ring = &mac_control->rings[i]; in s2io_netpoll()
2866 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { in s2io_netpoll()
2869 dev->name); in s2io_netpoll()
2878 * rx_intr_handler - Rx interrupt handler
2883 * receive ring contains fresh, as yet unprocessed frames, this function is
2904 get_info = ring_data->rx_curr_get_info; in rx_intr_handler()
2906 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info)); in rx_intr_handler()
2908 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; in rx_intr_handler()
2918 ring_data->dev->name); in rx_intr_handler()
2921 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); in rx_intr_handler()
2924 ring_data->dev->name); in rx_intr_handler()
2927 if (ring_data->rxd_mode == RXD_MODE_1) { in rx_intr_handler()
2929 dma_unmap_single(&ring_data->pdev->dev, in rx_intr_handler()
2930 (dma_addr_t)rxdp1->Buffer0_ptr, in rx_intr_handler()
2931 ring_data->mtu + in rx_intr_handler()
2936 } else if (ring_data->rxd_mode == RXD_MODE_3B) { in rx_intr_handler()
2938 dma_sync_single_for_cpu(&ring_data->pdev->dev, in rx_intr_handler()
2939 (dma_addr_t)rxdp3->Buffer0_ptr, in rx_intr_handler()
2941 dma_unmap_single(&ring_data->pdev->dev, in rx_intr_handler()
2942 (dma_addr_t)rxdp3->Buffer2_ptr, in rx_intr_handler()
2943 ring_data->mtu + 4, DMA_FROM_DEVICE); in rx_intr_handler()
2945 prefetch(skb->data); in rx_intr_handler()
2948 ring_data->rx_curr_get_info.offset = get_info.offset; in rx_intr_handler()
2949 rxdp = ring_data->rx_blocks[get_block]. in rx_intr_handler()
2951 if (get_info.offset == rxd_count[ring_data->rxd_mode]) { in rx_intr_handler()
2953 ring_data->rx_curr_get_info.offset = get_info.offset; in rx_intr_handler()
2955 if (get_block == ring_data->block_count) in rx_intr_handler()
2957 ring_data->rx_curr_get_info.block_index = get_block; in rx_intr_handler()
2958 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; in rx_intr_handler()
2961 if (ring_data->nic->config.napi) { in rx_intr_handler()
2962 budget--; in rx_intr_handler()
2971 if (ring_data->lro) { in rx_intr_handler()
2974 struct lro *lro = &ring_data->lro0_n[i]; in rx_intr_handler()
2975 if (lro->in_use) { in rx_intr_handler()
2976 update_L3L4_header(ring_data->nic, lro); in rx_intr_handler()
2977 queue_rx_frame(lro->parent, lro->vlan_tag); in rx_intr_handler()
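/*
 * [Sketch of the get-side ring walk above: the offset advances within
 * the current Rx block; on reaching rxd_count it resets and the walk
 * steps to the next block, wrapping to block 0 at block_count:]
 */
static void advance_rx_get_ex(int *off, int *blk, int rxd_cnt, int blk_cnt)
{
	if (++(*off) == rxd_cnt) {
		*off = 0;
		if (++(*blk) == blk_cnt)
			*blk = 0;	/* wrap to the first block */
	}
}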
2986 * tx_intr_handler - Transmit interrupt handler
2999 struct s2io_nic *nic = fifo_data->nic; in tx_intr_handler()
3006 struct stat_block *stats = nic->mac_control.stats_info; in tx_intr_handler()
3007 struct swStat *swstats = &stats->sw_stat; in tx_intr_handler()
3009 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags)) in tx_intr_handler()
3012 get_info = fifo_data->tx_curr_get_info; in tx_intr_handler()
3013 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info)); in tx_intr_handler()
3014 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr; in tx_intr_handler()
3015 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && in tx_intr_handler()
3017 (txdlp->Host_Control)) { in tx_intr_handler()
3019 if (txdlp->Control_1 & TXD_T_CODE) { in tx_intr_handler()
3021 err = txdlp->Control_1 & TXD_T_CODE; in tx_intr_handler()
3023 swstats->parity_err_cnt++; in tx_intr_handler()
3030 swstats->tx_buf_abort_cnt++; in tx_intr_handler()
3034 swstats->tx_desc_abort_cnt++; in tx_intr_handler()
3038 swstats->tx_parity_err_cnt++; in tx_intr_handler()
3042 swstats->tx_link_loss_cnt++; in tx_intr_handler()
3046 swstats->tx_list_proc_err_cnt++; in tx_intr_handler()
3053 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); in tx_intr_handler()
3061 swstats->mem_freed += skb->truesize; in tx_intr_handler()
3067 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr; in tx_intr_handler()
3068 fifo_data->tx_curr_get_info.offset = get_info.offset; in tx_intr_handler()
3071 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq); in tx_intr_handler()
3073 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); in tx_intr_handler()
3077 * s2io_mdio_write - Function to write into MDIO registers
3091 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_mdio_write()
3097 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3099 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3108 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3110 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3117 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3119 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
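/*
 * [Sketch of the two-phase indirect MDIO access above: each phase
 * (address cycle, then data/operation cycle) loads mdio_control and
 * re-writes it with the start bit set to kick the serial transaction:]
 */
static void mdio_phase_ex(u64 __iomem *mdio_control, u64 val, u64 start_bit)
{
	writeq(val, mdio_control);		/* load the cycle's fields */
	writeq(val | start_bit, mdio_control);	/* start the transaction */
}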
3124 * s2io_mdio_read - Function to read from MDIO registers
3137 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_mdio_read()
3143 writeq(val64, &bar0->mdio_control); in s2io_mdio_read()
3145 writeq(val64, &bar0->mdio_control); in s2io_mdio_read()
3153 writeq(val64, &bar0->mdio_control); in s2io_mdio_read()
3155 writeq(val64, &bar0->mdio_control); in s2io_mdio_read()
3159 rval64 = readq(&bar0->mdio_control); in s2io_mdio_read()
3166 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3209 "Excessive laser output power may saturate far-end receiver.\n"); in s2io_chk_xpak_counter()
3226 * s2io_updt_xpak_counter - Function to update the xpak counters
3241 struct stat_block *stats = sp->mac_control.stats_info; in s2io_updt_xpak_counter()
3242 struct xpakStat *xstats = &stats->xpak_stat; in s2io_updt_xpak_counter()
3250 "ERR: MDIO slave access failed - Returned %llx\n", in s2io_updt_xpak_counter()
3257 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - " in s2io_updt_xpak_counter()
3258 "Returned: %llx- Expected: 0x%x\n", in s2io_updt_xpak_counter()
3275 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high, in s2io_updt_xpak_counter()
3276 &xstats->xpak_regs_stat, in s2io_updt_xpak_counter()
3280 xstats->alarm_transceiver_temp_low++; in s2io_updt_xpak_counter()
3284 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high, in s2io_updt_xpak_counter()
3285 &xstats->xpak_regs_stat, in s2io_updt_xpak_counter()
3289 xstats->alarm_laser_bias_current_low++; in s2io_updt_xpak_counter()
3293 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high, in s2io_updt_xpak_counter()
3294 &xstats->xpak_regs_stat, in s2io_updt_xpak_counter()
3298 xstats->alarm_laser_output_power_low++; in s2io_updt_xpak_counter()
3306 xstats->warn_transceiver_temp_high++; in s2io_updt_xpak_counter()
3309 xstats->warn_transceiver_temp_low++; in s2io_updt_xpak_counter()
3312 xstats->warn_laser_bias_current_high++; in s2io_updt_xpak_counter()
3315 xstats->warn_laser_bias_current_low++; in s2io_updt_xpak_counter()
3318 xstats->warn_laser_output_power_high++; in s2io_updt_xpak_counter()
3321 xstats->warn_laser_output_power_low++; in s2io_updt_xpak_counter()
3325 * wait_for_cmd_complete - waits for a command to complete.
3341 int ret = FAILURE, cnt = 0, delay = 1; in wait_for_cmd_complete()
3362 mdelay(delay); in wait_for_cmd_complete()
3364 msleep(delay); in wait_for_cmd_complete()
3367 delay = 50; in wait_for_cmd_complete()
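/*
 * [Condensed sketch of the polling strategy above: retry at 1 ms
 * (mdelay when sleeping is not allowed, msleep otherwise), then back
 * off to 50 ms ticks until the busy bit clears or the budget is spent.
 * Bit name and retry budget here are illustrative:]
 */
static int poll_cmd_done_ex(u64 __iomem *addr, u64 busy_bit, bool may_sleep)
{
	int cnt, delay = 1;

	for (cnt = 0; cnt < 500; cnt++) {
		if (!(readq(addr) & busy_bit))
			return 0;	/* command completed */
		if (may_sleep)
			msleep(delay);
		else
			mdelay(delay);
		if (cnt >= 10)
			delay = 50;	/* escalate after the first tries */
	}
	return -ETIMEDOUT;
}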
3372 * check_pci_device_id - Checks if the device id is supported
3392 * s2io_reset - Resets the card.
3403 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_reset()
3414 __func__, pci_name(sp->pdev)); in s2io_reset()
3416 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */ in s2io_reset()
3417 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); in s2io_reset()
3420 writeq(val64, &bar0->sw_reset); in s2io_reset()
3421 if (strstr(sp->product_name, "CX4")) in s2io_reset()
3427 pci_restore_state(sp->pdev); in s2io_reset()
3428 pci_save_state(sp->pdev); in s2io_reset()
3429 pci_read_config_word(sp->pdev, 0x2, &val16); in s2io_reset()
3438 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); in s2io_reset()
3451 /* Clear certain PCI/PCI-X fields after reset */ in s2io_reset()
3452 if (sp->device_type == XFRAME_II_DEVICE) { in s2io_reset()
3454 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); in s2io_reset()
3457 pci_write_config_dword(sp->pdev, 0x68, 0x7C); in s2io_reset()
3460 writeq(s2BIT(62), &bar0->txpic_int_reg); in s2io_reset()
3464 memset(&sp->stats, 0, sizeof(struct net_device_stats)); in s2io_reset()
3466 stats = sp->mac_control.stats_info; in s2io_reset()
3467 swstats = &stats->sw_stat; in s2io_reset()
3470 up_cnt = swstats->link_up_cnt; in s2io_reset()
3471 down_cnt = swstats->link_down_cnt; in s2io_reset()
3472 up_time = swstats->link_up_time; in s2io_reset()
3473 down_time = swstats->link_down_time; in s2io_reset()
3474 reset_cnt = swstats->soft_reset_cnt; in s2io_reset()
3475 mem_alloc_cnt = swstats->mem_allocated; in s2io_reset()
3476 mem_free_cnt = swstats->mem_freed; in s2io_reset()
3477 watchdog_cnt = swstats->watchdog_timer_cnt; in s2io_reset()
3482 swstats->link_up_cnt = up_cnt; in s2io_reset()
3483 swstats->link_down_cnt = down_cnt; in s2io_reset()
3484 swstats->link_up_time = up_time; in s2io_reset()
3485 swstats->link_down_time = down_time; in s2io_reset()
3486 swstats->soft_reset_cnt = reset_cnt; in s2io_reset()
3487 swstats->mem_allocated = mem_alloc_cnt; in s2io_reset()
3488 swstats->mem_freed = mem_free_cnt; in s2io_reset()
3489 swstats->watchdog_timer_cnt = watchdog_cnt; in s2io_reset()
3491 /* SXE-002: Configure link and activity LED to turn it off */ in s2io_reset()
3492 subid = sp->pdev->subsystem_device; in s2io_reset()
3494 (sp->device_type == XFRAME_I_DEVICE)) { in s2io_reset()
3495 val64 = readq(&bar0->gpio_control); in s2io_reset()
3497 writeq(val64, &bar0->gpio_control); in s2io_reset()
3506 if (sp->device_type == XFRAME_II_DEVICE) { in s2io_reset()
3507 val64 = readq(&bar0->pcc_err_reg); in s2io_reset()
3508 writeq(val64, &bar0->pcc_err_reg); in s2io_reset()
3511 sp->device_enabled_once = false; in s2io_reset()
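/*
 * Editorial sketch: s2io_reset() wipes the software statistics block but
 * carries a few counters (link up/down history, soft-reset and memory
 * accounting counts) across the reset by saving them before the memset and
 * writing them back afterwards.  The generic save/clear/restore pattern,
 * shown with a hypothetical stats struct:
 */
#include <stdint.h>
#include <string.h>

struct sw_stats {
	uint64_t link_up_cnt;
	uint64_t soft_reset_cnt;
	uint64_t volatile_counters[16];	/* everything else really resets */
};

static void reset_stats_preserving(struct sw_stats *s)
{
	uint64_t up = s->link_up_cnt;		/* save the survivors */
	uint64_t resets = s->soft_reset_cnt;

	memset(s, 0, sizeof(*s));		/* clear the whole block */

	s->link_up_cnt = up;			/* restore the survivors */
	s->soft_reset_cnt = resets;
}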
3515 * s2io_set_swapper - to set the swapper control on the card in s2io_set_swapper()
3526 struct net_device *dev = sp->dev; in s2io_set_swapper()
3527 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_set_swapper()
3532 * the PIF Feed-back register. in s2io_set_swapper()
3535 val64 = readq(&bar0->pif_rd_swapper_fb); in s2io_set_swapper()
3546 writeq(value[i], &bar0->swapper_ctrl); in s2io_set_swapper()
3547 val64 = readq(&bar0->pif_rd_swapper_fb); in s2io_set_swapper()
3554 "feedback read %llx\n", in s2io_set_swapper()
3555 dev->name, (unsigned long long)val64); in s2io_set_swapper()
3560 valr = readq(&bar0->swapper_ctrl); in s2io_set_swapper()
3564 writeq(valt, &bar0->xmsi_address); in s2io_set_swapper()
3565 val64 = readq(&bar0->xmsi_address); in s2io_set_swapper()
3577 writeq((value[i] | valr), &bar0->swapper_ctrl); in s2io_set_swapper()
3578 writeq(valt, &bar0->xmsi_address); in s2io_set_swapper()
3579 val64 = readq(&bar0->xmsi_address); in s2io_set_swapper()
3591 val64 = readq(&bar0->swapper_ctrl); in s2io_set_swapper()
3610 if (sp->config.intr_type == INTA) in s2io_set_swapper()
3612 writeq(val64, &bar0->swapper_ctrl); in s2io_set_swapper()
3634 if (sp->config.intr_type == INTA) in s2io_set_swapper()
3636 writeq(val64, &bar0->swapper_ctrl); in s2io_set_swapper()
3638 val64 = readq(&bar0->swapper_ctrl); in s2io_set_swapper()
3642 * feedback register. in s2io_set_swapper()
3644 val64 = readq(&bar0->pif_rd_swapper_fb); in s2io_set_swapper()
3648 "%s: Endian settings are wrong, feedback read %llx\n", in s2io_set_swapper()
3649 dev->name, (unsigned long long)val64); in s2io_set_swapper()
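/*
 * Editorial sketch: s2io_set_swapper() programs candidate byte-swap settings
 * into swapper_ctrl and reads pif_rd_swapper_fb after each attempt; the
 * feedback register yields a fixed signature only when the endian setup is
 * right.  The probe loop in isolation (the signature value and the accessor
 * callbacks are assumptions of this sketch):
 */
#include <stdbool.h>
#include <stdint.h>

#define SWAPPER_SIGNATURE 0x0123456789ABCDEFULL  /* assumed expected feedback */

static bool find_swapper_setting(const uint64_t *candidates, int n,
				 void (*write_ctrl)(uint64_t),
				 uint64_t (*read_feedback)(void))
{
	int i;

	for (i = 0; i < n; i++) {
		write_ctrl(candidates[i]);	/* try one swapper setting */
		if (read_feedback() == SWAPPER_SIGNATURE)
			return true;		/* endian settings are good */
	}
	return false;				/* no candidate matched */
}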
3658 struct XENA_dev_config __iomem *bar0 = nic->bar0; in wait_for_msix_trans()
3663 val64 = readq(&bar0->xmsi_access); in wait_for_msix_trans()
3679 struct XENA_dev_config __iomem *bar0 = nic->bar0; in restore_xmsi_data()
3683 if (nic->device_type == XFRAME_I_DEVICE) in restore_xmsi_data()
3687 msix_index = (i) ? ((i-1) * 8 + 1) : 0; in restore_xmsi_data()
3688 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); in restore_xmsi_data()
3689 writeq(nic->msix_info[i].data, &bar0->xmsi_data); in restore_xmsi_data()
3691 writeq(val64, &bar0->xmsi_access); in restore_xmsi_data()
3700 struct XENA_dev_config __iomem *bar0 = nic->bar0; in store_xmsi_data()
3704 if (nic->device_type == XFRAME_I_DEVICE) in store_xmsi_data()
3709 msix_index = (i) ? ((i-1) * 8 + 1) : 0; in store_xmsi_data()
3711 writeq(val64, &bar0->xmsi_access); in store_xmsi_data()
3717 addr = readq(&bar0->xmsi_address); in store_xmsi_data()
3718 data = readq(&bar0->xmsi_data); in store_xmsi_data()
3720 nic->msix_info[i].addr = addr; in store_xmsi_data()
3721 nic->msix_info[i].data = data; in store_xmsi_data()
3728 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_enable_msi_x()
3733 struct stat_block *stats = nic->mac_control.stats_info; in s2io_enable_msi_x()
3734 struct swStat *swstats = &stats->sw_stat; in s2io_enable_msi_x()
3736 size = nic->num_entries * sizeof(struct msix_entry); in s2io_enable_msi_x()
3737 nic->entries = kzalloc(size, GFP_KERNEL); in s2io_enable_msi_x()
3738 if (!nic->entries) { in s2io_enable_msi_x()
3741 swstats->mem_alloc_fail_cnt++; in s2io_enable_msi_x()
3742 return -ENOMEM; in s2io_enable_msi_x()
3744 swstats->mem_allocated += size; in s2io_enable_msi_x()
3746 size = nic->num_entries * sizeof(struct s2io_msix_entry); in s2io_enable_msi_x()
3747 nic->s2io_entries = kzalloc(size, GFP_KERNEL); in s2io_enable_msi_x()
3748 if (!nic->s2io_entries) { in s2io_enable_msi_x()
3751 swstats->mem_alloc_fail_cnt++; in s2io_enable_msi_x()
3752 kfree(nic->entries); in s2io_enable_msi_x()
3753 swstats->mem_freed in s2io_enable_msi_x()
3754 += (nic->num_entries * sizeof(struct msix_entry)); in s2io_enable_msi_x()
3755 return -ENOMEM; in s2io_enable_msi_x()
3757 swstats->mem_allocated += size; in s2io_enable_msi_x()
3759 nic->entries[0].entry = 0; in s2io_enable_msi_x()
3760 nic->s2io_entries[0].entry = 0; in s2io_enable_msi_x()
3761 nic->s2io_entries[0].in_use = MSIX_FLG; in s2io_enable_msi_x()
3762 nic->s2io_entries[0].type = MSIX_ALARM_TYPE; in s2io_enable_msi_x()
3763 nic->s2io_entries[0].arg = &nic->mac_control.fifos; in s2io_enable_msi_x()
3765 for (i = 1; i < nic->num_entries; i++) { in s2io_enable_msi_x()
3766 nic->entries[i].entry = ((i - 1) * 8) + 1; in s2io_enable_msi_x()
3767 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; in s2io_enable_msi_x()
3768 nic->s2io_entries[i].arg = NULL; in s2io_enable_msi_x()
3769 nic->s2io_entries[i].in_use = 0; in s2io_enable_msi_x()
3772 rx_mat = readq(&bar0->rx_mat); in s2io_enable_msi_x()
3773 for (j = 0; j < nic->config.rx_ring_num; j++) { in s2io_enable_msi_x()
3775 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; in s2io_enable_msi_x()
3776 nic->s2io_entries[j+1].type = MSIX_RING_TYPE; in s2io_enable_msi_x()
3777 nic->s2io_entries[j+1].in_use = MSIX_FLG; in s2io_enable_msi_x()
3780 writeq(rx_mat, &bar0->rx_mat); in s2io_enable_msi_x()
3781 readq(&bar0->rx_mat); in s2io_enable_msi_x()
3783 ret = pci_enable_msix_range(nic->pdev, nic->entries, in s2io_enable_msi_x()
3784 nic->num_entries, nic->num_entries); in s2io_enable_msi_x()
3787 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n"); in s2io_enable_msi_x()
3788 kfree(nic->entries); in s2io_enable_msi_x()
3789 swstats->mem_freed += nic->num_entries * in s2io_enable_msi_x()
3791 kfree(nic->s2io_entries); in s2io_enable_msi_x()
3792 swstats->mem_freed += nic->num_entries * in s2io_enable_msi_x()
3794 nic->entries = NULL; in s2io_enable_msi_x()
3795 nic->s2io_entries = NULL; in s2io_enable_msi_x()
3796 return -ENOMEM; in s2io_enable_msi_x()
3800 * To enable MSI-X, MSI also needs to be enabled, due to a bug in s2io_enable_msi_x()
3803 pci_read_config_word(nic->pdev, 0x42, &msi_control); in s2io_enable_msi_x()
3805 pci_write_config_word(nic->pdev, 0x42, msi_control); in s2io_enable_msi_x()
3815 sp->msi_detected = 1; in s2io_test_intr()
3816 wake_up(&sp->msi_wait); in s2io_test_intr()
3824 struct pci_dev *pdev = sp->pdev; in s2io_test_msi()
3825 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_test_msi()
3829 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, in s2io_test_msi()
3830 sp->name, sp); in s2io_test_msi()
3833 sp->dev->name, pci_name(pdev), pdev->irq); in s2io_test_msi()
3837 init_waitqueue_head(&sp->msi_wait); in s2io_test_msi()
3838 sp->msi_detected = 0; in s2io_test_msi()
3840 saved64 = val64 = readq(&bar0->scheduled_int_ctrl); in s2io_test_msi()
3844 writeq(val64, &bar0->scheduled_int_ctrl); in s2io_test_msi()
3846 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10); in s2io_test_msi()
3848 if (!sp->msi_detected) { in s2io_test_msi()
3852 sp->dev->name, pci_name(pdev)); in s2io_test_msi()
3854 err = -EOPNOTSUPP; in s2io_test_msi()
3857 free_irq(sp->entries[1].vector, sp); in s2io_test_msi()
3859 writeq(saved64, &bar0->scheduled_int_ctrl); in s2io_test_msi()
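/*
 * Editorial sketch: s2io_test_msi() registers a handler on the test vector,
 * fires a scheduled interrupt through scheduled_int_ctrl, and sleeps on a
 * wait queue for up to HZ/10; the handler sets msi_detected and wakes the
 * waiter.  The same handshake in portable C, with a polled atomic flag in
 * place of the kernel wait queue (all names here are hypothetical):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static atomic_bool msi_detected;

static void test_isr(void)		/* plays the role of s2io_test_intr() */
{
	atomic_store(&msi_detected, true);
}

static bool msi_self_test(void (*fire_test_irq)(void))
{
	int ms;

	atomic_store(&msi_detected, false);
	fire_test_irq();			/* kick the test interrupt */
	for (ms = 0; ms < 100; ms++) {		/* roughly HZ/10 of patience */
		if (atomic_load(&msi_detected))
			return true;		/* vector delivered */
		usleep(1000);
	}
	return false;				/* MSI never arrived */
}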
3869 for (i = 0; i < sp->num_entries; i++) { in remove_msix_isr()
3870 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) { in remove_msix_isr()
3871 int vector = sp->entries[i].vector; in remove_msix_isr()
3872 void *arg = sp->s2io_entries[i].arg; in remove_msix_isr()
3877 kfree(sp->entries); in remove_msix_isr()
3878 kfree(sp->s2io_entries); in remove_msix_isr()
3879 sp->entries = NULL; in remove_msix_isr()
3880 sp->s2io_entries = NULL; in remove_msix_isr()
3882 pci_read_config_word(sp->pdev, 0x42, &msi_control); in remove_msix_isr()
3884 pci_write_config_word(sp->pdev, 0x42, msi_control); in remove_msix_isr()
3886 pci_disable_msix(sp->pdev); in remove_msix_isr()
3891 free_irq(sp->pdev->irq, sp->dev); in remove_inta_isr()
3899 * s2io_open - open entry point of the driver
3906 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3913 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; in s2io_open()
3921 sp->last_link_state = 0; in s2io_open()
3927 dev->name); in s2io_open()
3931 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) { in s2io_open()
3934 err = -ENODEV; in s2io_open()
3941 if (sp->config.intr_type == MSI_X) { in s2io_open()
3942 if (sp->entries) { in s2io_open()
3943 kfree(sp->entries); in s2io_open()
3944 swstats->mem_freed += sp->num_entries * in s2io_open()
3947 if (sp->s2io_entries) { in s2io_open()
3948 kfree(sp->s2io_entries); in s2io_open()
3949 swstats->mem_freed += sp->num_entries * in s2io_open()
3957 * s2io_close - close entry point of the driver
3965 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3972 struct config_param *config = &sp->config; in s2io_close()
3984 for (offset = 1; offset < config->max_mc_addr; offset++) { in s2io_close()
3996 * s2io_xmit - Tx entry point of the driver
4020 struct config_param *config = &sp->config; in s2io_xmit()
4021 struct mac_info *mac_control = &sp->mac_control; in s2io_xmit()
4022 struct stat_block *stats = mac_control->stats_info; in s2io_xmit()
4023 struct swStat *swstats = &stats->sw_stat; in s2io_xmit()
4025 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name); in s2io_xmit()
4027 if (unlikely(skb->len <= 0)) { in s2io_xmit()
4028 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name); in s2io_xmit()
4035 dev->name); in s2io_xmit()
4043 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) { in s2io_xmit()
4044 if (skb->protocol == htons(ETH_P_IP)) { in s2io_xmit()
4051 ip->ihl*4); in s2io_xmit()
4053 if (ip->protocol == IPPROTO_TCP) { in s2io_xmit()
4054 queue_len = sp->total_tcp_fifos; in s2io_xmit()
4055 queue = (ntohs(th->source) + in s2io_xmit()
4056 ntohs(th->dest)) & in s2io_xmit()
4057 sp->fifo_selector[queue_len - 1]; in s2io_xmit()
4059 queue = queue_len - 1; in s2io_xmit()
4060 } else if (ip->protocol == IPPROTO_UDP) { in s2io_xmit()
4061 queue_len = sp->total_udp_fifos; in s2io_xmit()
4062 queue = (ntohs(th->source) + in s2io_xmit()
4063 ntohs(th->dest)) & in s2io_xmit()
4064 sp->fifo_selector[queue_len - 1]; in s2io_xmit()
4066 queue = queue_len - 1; in s2io_xmit()
4067 queue += sp->udp_fifo_idx; in s2io_xmit()
4068 if (skb->len > 1024) in s2io_xmit()
4073 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING) in s2io_xmit()
4074 /* get fifo number based on skb->priority value */ in s2io_xmit()
4075 queue = config->fifo_mapping in s2io_xmit()
4076 [skb->priority & (MAX_TX_FIFOS - 1)]; in s2io_xmit()
4077 fifo = &mac_control->fifos[queue]; in s2io_xmit()
4079 spin_lock_irqsave(&fifo->tx_lock, flags); in s2io_xmit()
4081 if (sp->config.multiq) { in s2io_xmit()
4082 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { in s2io_xmit()
4083 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
4086 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) { in s2io_xmit()
4088 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
4093 put_off = (u16)fifo->tx_curr_put_info.offset; in s2io_xmit()
4094 get_off = (u16)fifo->tx_curr_get_info.offset; in s2io_xmit()
4095 txdp = fifo->list_info[put_off].list_virt_addr; in s2io_xmit()
4097 queue_len = fifo->tx_curr_put_info.fifo_len + 1; in s2io_xmit()
4099 if (txdp->Host_Control || in s2io_xmit()
4102 s2io_stop_tx_queue(sp, fifo->fifo_no); in s2io_xmit()
4104 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
4110 txdp->Control_1 |= TXD_TCP_LSO_EN; in s2io_xmit()
4111 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); in s2io_xmit()
4113 if (skb->ip_summed == CHECKSUM_PARTIAL) { in s2io_xmit()
4114 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN | in s2io_xmit()
4118 txdp->Control_1 |= TXD_GATHER_CODE_FIRST; in s2io_xmit()
4119 txdp->Control_1 |= TXD_LIST_OWN_XENA; in s2io_xmit()
4120 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no); in s2io_xmit()
4123 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST; in s2io_xmit()
4125 txdp->Control_2 |= TXD_VLAN_ENABLE; in s2io_xmit()
4126 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); in s2io_xmit()
4130 txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data, in s2io_xmit()
4132 if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer)) in s2io_xmit()
4135 txdp->Host_Control = (unsigned long)skb; in s2io_xmit()
4136 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); in s2io_xmit()
4138 frg_cnt = skb_shinfo(skb)->nr_frags; in s2io_xmit()
4141 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in s2io_xmit()
4146 txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev, in s2io_xmit()
4150 txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag)); in s2io_xmit()
4152 txdp->Control_1 |= TXD_GATHER_CODE_LAST; in s2io_xmit()
4154 tx_fifo = mac_control->tx_FIFO_start[queue]; in s2io_xmit()
4155 val64 = fifo->list_info[put_off].list_phy_addr; in s2io_xmit()
4156 writeq(val64, &tx_fifo->TxDL_Pointer); in s2io_xmit()
4163 writeq(val64, &tx_fifo->List_Control); in s2io_xmit()
4166 if (put_off == fifo->tx_curr_put_info.fifo_len + 1) in s2io_xmit()
4168 fifo->tx_curr_put_info.offset = put_off; in s2io_xmit()
4172 swstats->fifo_full_cnt++; in s2io_xmit()
4176 s2io_stop_tx_queue(sp, fifo->fifo_no); in s2io_xmit()
4178 swstats->mem_allocated += skb->truesize; in s2io_xmit()
4179 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
4181 if (sp->config.intr_type == MSI_X) in s2io_xmit()
4187 swstats->pci_map_fail_cnt++; in s2io_xmit()
4188 s2io_stop_tx_queue(sp, fifo->fifo_no); in s2io_xmit()
4189 swstats->mem_freed += skb->truesize; in s2io_xmit()
4191 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
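/*
 * Editorial sketch: under TX_DEFAULT_STEERING, s2io_xmit() picks a transmit
 * fifo by summing the TCP/UDP source and destination ports and masking with
 * a selector chosen for the fifo count of that protocol class, clamping if
 * the mask overshoots.  Reduced to its arithmetic (ports already in host
 * order; the selector table is a stand-in for sp->fifo_selector):
 */
#include <stdint.h>

static unsigned int pick_tx_fifo(uint16_t sport, uint16_t dport,
				 unsigned int fifo_count,
				 const uint8_t *fifo_selector)
{
	/* fifo_selector[n - 1] holds the mask to use when n fifos exist */
	unsigned int queue = (sport + dport) & fifo_selector[fifo_count - 1];

	if (queue >= fifo_count)	/* mask may allow more than n values */
		queue = fifo_count - 1;
	return queue;
}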
4199 struct net_device *dev = sp->dev; in s2io_alarm_handle()
4202 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); in s2io_alarm_handle()
4208 struct s2io_nic *sp = ring->nic; in s2io_msix_ring_handle()
4209 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_msix_ring_handle()
4214 if (sp->config.napi) { in s2io_msix_ring_handle()
4218 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; in s2io_msix_ring_handle()
4219 addr += (7 - ring->ring_no); in s2io_msix_ring_handle()
4220 val8 = (ring->ring_no == 0) ? 0x7f : 0xff; in s2io_msix_ring_handle()
4223 napi_schedule(&ring->napi); in s2io_msix_ring_handle()
4236 struct s2io_nic *sp = fifos->nic; in s2io_msix_fifo_handle()
4237 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_msix_fifo_handle()
4238 struct config_param *config = &sp->config; in s2io_msix_fifo_handle()
4244 reason = readq(&bar0->general_int_status); in s2io_msix_fifo_handle()
4250 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); in s2io_msix_fifo_handle()
4256 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); in s2io_msix_fifo_handle()
4258 for (i = 0; i < config->tx_fifo_num; i++) in s2io_msix_fifo_handle()
4261 writeq(sp->general_int_mask, &bar0->general_int_mask); in s2io_msix_fifo_handle()
4262 readl(&bar0->general_int_status); in s2io_msix_fifo_handle()
4271 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_txpic_intr_handle()
4274 val64 = readq(&bar0->pic_int_status); in s2io_txpic_intr_handle()
4276 val64 = readq(&bar0->gpio_int_reg); in s2io_txpic_intr_handle()
4281 * interrupt and adapter to re-evaluate the link state. in s2io_txpic_intr_handle()
4285 writeq(val64, &bar0->gpio_int_reg); in s2io_txpic_intr_handle()
4286 val64 = readq(&bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4289 writeq(val64, &bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4291 val64 = readq(&bar0->adapter_status); in s2io_txpic_intr_handle()
4293 val64 = readq(&bar0->adapter_control); in s2io_txpic_intr_handle()
4295 writeq(val64, &bar0->adapter_control); in s2io_txpic_intr_handle()
4297 writeq(val64, &bar0->adapter_control); in s2io_txpic_intr_handle()
4298 if (!sp->device_enabled_once) in s2io_txpic_intr_handle()
4299 sp->device_enabled_once = 1; in s2io_txpic_intr_handle()
4303 * unmask link down interrupt and mask link-up in s2io_txpic_intr_handle()
4306 val64 = readq(&bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4309 writeq(val64, &bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4312 val64 = readq(&bar0->adapter_status); in s2io_txpic_intr_handle()
4315 val64 = readq(&bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4318 writeq(val64, &bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4321 val64 = readq(&bar0->adapter_control); in s2io_txpic_intr_handle()
4323 writeq(val64, &bar0->adapter_control); in s2io_txpic_intr_handle()
4326 val64 = readq(&bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4330 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4336 * 1 - if alarm bit set
4337 * 0 - if alarm bit is not set
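/*
 * Editorial sketch of the helper documented above: read the alarm register,
 * and when the requested bit is set, write the value back (the alarm bits
 * appear to be write-1-to-clear, as with mc_err_reg below), count the event
 * and report the hit.  Plain memory stands in for the readq()/writeq() MMIO
 * accessors:
 */
#include <stdint.h>

static int chk_alarm_bit(uint64_t bit, volatile uint64_t *reg, uint64_t *cnt)
{
	uint64_t val = *reg;		/* readq(addr) */

	if (val & bit) {
		*reg = val;		/* write back to acknowledge the alarm */
		(*cnt)++;		/* bump the matching error counter */
		return 1;		/* alarm bit set */
	}
	return 0;			/* alarm bit not set */
}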
4354 * s2io_handle_errors - Xframe error indication handler
4365 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_handle_errors()
4369 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat; in s2io_handle_errors()
4370 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat; in s2io_handle_errors()
4375 if (pci_channel_offline(sp->pdev)) in s2io_handle_errors()
4378 memset(&sw_stat->ring_full_cnt, 0, in s2io_handle_errors()
4379 sizeof(sw_stat->ring_full_cnt)); in s2io_handle_errors()
4382 if (stats->xpak_timer_count < 72000) { in s2io_handle_errors()
4384 stats->xpak_timer_count++; in s2io_handle_errors()
4388 stats->xpak_timer_count = 0; in s2io_handle_errors()
4393 val64 = readq(&bar0->mac_rmac_err_reg); in s2io_handle_errors()
4394 writeq(val64, &bar0->mac_rmac_err_reg); in s2io_handle_errors()
4396 schedule_work(&sp->set_link_task); in s2io_handle_errors()
4400 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source, in s2io_handle_errors()
4401 &sw_stat->serious_err_cnt)) in s2io_handle_errors()
4405 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg, in s2io_handle_errors()
4406 &sw_stat->parity_err_cnt)) in s2io_handle_errors()
4410 if (sp->device_type == XFRAME_II_DEVICE) { in s2io_handle_errors()
4411 val64 = readq(&bar0->ring_bump_counter1); in s2io_handle_errors()
4414 temp64 >>= 64 - ((i+1)*16); in s2io_handle_errors()
4415 sw_stat->ring_full_cnt[i] += temp64; in s2io_handle_errors()
4418 val64 = readq(&bar0->ring_bump_counter2); in s2io_handle_errors()
4421 temp64 >>= 64 - ((i+1)*16); in s2io_handle_errors()
4422 sw_stat->ring_full_cnt[i+4] += temp64; in s2io_handle_errors()
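/*
 * Editorial sketch: each ring_bump_counter register packs four 16-bit ring
 * counters into one 64-bit word, and the loops above peel field i out by
 * shifting it down from the top.  Equivalently:
 */
#include <stdint.h>

static uint16_t ring_bump_field(uint64_t reg, int i)	/* i in 0..3 */
{
	/* field 0 occupies bits 63:48, field 3 occupies bits 15:0 */
	return (uint16_t)(reg >> (64 - (i + 1) * 16));
}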
4426 val64 = readq(&bar0->txdma_int_status); in s2io_handle_errors()
4432 &bar0->pfc_err_reg, in s2io_handle_errors()
4433 &sw_stat->pfc_err_cnt)) in s2io_handle_errors()
4436 &bar0->pfc_err_reg, in s2io_handle_errors()
4437 &sw_stat->pfc_err_cnt); in s2io_handle_errors()
4445 &bar0->tda_err_reg, in s2io_handle_errors()
4446 &sw_stat->tda_err_cnt)) in s2io_handle_errors()
4449 &bar0->tda_err_reg, in s2io_handle_errors()
4450 &sw_stat->tda_err_cnt); in s2io_handle_errors()
4459 &bar0->pcc_err_reg, in s2io_handle_errors()
4460 &sw_stat->pcc_err_cnt)) in s2io_handle_errors()
4463 &bar0->pcc_err_reg, in s2io_handle_errors()
4464 &sw_stat->pcc_err_cnt); in s2io_handle_errors()
4470 &bar0->tti_err_reg, in s2io_handle_errors()
4471 &sw_stat->tti_err_cnt)) in s2io_handle_errors()
4474 &bar0->tti_err_reg, in s2io_handle_errors()
4475 &sw_stat->tti_err_cnt); in s2io_handle_errors()
4482 &bar0->lso_err_reg, in s2io_handle_errors()
4483 &sw_stat->lso_err_cnt)) in s2io_handle_errors()
4486 &bar0->lso_err_reg, in s2io_handle_errors()
4487 &sw_stat->lso_err_cnt); in s2io_handle_errors()
4493 &bar0->tpa_err_reg, in s2io_handle_errors()
4494 &sw_stat->tpa_err_cnt)) in s2io_handle_errors()
4497 &bar0->tpa_err_reg, in s2io_handle_errors()
4498 &sw_stat->tpa_err_cnt); in s2io_handle_errors()
4504 &bar0->sm_err_reg, in s2io_handle_errors()
4505 &sw_stat->sm_err_cnt)) in s2io_handle_errors()
4509 val64 = readq(&bar0->mac_int_status); in s2io_handle_errors()
4512 &bar0->mac_tmac_err_reg, in s2io_handle_errors()
4513 &sw_stat->mac_tmac_err_cnt)) in s2io_handle_errors()
4518 &bar0->mac_tmac_err_reg, in s2io_handle_errors()
4519 &sw_stat->mac_tmac_err_cnt); in s2io_handle_errors()
4522 val64 = readq(&bar0->xgxs_int_status); in s2io_handle_errors()
4525 &bar0->xgxs_txgxs_err_reg, in s2io_handle_errors()
4526 &sw_stat->xgxs_txgxs_err_cnt)) in s2io_handle_errors()
4529 &bar0->xgxs_txgxs_err_reg, in s2io_handle_errors()
4530 &sw_stat->xgxs_txgxs_err_cnt); in s2io_handle_errors()
4533 val64 = readq(&bar0->rxdma_int_status); in s2io_handle_errors()
4539 &bar0->rc_err_reg, in s2io_handle_errors()
4540 &sw_stat->rc_err_cnt)) in s2io_handle_errors()
4544 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg, in s2io_handle_errors()
4545 &sw_stat->rc_err_cnt); in s2io_handle_errors()
4549 &bar0->prc_pcix_err_reg, in s2io_handle_errors()
4550 &sw_stat->prc_pcix_err_cnt)) in s2io_handle_errors()
4555 &bar0->prc_pcix_err_reg, in s2io_handle_errors()
4556 &sw_stat->prc_pcix_err_cnt); in s2io_handle_errors()
4561 &bar0->rpa_err_reg, in s2io_handle_errors()
4562 &sw_stat->rpa_err_cnt)) in s2io_handle_errors()
4565 &bar0->rpa_err_reg, in s2io_handle_errors()
4566 &sw_stat->rpa_err_cnt); in s2io_handle_errors()
4575 &bar0->rda_err_reg, in s2io_handle_errors()
4576 &sw_stat->rda_err_cnt)) in s2io_handle_errors()
4582 &bar0->rda_err_reg, in s2io_handle_errors()
4583 &sw_stat->rda_err_cnt); in s2io_handle_errors()
4588 &bar0->rti_err_reg, in s2io_handle_errors()
4589 &sw_stat->rti_err_cnt)) in s2io_handle_errors()
4592 &bar0->rti_err_reg, in s2io_handle_errors()
4593 &sw_stat->rti_err_cnt); in s2io_handle_errors()
4596 val64 = readq(&bar0->mac_int_status); in s2io_handle_errors()
4599 &bar0->mac_rmac_err_reg, in s2io_handle_errors()
4600 &sw_stat->mac_rmac_err_cnt)) in s2io_handle_errors()
4605 &bar0->mac_rmac_err_reg, in s2io_handle_errors()
4606 &sw_stat->mac_rmac_err_cnt); in s2io_handle_errors()
4609 val64 = readq(&bar0->xgxs_int_status); in s2io_handle_errors()
4612 &bar0->xgxs_rxgxs_err_reg, in s2io_handle_errors()
4613 &sw_stat->xgxs_rxgxs_err_cnt)) in s2io_handle_errors()
4617 val64 = readq(&bar0->mc_int_status); in s2io_handle_errors()
4620 &bar0->mc_err_reg, in s2io_handle_errors()
4621 &sw_stat->mc_err_cnt)) in s2io_handle_errors()
4626 writeq(val64, &bar0->mc_err_reg); in s2io_handle_errors()
4628 sw_stat->double_ecc_errs++; in s2io_handle_errors()
4629 if (sp->device_type != XFRAME_II_DEVICE) { in s2io_handle_errors()
4639 sw_stat->single_ecc_errs++; in s2io_handle_errors()
4646 schedule_work(&sp->rst_timer_task); in s2io_handle_errors()
4647 sw_stat->soft_reset_cnt++; in s2io_handle_errors()
4651 * s2io_isr - ISR handler of the device. in s2io_isr()
4667 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_isr()
4674 if (pci_channel_offline(sp->pdev)) in s2io_isr()
4680 config = &sp->config; in s2io_isr()
4681 mac_control = &sp->mac_control; in s2io_isr()
4690 reason = readq(&bar0->general_int_status); in s2io_isr()
4697 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); in s2io_isr()
4699 if (config->napi) { in s2io_isr()
4701 napi_schedule(&sp->napi); in s2io_isr()
4702 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); in s2io_isr()
4703 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); in s2io_isr()
4704 readl(&bar0->rx_traffic_int); in s2io_isr()
4713 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); in s2io_isr()
4715 for (i = 0; i < config->rx_ring_num; i++) { in s2io_isr()
4716 struct ring_info *ring = &mac_control->rings[i]; in s2io_isr()
4728 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); in s2io_isr()
4730 for (i = 0; i < config->tx_fifo_num; i++) in s2io_isr()
4731 tx_intr_handler(&mac_control->fifos[i]); in s2io_isr()
4739 if (!config->napi) { in s2io_isr()
4740 for (i = 0; i < config->rx_ring_num; i++) { in s2io_isr()
4741 struct ring_info *ring = &mac_control->rings[i]; in s2io_isr()
4746 writeq(sp->general_int_mask, &bar0->general_int_mask); in s2io_isr()
4747 readl(&bar0->general_int_status); in s2io_isr()
4760 * s2io_updt_stats -
4764 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_updt_stats()
4772 writeq(val64, &bar0->stat_cfg); in s2io_updt_stats()
4775 val64 = readq(&bar0->stat_cfg); in s2io_updt_stats()
4786 * s2io_get_stats - Updates the device statistics structure.
4797 struct mac_info *mac_control = &sp->mac_control; in s2io_get_stats()
4798 struct stat_block *stats = mac_control->stats_info; in s2io_get_stats()
4804 /* A device reset will cause the on-adapter statistics to be zeroed. in s2io_get_stats()
4811 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | in s2io_get_stats()
4812 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets; in s2io_get_stats()
4813 sp->stats.rx_packets += delta; in s2io_get_stats()
4814 dev->stats.rx_packets += delta; in s2io_get_stats()
4816 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 | in s2io_get_stats()
4817 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets; in s2io_get_stats()
4818 sp->stats.tx_packets += delta; in s2io_get_stats()
4819 dev->stats.tx_packets += delta; in s2io_get_stats()
4821 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | in s2io_get_stats()
4822 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes; in s2io_get_stats()
4823 sp->stats.rx_bytes += delta; in s2io_get_stats()
4824 dev->stats.rx_bytes += delta; in s2io_get_stats()
4826 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | in s2io_get_stats()
4827 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes; in s2io_get_stats()
4828 sp->stats.tx_bytes += delta; in s2io_get_stats()
4829 dev->stats.tx_bytes += delta; in s2io_get_stats()
4831 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors; in s2io_get_stats()
4832 sp->stats.rx_errors += delta; in s2io_get_stats()
4833 dev->stats.rx_errors += delta; in s2io_get_stats()
4835 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | in s2io_get_stats()
4836 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors; in s2io_get_stats()
4837 sp->stats.tx_errors += delta; in s2io_get_stats()
4838 dev->stats.tx_errors += delta; in s2io_get_stats()
4840 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped; in s2io_get_stats()
4841 sp->stats.rx_dropped += delta; in s2io_get_stats()
4842 dev->stats.rx_dropped += delta; in s2io_get_stats()
4844 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped; in s2io_get_stats()
4845 sp->stats.tx_dropped += delta; in s2io_get_stats()
4846 dev->stats.tx_dropped += delta; in s2io_get_stats()
4853 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | in s2io_get_stats()
4854 le32_to_cpu(stats->rmac_vld_mcst_frms); in s2io_get_stats()
4855 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms); in s2io_get_stats()
4856 delta -= sp->stats.multicast; in s2io_get_stats()
4857 sp->stats.multicast += delta; in s2io_get_stats()
4858 dev->stats.multicast += delta; in s2io_get_stats()
4860 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | in s2io_get_stats()
4861 le32_to_cpu(stats->rmac_usized_frms)) + in s2io_get_stats()
4862 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors; in s2io_get_stats()
4863 sp->stats.rx_length_errors += delta; in s2io_get_stats()
4864 dev->stats.rx_length_errors += delta; in s2io_get_stats()
4866 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors; in s2io_get_stats()
4867 sp->stats.rx_crc_errors += delta; in s2io_get_stats()
4868 dev->stats.rx_crc_errors += delta; in s2io_get_stats()
4870 return &dev->stats; in s2io_get_stats()
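/*
 * Editorial sketch: the hardware exports 32-bit counters plus separate
 * 32-bit overflow words; s2io_get_stats() splices each pair into a 64-bit
 * value and folds only the change since the previous readout into both its
 * private snapshot and dev->stats.  The recurring idiom:
 */
#include <stdint.h>

static void accumulate_stat(uint32_t oflow, uint32_t lo,
			    uint64_t *snapshot, uint64_t *total)
{
	uint64_t hw_now = ((uint64_t)oflow << 32) | lo;	/* splice 32 + 32 */
	uint64_t delta = hw_now - *snapshot;		/* new events only */

	*snapshot += delta;	/* snapshot now equals the hardware value */
	*total += delta;	/* running net_device statistic */
}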
4874 * s2io_set_multicast - entry point for multicast address enable/disable.
4892 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_set_multicast()
4897 struct config_param *config = &sp->config; in s2io_set_multicast()
4899 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) { in s2io_set_multicast()
4902 &bar0->rmac_addr_data0_mem); in s2io_set_multicast()
4904 &bar0->rmac_addr_data1_mem); in s2io_set_multicast()
4907 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1); in s2io_set_multicast()
4908 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_set_multicast()
4910 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_set_multicast()
4914 sp->m_cast_flg = 1; in s2io_set_multicast()
4915 sp->all_multi_pos = config->max_mc_addr - 1; in s2io_set_multicast()
4916 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) { in s2io_set_multicast()
4919 &bar0->rmac_addr_data0_mem); in s2io_set_multicast()
4921 &bar0->rmac_addr_data1_mem); in s2io_set_multicast()
4924 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); in s2io_set_multicast()
4925 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_set_multicast()
4927 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_set_multicast()
4931 sp->m_cast_flg = 0; in s2io_set_multicast()
4932 sp->all_multi_pos = 0; in s2io_set_multicast()
4935 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) { in s2io_set_multicast()
4937 add = &bar0->mac_cfg; in s2io_set_multicast()
4938 val64 = readq(&bar0->mac_cfg); in s2io_set_multicast()
4941 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in s2io_set_multicast()
4943 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in s2io_set_multicast()
4947 val64 = readq(&bar0->rx_pa_cfg); in s2io_set_multicast()
4949 writeq(val64, &bar0->rx_pa_cfg); in s2io_set_multicast()
4950 sp->vlan_strip_flag = 0; in s2io_set_multicast()
4953 val64 = readq(&bar0->mac_cfg); in s2io_set_multicast()
4954 sp->promisc_flg = 1; in s2io_set_multicast()
4956 dev->name); in s2io_set_multicast()
4957 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) { in s2io_set_multicast()
4959 add = &bar0->mac_cfg; in s2io_set_multicast()
4960 val64 = readq(&bar0->mac_cfg); in s2io_set_multicast()
4963 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in s2io_set_multicast()
4965 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in s2io_set_multicast()
4969 val64 = readq(&bar0->rx_pa_cfg); in s2io_set_multicast()
4971 writeq(val64, &bar0->rx_pa_cfg); in s2io_set_multicast()
4972 sp->vlan_strip_flag = 1; in s2io_set_multicast()
4975 val64 = readq(&bar0->mac_cfg); in s2io_set_multicast()
4976 sp->promisc_flg = 0; in s2io_set_multicast()
4977 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name); in s2io_set_multicast()
4981 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) { in s2io_set_multicast()
4983 (config->max_mc_addr - config->max_mac_addr)) { in s2io_set_multicast()
4985 "%s: No more Rx filters can be added - " in s2io_set_multicast()
4987 dev->name); in s2io_set_multicast()
4991 prev_cnt = sp->mc_addr_count; in s2io_set_multicast()
4992 sp->mc_addr_count = netdev_mc_count(dev); in s2io_set_multicast()
4997 &bar0->rmac_addr_data0_mem); in s2io_set_multicast()
4999 &bar0->rmac_addr_data1_mem); in s2io_set_multicast()
5003 (config->mc_start_offset + i); in s2io_set_multicast()
5004 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_set_multicast()
5007 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_set_multicast()
5012 dev->name); in s2io_set_multicast()
5022 mac_addr |= ha->addr[j]; in s2io_set_multicast()
5027 &bar0->rmac_addr_data0_mem); in s2io_set_multicast()
5029 &bar0->rmac_addr_data1_mem); in s2io_set_multicast()
5033 (i + config->mc_start_offset); in s2io_set_multicast()
5034 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_set_multicast()
5037 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_set_multicast()
5042 dev->name); in s2io_set_multicast()
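/*
 * Editorial sketch: before a multicast entry is written to
 * rmac_addr_data0_mem, the loop above folds the six address octets into one
 * 64-bit value, with the first octet ending up most significant.  The
 * packing step on its own:
 */
#include <stdint.h>

static uint64_t pack_mac(const uint8_t addr[6])
{
	uint64_t mac = 0;
	int j;

	for (j = 0; j < 6; j++) {
		mac <<= 8;	/* make room for the next octet */
		mac |= addr[j];	/* addr[0] lands in the top byte */
	}
	return mac;
}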
5063 struct config_param *config = &sp->config; in do_s2io_store_unicast_mc()
5066 for (offset = 0; offset < config->max_mc_addr; offset++) { in do_s2io_store_unicast_mc()
5079 struct config_param *config = &sp->config; in do_s2io_restore_unicast_mc()
5081 for (offset = 0; offset < config->max_mac_addr; offset++) in do_s2io_restore_unicast_mc()
5082 do_s2io_prog_unicast(sp->dev, in do_s2io_restore_unicast_mc()
5083 sp->def_mac_addr[offset].mac_addr); in do_s2io_restore_unicast_mc()
5086 for (offset = config->mc_start_offset; in do_s2io_restore_unicast_mc()
5087 offset < config->max_mc_addr; offset++) in do_s2io_restore_unicast_mc()
5088 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr); in do_s2io_restore_unicast_mc()
5096 struct config_param *config = &sp->config; in do_s2io_add_mc()
5103 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) { in do_s2io_add_mc()
5112 if (i == config->max_mc_addr) { in do_s2io_add_mc()
5127 struct XENA_dev_config __iomem *bar0 = sp->bar0; in do_s2io_add_mac()
5130 &bar0->rmac_addr_data0_mem); in do_s2io_add_mac()
5134 writeq(val64, &bar0->rmac_addr_cmd_mem); in do_s2io_add_mac()
5137 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in do_s2io_add_mac()
5150 struct config_param *config = &sp->config; in do_s2io_delete_unicast_mc()
5153 offset < config->max_mc_addr; offset++) { in do_s2io_delete_unicast_mc()
5173 struct XENA_dev_config __iomem *bar0 = sp->bar0; in do_s2io_read_unicast_mc()
5178 writeq(val64, &bar0->rmac_addr_cmd_mem); in do_s2io_read_unicast_mc()
5181 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in do_s2io_read_unicast_mc()
5187 tmp64 = readq(&bar0->rmac_addr_data0_mem); in do_s2io_read_unicast_mc()
5193 * s2io_set_mac_addr - driver entry point
5200 if (!is_valid_ether_addr(addr->sa_data)) in s2io_set_mac_addr()
5201 return -EADDRNOTAVAIL; in s2io_set_mac_addr()
5203 eth_hw_addr_set(dev, addr->sa_data); in s2io_set_mac_addr()
5206 return do_s2io_prog_unicast(dev, dev->dev_addr); in s2io_set_mac_addr()
5209 * do_s2io_prog_unicast - Programs the Xframe mac address
5214 * Return value: SUCCESS on success and an appropriate (-)ve integer
5224 struct config_param *config = &sp->config; in do_s2io_prog_unicast()
5232 perm_addr = ether_addr_to_u64(sp->def_mac_addr[0].mac_addr); in do_s2io_prog_unicast()
5239 for (i = 1; i < config->max_mac_addr; i++) { in do_s2io_prog_unicast()
5251 if (i == config->max_mac_addr) { in do_s2io_prog_unicast()
5262 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5278 if ((cmd->base.autoneg == AUTONEG_ENABLE) || in s2io_ethtool_set_link_ksettings()
5279 (cmd->base.speed != SPEED_10000) || in s2io_ethtool_set_link_ksettings()
5280 (cmd->base.duplex != DUPLEX_FULL)) in s2io_ethtool_set_link_ksettings()
5281 return -EINVAL; in s2io_ethtool_set_link_ksettings()
5283 s2io_close(sp->dev); in s2io_ethtool_set_link_ksettings()
5284 s2io_open(sp->dev); in s2io_ethtool_set_link_ksettings()
5291 * s2io_ethtool_get_link_ksettings - Return link specific information.
5315 cmd->base.port = PORT_FIBRE; in s2io_ethtool_get_link_ksettings()
5317 if (netif_carrier_ok(sp->dev)) { in s2io_ethtool_get_link_ksettings()
5318 cmd->base.speed = SPEED_10000; in s2io_ethtool_get_link_ksettings()
5319 cmd->base.duplex = DUPLEX_FULL; in s2io_ethtool_get_link_ksettings()
5321 cmd->base.speed = SPEED_UNKNOWN; in s2io_ethtool_get_link_ksettings()
5322 cmd->base.duplex = DUPLEX_UNKNOWN; in s2io_ethtool_get_link_ksettings()
5325 cmd->base.autoneg = AUTONEG_DISABLE; in s2io_ethtool_get_link_ksettings()
5330 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5345 strscpy(info->driver, s2io_driver_name, sizeof(info->driver)); in s2io_ethtool_gdrvinfo()
5346 strscpy(info->version, s2io_driver_version, sizeof(info->version)); in s2io_ethtool_gdrvinfo()
5347 strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); in s2io_ethtool_gdrvinfo()
5351 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
5371 regs->len = XENA_REG_SPACE; in s2io_ethtool_gregs()
5372 regs->version = sp->pdev->subsystem_device; in s2io_ethtool_gregs()
5374 for (i = 0; i < regs->len; i += 8) { in s2io_ethtool_gregs()
5375 reg = readq(sp->bar0 + i); in s2io_ethtool_gregs()
5381 * s2io_set_led - control NIC led
5385 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_set_led()
5386 u16 subid = sp->pdev->subsystem_device; in s2io_set_led()
5389 if ((sp->device_type == XFRAME_II_DEVICE) || in s2io_set_led()
5391 val64 = readq(&bar0->gpio_control); in s2io_set_led()
5397 writeq(val64, &bar0->gpio_control); in s2io_set_led()
5399 val64 = readq(&bar0->adapter_control); in s2io_set_led()
5405 writeq(val64, &bar0->adapter_control); in s2io_set_led()
5411 * s2io_ethtool_set_led - To physically identify the nic on the system.
5426 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_ethtool_set_led()
5427 u16 subid = sp->pdev->subsystem_device; in s2io_ethtool_set_led()
5429 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) { in s2io_ethtool_set_led()
5430 u64 val64 = readq(&bar0->adapter_control); in s2io_ethtool_set_led()
5433 return -EAGAIN; in s2io_ethtool_set_led()
5439 sp->adapt_ctrl_org = readq(&bar0->gpio_control); in s2io_ethtool_set_led()
5451 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) in s2io_ethtool_set_led()
5452 writeq(sp->adapt_ctrl_org, &bar0->gpio_control); in s2io_ethtool_set_led()
5467 if (sp->rxd_mode == RXD_MODE_1) { in s2io_ethtool_gringparam()
5468 ering->rx_max_pending = MAX_RX_DESC_1; in s2io_ethtool_gringparam()
5469 ering->rx_jumbo_max_pending = MAX_RX_DESC_1; in s2io_ethtool_gringparam()
5471 ering->rx_max_pending = MAX_RX_DESC_2; in s2io_ethtool_gringparam()
5472 ering->rx_jumbo_max_pending = MAX_RX_DESC_2; in s2io_ethtool_gringparam()
5475 ering->tx_max_pending = MAX_TX_DESC; in s2io_ethtool_gringparam()
5477 for (i = 0; i < sp->config.rx_ring_num; i++) in s2io_ethtool_gringparam()
5478 rx_desc_count += sp->config.rx_cfg[i].num_rxd; in s2io_ethtool_gringparam()
5479 ering->rx_pending = rx_desc_count; in s2io_ethtool_gringparam()
5480 ering->rx_jumbo_pending = rx_desc_count; in s2io_ethtool_gringparam()
5482 for (i = 0; i < sp->config.tx_fifo_num; i++) in s2io_ethtool_gringparam()
5483 tx_desc_count += sp->config.tx_cfg[i].fifo_len; in s2io_ethtool_gringparam()
5484 ering->tx_pending = tx_desc_count; in s2io_ethtool_gringparam()
5485 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds); in s2io_ethtool_gringparam()
5489 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5502 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_ethtool_getpause_data()
5504 val64 = readq(&bar0->rmac_pause_cfg); in s2io_ethtool_getpause_data()
5506 ep->tx_pause = true; in s2io_ethtool_getpause_data()
5508 ep->rx_pause = true; in s2io_ethtool_getpause_data()
5509 ep->autoneg = false; in s2io_ethtool_getpause_data()
5513 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5528 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_ethtool_setpause_data()
5530 val64 = readq(&bar0->rmac_pause_cfg); in s2io_ethtool_setpause_data()
5531 if (ep->tx_pause) in s2io_ethtool_setpause_data()
5535 if (ep->rx_pause) in s2io_ethtool_setpause_data()
5539 writeq(val64, &bar0->rmac_pause_cfg); in s2io_ethtool_setpause_data()
5545 * read_eeprom - reads 4 bytes of data from a user-given offset.
5557 * -1 on failure and 0 on success.
5561 int ret = -1; in read_eeprom()
5564 struct XENA_dev_config __iomem *bar0 = sp->bar0; in read_eeprom()
5566 if (sp->device_type == XFRAME_I_DEVICE) { in read_eeprom()
5572 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); in read_eeprom()
5575 val64 = readq(&bar0->i2c_control); in read_eeprom()
5586 if (sp->device_type == XFRAME_II_DEVICE) { in read_eeprom()
5590 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); in read_eeprom()
5592 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); in read_eeprom()
5594 val64 = readq(&bar0->spi_control); in read_eeprom()
5599 *data = readq(&bar0->spi_data); in read_eeprom()
5612 * write_eeprom - actually writes the relevant part of the data value.
5623 * 0 on success, -1 on failure.
5628 int exit_cnt = 0, ret = -1; in write_eeprom()
5630 struct XENA_dev_config __iomem *bar0 = sp->bar0; in write_eeprom()
5632 if (sp->device_type == XFRAME_I_DEVICE) { in write_eeprom()
5638 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); in write_eeprom()
5641 val64 = readq(&bar0->i2c_control); in write_eeprom()
5652 if (sp->device_type == XFRAME_II_DEVICE) { in write_eeprom()
5654 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data); in write_eeprom()
5659 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); in write_eeprom()
5661 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); in write_eeprom()
5663 val64 = readq(&bar0->spi_control); in write_eeprom()
5683 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat; in s2io_vpd_read()
5685 if (nic->device_type == XFRAME_II_DEVICE) { in s2io_vpd_read()
5686 strcpy(nic->product_name, "Xframe II 10GbE network adapter"); in s2io_vpd_read()
5689 strcpy(nic->product_name, "Xframe I 10GbE network adapter"); in s2io_vpd_read()
5692 strcpy(nic->serial_num, "NOT AVAILABLE"); in s2io_vpd_read()
5696 swstats->mem_alloc_fail_cnt++; in s2io_vpd_read()
5699 swstats->mem_allocated += 256; in s2io_vpd_read()
5702 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); in s2io_vpd_read()
5703 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); in s2io_vpd_read()
5704 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0); in s2io_vpd_read()
5707 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data); in s2io_vpd_read()
5716 pci_read_config_dword(nic->pdev, (vpd_addr + 4), in s2io_vpd_read()
5726 if (len < min(VPD_STRING_LEN, 256-cnt-2)) { in s2io_vpd_read()
5727 memcpy(nic->serial_num, in s2io_vpd_read()
5730 memset(nic->serial_num+len, in s2io_vpd_read()
5732 VPD_STRING_LEN-len); in s2io_vpd_read()
5741 memcpy(nic->product_name, &vpd_data[3], len); in s2io_vpd_read()
5742 nic->product_name[len] = 0; in s2io_vpd_read()
5745 swstats->mem_freed += 256; in s2io_vpd_read()
5749 * s2io_ethtool_geeprom - reads the value stored in the EEPROM.
5768 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); in s2io_ethtool_geeprom()
5770 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE)) in s2io_ethtool_geeprom()
5771 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset; in s2io_ethtool_geeprom()
5773 for (i = 0; i < eeprom->len; i += 4) { in s2io_ethtool_geeprom()
5774 if (read_eeprom(sp, (eeprom->offset + i), &data)) { in s2io_ethtool_geeprom()
5776 return -EFAULT; in s2io_ethtool_geeprom()
5785 * s2io_ethtool_seeprom - tries to write the user-provided value to the EEPROM
5794 * 0 on success, -EFAULT on failure.
5801 int len = eeprom->len, cnt = 0; in s2io_ethtool_seeprom()
5805 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { in s2io_ethtool_seeprom()
5809 (sp->pdev->vendor | (sp->pdev->device << 16)), in s2io_ethtool_seeprom()
5810 eeprom->magic); in s2io_ethtool_seeprom()
5811 return -EFAULT; in s2io_ethtool_seeprom()
5821 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) { in s2io_ethtool_seeprom()
5825 return -EFAULT; in s2io_ethtool_seeprom()
5828 len--; in s2io_ethtool_seeprom()
5835 * s2io_register_test - reads and writes into all clock domains.
5849 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_register_test()
5853 val64 = readq(&bar0->pif_rd_swapper_fb); in s2io_register_test()
5859 val64 = readq(&bar0->rmac_pause_cfg); in s2io_register_test()
5865 val64 = readq(&bar0->rx_queue_cfg); in s2io_register_test()
5866 if (sp->device_type == XFRAME_II_DEVICE) in s2io_register_test()
5875 val64 = readq(&bar0->xgxs_efifo_cfg); in s2io_register_test()
5882 writeq(val64, &bar0->xmsi_data); in s2io_register_test()
5883 val64 = readq(&bar0->xmsi_data); in s2io_register_test()
5890 writeq(val64, &bar0->xmsi_data); in s2io_register_test()
5891 val64 = readq(&bar0->xmsi_data); in s2io_register_test()
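/*
 * Editorial sketch: s2io_register_test() exercises each clock domain by
 * writing known values to a readable scratch register (xmsi_data above),
 * reading them back and failing on any mismatch.  The verify step in
 * general form (the patterns here are illustrative, not the driver's):
 */
#include <stdbool.h>
#include <stdint.h>

static bool scratch_reg_ok(volatile uint64_t *reg)
{
	static const uint64_t patterns[] = {
		0x5A5A5A5A5A5A5A5AULL,
		0xA5A5A5A5A5A5A5A5ULL,
	};
	unsigned int i;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		*reg = patterns[i];		/* writeq(...) */
		if (*reg != patterns[i])	/* readq(...) must match */
			return false;
	}
	return true;
}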
5902 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5919 struct net_device *dev = sp->dev; in s2io_eeprom_test()
5922 /* Note that the SPI interface allows write access to all areas in s2io_eeprom_test()
5925 if (sp->device_type == XFRAME_I_DEVICE) in s2io_eeprom_test()
5944 dev->name, (unsigned long long)0x12345, in s2io_eeprom_test()
5953 if (sp->device_type == XFRAME_I_DEVICE) in s2io_eeprom_test()
5966 dev->name, (unsigned long long)0x12345, in s2io_eeprom_test()
5974 if (sp->device_type == XFRAME_I_DEVICE) { in s2io_eeprom_test()
6003 * s2io_bist_test - invokes the MemBist test of the card.
6013 * 0 on success and -1 on failure.
6019 int cnt = 0, ret = -1; in s2io_bist_test()
6021 pci_read_config_byte(sp->pdev, PCI_BIST, &bist); in s2io_bist_test()
6023 pci_write_config_word(sp->pdev, PCI_BIST, bist); in s2io_bist_test()
6026 pci_read_config_byte(sp->pdev, PCI_BIST, &bist); in s2io_bist_test()
6040 * s2io_link_test - verifies the link state of the nic
6054 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_link_test()
6057 val64 = readq(&bar0->adapter_status); in s2io_link_test()
6067 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6081 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_rldram_test()
6085 val64 = readq(&bar0->adapter_control); in s2io_rldram_test()
6087 writeq(val64, &bar0->adapter_control); in s2io_rldram_test()
6089 val64 = readq(&bar0->mc_rldram_test_ctrl); in s2io_rldram_test()
6091 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); in s2io_rldram_test()
6093 val64 = readq(&bar0->mc_rldram_mrs); in s2io_rldram_test()
6095 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); in s2io_rldram_test()
6098 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); in s2io_rldram_test()
6104 writeq(val64, &bar0->mc_rldram_test_d0); in s2io_rldram_test()
6109 writeq(val64, &bar0->mc_rldram_test_d1); in s2io_rldram_test()
6114 writeq(val64, &bar0->mc_rldram_test_d2); in s2io_rldram_test()
6117 writeq(val64, &bar0->mc_rldram_test_add); in s2io_rldram_test()
6122 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); in s2io_rldram_test()
6125 val64 = readq(&bar0->mc_rldram_test_ctrl); in s2io_rldram_test()
6135 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); in s2io_rldram_test()
6138 val64 = readq(&bar0->mc_rldram_test_ctrl); in s2io_rldram_test()
6147 val64 = readq(&bar0->mc_rldram_test_ctrl); in s2io_rldram_test()
6157 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF); in s2io_rldram_test()
6163 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6181 int orig_state = netif_running(sp->dev); in s2io_ethtool_test()
6183 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { in s2io_ethtool_test()
6186 s2io_close(sp->dev); in s2io_ethtool_test()
6189 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6194 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6199 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6202 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6205 s2io_open(sp->dev); in s2io_ethtool_test()
6212 dev->name); in s2io_ethtool_test()
6213 data[0] = -1; in s2io_ethtool_test()
6214 data[1] = -1; in s2io_ethtool_test()
6215 data[2] = -1; in s2io_ethtool_test()
6216 data[3] = -1; in s2io_ethtool_test()
6217 data[4] = -1; in s2io_ethtool_test()
6221 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6236 struct stat_block *stats = sp->mac_control.stats_info; in s2io_get_ethtool_stats()
6237 struct swStat *swstats = &stats->sw_stat; in s2io_get_ethtool_stats()
6238 struct xpakStat *xstats = &stats->xpak_stat; in s2io_get_ethtool_stats()
6242 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6243 le32_to_cpu(stats->tmac_frms); in s2io_get_ethtool_stats()
6245 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | in s2io_get_ethtool_stats()
6246 le32_to_cpu(stats->tmac_data_octets); in s2io_get_ethtool_stats()
6247 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms); in s2io_get_ethtool_stats()
6249 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6250 le32_to_cpu(stats->tmac_mcst_frms); in s2io_get_ethtool_stats()
6252 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6253 le32_to_cpu(stats->tmac_bcst_frms); in s2io_get_ethtool_stats()
6254 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms); in s2io_get_ethtool_stats()
6256 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 | in s2io_get_ethtool_stats()
6257 le32_to_cpu(stats->tmac_ttl_octets); in s2io_get_ethtool_stats()
6259 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6260 le32_to_cpu(stats->tmac_ucst_frms); in s2io_get_ethtool_stats()
6262 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6263 le32_to_cpu(stats->tmac_nucst_frms); in s2io_get_ethtool_stats()
6265 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6266 le32_to_cpu(stats->tmac_any_err_frms); in s2io_get_ethtool_stats()
6267 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets); in s2io_get_ethtool_stats()
6268 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets); in s2io_get_ethtool_stats()
6270 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6271 le32_to_cpu(stats->tmac_vld_ip); in s2io_get_ethtool_stats()
6273 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6274 le32_to_cpu(stats->tmac_drop_ip); in s2io_get_ethtool_stats()
6276 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 | in s2io_get_ethtool_stats()
6277 le32_to_cpu(stats->tmac_icmp); in s2io_get_ethtool_stats()
6279 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 | in s2io_get_ethtool_stats()
6280 le32_to_cpu(stats->tmac_rst_tcp); in s2io_get_ethtool_stats()
6281 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp); in s2io_get_ethtool_stats()
6282 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 | in s2io_get_ethtool_stats()
6283 le32_to_cpu(stats->tmac_udp); in s2io_get_ethtool_stats()
6285 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6286 le32_to_cpu(stats->rmac_vld_frms); in s2io_get_ethtool_stats()
6288 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | in s2io_get_ethtool_stats()
6289 le32_to_cpu(stats->rmac_data_octets); in s2io_get_ethtool_stats()
6290 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms); in s2io_get_ethtool_stats()
6291 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms); in s2io_get_ethtool_stats()
6293 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6294 le32_to_cpu(stats->rmac_vld_mcst_frms); in s2io_get_ethtool_stats()
6296 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6297 le32_to_cpu(stats->rmac_vld_bcst_frms); in s2io_get_ethtool_stats()
6298 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms); in s2io_get_ethtool_stats()
6299 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms); in s2io_get_ethtool_stats()
6300 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms); in s2io_get_ethtool_stats()
6301 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms); in s2io_get_ethtool_stats()
6302 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms); in s2io_get_ethtool_stats()
6304 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 | in s2io_get_ethtool_stats()
6305 le32_to_cpu(stats->rmac_ttl_octets); in s2io_get_ethtool_stats()
6307 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32 in s2io_get_ethtool_stats()
6308 | le32_to_cpu(stats->rmac_accepted_ucst_frms); in s2io_get_ethtool_stats()
6310 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow) in s2io_get_ethtool_stats()
6311 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms); in s2io_get_ethtool_stats()
6313 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6314 le32_to_cpu(stats->rmac_discarded_frms); in s2io_get_ethtool_stats()
6316 (u64)le32_to_cpu(stats->rmac_drop_events_oflow) in s2io_get_ethtool_stats()
6317 << 32 | le32_to_cpu(stats->rmac_drop_events); in s2io_get_ethtool_stats()
6318 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets); in s2io_get_ethtool_stats()
6319 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms); in s2io_get_ethtool_stats()
6321 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6322 le32_to_cpu(stats->rmac_usized_frms); in s2io_get_ethtool_stats()
6324 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6325 le32_to_cpu(stats->rmac_osized_frms); in s2io_get_ethtool_stats()
6327 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6328 le32_to_cpu(stats->rmac_frag_frms); in s2io_get_ethtool_stats()
6330 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6331 le32_to_cpu(stats->rmac_jabber_frms); in s2io_get_ethtool_stats()
6332 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms); in s2io_get_ethtool_stats()
6333 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms); in s2io_get_ethtool_stats()
6334 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms); in s2io_get_ethtool_stats()
6335 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms); in s2io_get_ethtool_stats()
6336 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms); in s2io_get_ethtool_stats()
6337 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms); in s2io_get_ethtool_stats()
6339 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6340 le32_to_cpu(stats->rmac_ip); in s2io_get_ethtool_stats()
6341 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets); in s2io_get_ethtool_stats()
6342 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip); in s2io_get_ethtool_stats()
6344 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6345 le32_to_cpu(stats->rmac_drop_ip); in s2io_get_ethtool_stats()
6347 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 | in s2io_get_ethtool_stats()
6348 le32_to_cpu(stats->rmac_icmp); in s2io_get_ethtool_stats()
6349 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp); in s2io_get_ethtool_stats()
6351 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 | in s2io_get_ethtool_stats()
6352 le32_to_cpu(stats->rmac_udp); in s2io_get_ethtool_stats()
6354 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 | in s2io_get_ethtool_stats()
6355 le32_to_cpu(stats->rmac_err_drp_udp); in s2io_get_ethtool_stats()
6356 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym); in s2io_get_ethtool_stats()
6357 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0); in s2io_get_ethtool_stats()
6358 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1); in s2io_get_ethtool_stats()
6359 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2); in s2io_get_ethtool_stats()
6360 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3); in s2io_get_ethtool_stats()
6361 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4); in s2io_get_ethtool_stats()
6362 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5); in s2io_get_ethtool_stats()
6363 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6); in s2io_get_ethtool_stats()
6364 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7); in s2io_get_ethtool_stats()
6365 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0); in s2io_get_ethtool_stats()
6366 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1); in s2io_get_ethtool_stats()
6367 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2); in s2io_get_ethtool_stats()
6368 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3); in s2io_get_ethtool_stats()
6369 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4); in s2io_get_ethtool_stats()
6370 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5); in s2io_get_ethtool_stats()
6371 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6); in s2io_get_ethtool_stats()
6372 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7); in s2io_get_ethtool_stats()
6374 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 | in s2io_get_ethtool_stats()
6375 le32_to_cpu(stats->rmac_pause_cnt); in s2io_get_ethtool_stats()
6376 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt); in s2io_get_ethtool_stats()
6377 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt); in s2io_get_ethtool_stats()
6379 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6380 le32_to_cpu(stats->rmac_accepted_ip); in s2io_get_ethtool_stats()
6381 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp); in s2io_get_ethtool_stats()
6382 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt); in s2io_get_ethtool_stats()
6383 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt); in s2io_get_ethtool_stats()
6384 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt); in s2io_get_ethtool_stats()
6385 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt); in s2io_get_ethtool_stats()
6386 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt); in s2io_get_ethtool_stats()
6387 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt); in s2io_get_ethtool_stats()
6388 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt); in s2io_get_ethtool_stats()
6389 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt); in s2io_get_ethtool_stats()
6390 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt); in s2io_get_ethtool_stats()
6391 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt); in s2io_get_ethtool_stats()
6392 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt); in s2io_get_ethtool_stats()
6393 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt); in s2io_get_ethtool_stats()
6394 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt); in s2io_get_ethtool_stats()
6395 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt); in s2io_get_ethtool_stats()
6396 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt); in s2io_get_ethtool_stats()
6397 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt); in s2io_get_ethtool_stats()
6398 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt); in s2io_get_ethtool_stats()
6399 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt); in s2io_get_ethtool_stats()
6402 if (sp->device_type == XFRAME_II_DEVICE) { in s2io_get_ethtool_stats()
6404 le64_to_cpu(stats->rmac_ttl_1519_4095_frms); in s2io_get_ethtool_stats()
6406 le64_to_cpu(stats->rmac_ttl_4096_8191_frms); in s2io_get_ethtool_stats()
6408 le64_to_cpu(stats->rmac_ttl_8192_max_frms); in s2io_get_ethtool_stats()
6409 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms); in s2io_get_ethtool_stats()
6410 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms); in s2io_get_ethtool_stats()
6411 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms); in s2io_get_ethtool_stats()
6412 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms); in s2io_get_ethtool_stats()
6413 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms); in s2io_get_ethtool_stats()
6414 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard); in s2io_get_ethtool_stats()
6415 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard); in s2io_get_ethtool_stats()
6416 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard); in s2io_get_ethtool_stats()
6417 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard); in s2io_get_ethtool_stats()
6418 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard); in s2io_get_ethtool_stats()
6419 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard); in s2io_get_ethtool_stats()
6420 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard); in s2io_get_ethtool_stats()
6421 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt); in s2io_get_ethtool_stats()
6425 tmp_stats[i++] = swstats->single_ecc_errs; in s2io_get_ethtool_stats()
6426 tmp_stats[i++] = swstats->double_ecc_errs; in s2io_get_ethtool_stats()
6427 tmp_stats[i++] = swstats->parity_err_cnt; in s2io_get_ethtool_stats()
6428 tmp_stats[i++] = swstats->serious_err_cnt; in s2io_get_ethtool_stats()
6429 tmp_stats[i++] = swstats->soft_reset_cnt; in s2io_get_ethtool_stats()
6430 tmp_stats[i++] = swstats->fifo_full_cnt; in s2io_get_ethtool_stats()
6432 tmp_stats[i++] = swstats->ring_full_cnt[k]; in s2io_get_ethtool_stats()
6433 tmp_stats[i++] = xstats->alarm_transceiver_temp_high; in s2io_get_ethtool_stats()
6434 tmp_stats[i++] = xstats->alarm_transceiver_temp_low; in s2io_get_ethtool_stats()
6435 tmp_stats[i++] = xstats->alarm_laser_bias_current_high; in s2io_get_ethtool_stats()
6436 tmp_stats[i++] = xstats->alarm_laser_bias_current_low; in s2io_get_ethtool_stats()
6437 tmp_stats[i++] = xstats->alarm_laser_output_power_high; in s2io_get_ethtool_stats()
6438 tmp_stats[i++] = xstats->alarm_laser_output_power_low; in s2io_get_ethtool_stats()
6439 tmp_stats[i++] = xstats->warn_transceiver_temp_high; in s2io_get_ethtool_stats()
6440 tmp_stats[i++] = xstats->warn_transceiver_temp_low; in s2io_get_ethtool_stats()
6441 tmp_stats[i++] = xstats->warn_laser_bias_current_high; in s2io_get_ethtool_stats()
6442 tmp_stats[i++] = xstats->warn_laser_bias_current_low; in s2io_get_ethtool_stats()
6443 tmp_stats[i++] = xstats->warn_laser_output_power_high; in s2io_get_ethtool_stats()
6444 tmp_stats[i++] = xstats->warn_laser_output_power_low; in s2io_get_ethtool_stats()
6445 tmp_stats[i++] = swstats->clubbed_frms_cnt; in s2io_get_ethtool_stats()
6446 tmp_stats[i++] = swstats->sending_both; in s2io_get_ethtool_stats()
6447 tmp_stats[i++] = swstats->outof_sequence_pkts; in s2io_get_ethtool_stats()
6448 tmp_stats[i++] = swstats->flush_max_pkts; in s2io_get_ethtool_stats()
6449 if (swstats->num_aggregations) { in s2io_get_ethtool_stats()
6450 u64 tmp = swstats->sum_avg_pkts_aggregated; in s2io_get_ethtool_stats()
6453 * Since 64-bit divide does not work on all platforms, in s2io_get_ethtool_stats()
6456 while (tmp >= swstats->num_aggregations) { in s2io_get_ethtool_stats()
6457 tmp -= swstats->num_aggregations; in s2io_get_ethtool_stats()
6463 tmp_stats[i++] = swstats->mem_alloc_fail_cnt; in s2io_get_ethtool_stats()
6464 tmp_stats[i++] = swstats->pci_map_fail_cnt; in s2io_get_ethtool_stats()
6465 tmp_stats[i++] = swstats->watchdog_timer_cnt; in s2io_get_ethtool_stats()
6466 tmp_stats[i++] = swstats->mem_allocated; in s2io_get_ethtool_stats()
6467 tmp_stats[i++] = swstats->mem_freed; in s2io_get_ethtool_stats()
6468 tmp_stats[i++] = swstats->link_up_cnt; in s2io_get_ethtool_stats()
6469 tmp_stats[i++] = swstats->link_down_cnt; in s2io_get_ethtool_stats()
6470 tmp_stats[i++] = swstats->link_up_time; in s2io_get_ethtool_stats()
6471 tmp_stats[i++] = swstats->link_down_time; in s2io_get_ethtool_stats()
6473 tmp_stats[i++] = swstats->tx_buf_abort_cnt; in s2io_get_ethtool_stats()
6474 tmp_stats[i++] = swstats->tx_desc_abort_cnt; in s2io_get_ethtool_stats()
6475 tmp_stats[i++] = swstats->tx_parity_err_cnt; in s2io_get_ethtool_stats()
6476 tmp_stats[i++] = swstats->tx_link_loss_cnt; in s2io_get_ethtool_stats()
6477 tmp_stats[i++] = swstats->tx_list_proc_err_cnt; in s2io_get_ethtool_stats()
6479 tmp_stats[i++] = swstats->rx_parity_err_cnt; in s2io_get_ethtool_stats()
6480 tmp_stats[i++] = swstats->rx_abort_cnt; in s2io_get_ethtool_stats()
6481 tmp_stats[i++] = swstats->rx_parity_abort_cnt; in s2io_get_ethtool_stats()
6482 tmp_stats[i++] = swstats->rx_rda_fail_cnt; in s2io_get_ethtool_stats()
6483 tmp_stats[i++] = swstats->rx_unkn_prot_cnt; in s2io_get_ethtool_stats()
6484 tmp_stats[i++] = swstats->rx_fcs_err_cnt; in s2io_get_ethtool_stats()
6485 tmp_stats[i++] = swstats->rx_buf_size_err_cnt; in s2io_get_ethtool_stats()
6486 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt; in s2io_get_ethtool_stats()
6487 tmp_stats[i++] = swstats->rx_unkn_err_cnt; in s2io_get_ethtool_stats()
6488 tmp_stats[i++] = swstats->tda_err_cnt; in s2io_get_ethtool_stats()
6489 tmp_stats[i++] = swstats->pfc_err_cnt; in s2io_get_ethtool_stats()
6490 tmp_stats[i++] = swstats->pcc_err_cnt; in s2io_get_ethtool_stats()
6491 tmp_stats[i++] = swstats->tti_err_cnt; in s2io_get_ethtool_stats()
6492 tmp_stats[i++] = swstats->tpa_err_cnt; in s2io_get_ethtool_stats()
6493 tmp_stats[i++] = swstats->sm_err_cnt; in s2io_get_ethtool_stats()
6494 tmp_stats[i++] = swstats->lso_err_cnt; in s2io_get_ethtool_stats()
6495 tmp_stats[i++] = swstats->mac_tmac_err_cnt; in s2io_get_ethtool_stats()
6496 tmp_stats[i++] = swstats->mac_rmac_err_cnt; in s2io_get_ethtool_stats()
6497 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt; in s2io_get_ethtool_stats()
6498 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt; in s2io_get_ethtool_stats()
6499 tmp_stats[i++] = swstats->rc_err_cnt; in s2io_get_ethtool_stats()
6500 tmp_stats[i++] = swstats->prc_pcix_err_cnt; in s2io_get_ethtool_stats()
6501 tmp_stats[i++] = swstats->rpa_err_cnt; in s2io_get_ethtool_stats()
6502 tmp_stats[i++] = swstats->rda_err_cnt; in s2io_get_ethtool_stats()
6503 tmp_stats[i++] = swstats->rti_err_cnt; in s2io_get_ethtool_stats()
6504 tmp_stats[i++] = swstats->mc_err_cnt; in s2io_get_ethtool_stats()
6526 switch (sp->device_type) { in s2io_get_sset_count()
6535 return -EOPNOTSUPP; in s2io_get_sset_count()
6552 if (sp->device_type == XFRAME_II_DEVICE) { in s2io_ethtool_get_strings()
6567 netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO; in s2io_set_features()
6574 dev->features = features; in s2io_set_features()
6608 * s2io_ioctl - Entry point for the Ioctl
6621 return -EOPNOTSUPP; in s2io_ioctl()
6625 * s2io_change_mtu - entry point to change MTU size for the device.
6631 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6640 WRITE_ONCE(dev->mtu, new_mtu); in s2io_change_mtu()
6652 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_change_mtu()
6655 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); in s2io_change_mtu()
6662 * s2io_set_link - Set the LInk status
6671 struct net_device *dev = nic->dev; in s2io_set_link()
6672 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_set_link()
6681 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) { in s2io_set_link()
6686 subid = nic->pdev->subsystem_device; in s2io_set_link()
6689 * Allow a small delay for the NICs self initiated in s2io_set_link()
6695 val64 = readq(&bar0->adapter_status); in s2io_set_link()
6697 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) { in s2io_set_link()
6699 val64 = readq(&bar0->adapter_control); in s2io_set_link()
6701 writeq(val64, &bar0->adapter_control); in s2io_set_link()
6703 nic->device_type, subid)) { in s2io_set_link()
6704 val64 = readq(&bar0->gpio_control); in s2io_set_link()
6706 writeq(val64, &bar0->gpio_control); in s2io_set_link()
6707 val64 = readq(&bar0->gpio_control); in s2io_set_link()
6710 writeq(val64, &bar0->adapter_control); in s2io_set_link()
6712 nic->device_enabled_once = true; in s2io_set_link()
6716 dev->name); in s2io_set_link()
6720 val64 = readq(&bar0->adapter_control); in s2io_set_link()
6722 writeq(val64, &bar0->adapter_control); in s2io_set_link()
6725 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type, in s2io_set_link()
6727 val64 = readq(&bar0->gpio_control); in s2io_set_link()
6729 writeq(val64, &bar0->gpio_control); in s2io_set_link()
6730 val64 = readq(&bar0->gpio_control); in s2io_set_link()
6733 val64 = readq(&bar0->adapter_control); in s2io_set_link()
6735 writeq(val64, &bar0->adapter_control); in s2io_set_link()
6738 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state)); in s2io_set_link()
6749 struct net_device *dev = sp->dev; in set_rxd_buffer_pointer()
6750 struct swStat *stats = &sp->mac_control.stats_info->sw_stat; in set_rxd_buffer_pointer()
6752 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) { in set_rxd_buffer_pointer()
6762 rxdp1->Buffer0_ptr = *temp0; in set_rxd_buffer_pointer()
6768 dev->name, "1 buf mode SKBs"); in set_rxd_buffer_pointer()
6769 stats->mem_alloc_fail_cnt++; in set_rxd_buffer_pointer()
6770 return -ENOMEM ; in set_rxd_buffer_pointer()
6772 stats->mem_allocated += (*skb)->truesize; in set_rxd_buffer_pointer()
6777 rxdp1->Buffer0_ptr = *temp0 = in set_rxd_buffer_pointer()
6778 dma_map_single(&sp->pdev->dev, (*skb)->data, in set_rxd_buffer_pointer()
6779 size - NET_IP_ALIGN, in set_rxd_buffer_pointer()
6781 if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr)) in set_rxd_buffer_pointer()
6783 rxdp->Host_Control = (unsigned long) (*skb); in set_rxd_buffer_pointer()
6785 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { in set_rxd_buffer_pointer()
6789 rxdp3->Buffer2_ptr = *temp2; in set_rxd_buffer_pointer()
6790 rxdp3->Buffer0_ptr = *temp0; in set_rxd_buffer_pointer()
6791 rxdp3->Buffer1_ptr = *temp1; in set_rxd_buffer_pointer()
6797 dev->name, in set_rxd_buffer_pointer()
6799 stats->mem_alloc_fail_cnt++; in set_rxd_buffer_pointer()
6800 return -ENOMEM; in set_rxd_buffer_pointer()
6802 stats->mem_allocated += (*skb)->truesize; in set_rxd_buffer_pointer()
6803 rxdp3->Buffer2_ptr = *temp2 = in set_rxd_buffer_pointer()
6804 dma_map_single(&sp->pdev->dev, (*skb)->data, in set_rxd_buffer_pointer()
6805 dev->mtu + 4, DMA_FROM_DEVICE); in set_rxd_buffer_pointer()
6806 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr)) in set_rxd_buffer_pointer()
6808 rxdp3->Buffer0_ptr = *temp0 = in set_rxd_buffer_pointer()
6809 dma_map_single(&sp->pdev->dev, ba->ba_0, in set_rxd_buffer_pointer()
6811 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) { in set_rxd_buffer_pointer()
6812 dma_unmap_single(&sp->pdev->dev, in set_rxd_buffer_pointer()
6813 (dma_addr_t)rxdp3->Buffer2_ptr, in set_rxd_buffer_pointer()
6814 dev->mtu + 4, in set_rxd_buffer_pointer()
6818 rxdp->Host_Control = (unsigned long) (*skb); in set_rxd_buffer_pointer()
6820 /* Buffer-1 will be dummy buffer not used */ in set_rxd_buffer_pointer()
6821 rxdp3->Buffer1_ptr = *temp1 = in set_rxd_buffer_pointer()
6822 dma_map_single(&sp->pdev->dev, ba->ba_1, in set_rxd_buffer_pointer()
6824 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) { in set_rxd_buffer_pointer()
6825 dma_unmap_single(&sp->pdev->dev, in set_rxd_buffer_pointer()
6826 (dma_addr_t)rxdp3->Buffer0_ptr, in set_rxd_buffer_pointer()
6828 dma_unmap_single(&sp->pdev->dev, in set_rxd_buffer_pointer()
6829 (dma_addr_t)rxdp3->Buffer2_ptr, in set_rxd_buffer_pointer()
6830 dev->mtu + 4, in set_rxd_buffer_pointer()
6839 stats->pci_map_fail_cnt++; in set_rxd_buffer_pointer()
6840 stats->mem_freed += (*skb)->truesize; in set_rxd_buffer_pointer()
6842 return -ENOMEM; in set_rxd_buffer_pointer()
6848 struct net_device *dev = sp->dev; in set_rxd_buffer_size()
6849 if (sp->rxd_mode == RXD_MODE_1) { in set_rxd_buffer_size()
6850 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); in set_rxd_buffer_size()
6851 } else if (sp->rxd_mode == RXD_MODE_3B) { in set_rxd_buffer_size()
6852 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); in set_rxd_buffer_size()
6853 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); in set_rxd_buffer_size()
6854 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4); in set_rxd_buffer_size()
6861 struct config_param *config = &sp->config; in rxd_owner_bit_reset()
6862 struct mac_info *mac_control = &sp->mac_control; in rxd_owner_bit_reset()
6863 struct net_device *dev = sp->dev; in rxd_owner_bit_reset()
6870 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + in rxd_owner_bit_reset()
6872 if (sp->rxd_mode == RXD_MODE_1) in rxd_owner_bit_reset()
6874 else if (sp->rxd_mode == RXD_MODE_3B) in rxd_owner_bit_reset()
6875 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; in rxd_owner_bit_reset()
6877 for (i = 0; i < config->rx_ring_num; i++) { in rxd_owner_bit_reset()
6878 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in rxd_owner_bit_reset()
6879 struct ring_info *ring = &mac_control->rings[i]; in rxd_owner_bit_reset()
6881 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1); in rxd_owner_bit_reset()
6884 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { in rxd_owner_bit_reset()
6885 rxdp = ring->rx_blocks[j].rxds[k].virt_addr; in rxd_owner_bit_reset()
6886 if (sp->rxd_mode == RXD_MODE_3B) in rxd_owner_bit_reset()
6887 ba = &ring->ba[j][k]; in rxd_owner_bit_reset()
6892 size) == -ENOMEM) { in rxd_owner_bit_reset()
6899 rxdp->Control_1 |= RXD_OWN_XENA; in rxd_owner_bit_reset()
6910 struct net_device *dev = sp->dev; in s2io_add_isr()
6913 if (sp->config.intr_type == MSI_X) in s2io_add_isr()
6916 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); in s2io_add_isr()
6917 sp->config.intr_type = INTA; in s2io_add_isr()
6927 if (sp->config.intr_type == MSI_X) { in s2io_add_isr()
6930 for (i = 0; i < sp->num_entries; i++) { in s2io_add_isr()
6931 if (sp->s2io_entries[i].in_use == MSIX_FLG) { in s2io_add_isr()
6932 if (sp->s2io_entries[i].type == in s2io_add_isr()
6934 snprintf(sp->desc[i], in s2io_add_isr()
6935 sizeof(sp->desc[i]), in s2io_add_isr()
6936 "%s:MSI-X-%d-RX", in s2io_add_isr()
6937 dev->name, i); in s2io_add_isr()
6938 err = request_irq(sp->entries[i].vector, in s2io_add_isr()
6941 sp->desc[i], in s2io_add_isr()
6942 sp->s2io_entries[i].arg); in s2io_add_isr()
6943 } else if (sp->s2io_entries[i].type == in s2io_add_isr()
6945 snprintf(sp->desc[i], in s2io_add_isr()
6946 sizeof(sp->desc[i]), in s2io_add_isr()
6947 "%s:MSI-X-%d-TX", in s2io_add_isr()
6948 dev->name, i); in s2io_add_isr()
6949 err = request_irq(sp->entries[i].vector, in s2io_add_isr()
6952 sp->desc[i], in s2io_add_isr()
6953 sp->s2io_entries[i].arg); in s2io_add_isr()
6957 if (!(sp->msix_info[i].addr && in s2io_add_isr()
6958 sp->msix_info[i].data)) { in s2io_add_isr()
6961 sp->desc[i], in s2io_add_isr()
6963 sp->msix_info[i].addr, in s2io_add_isr()
6965 ntohl(sp->msix_info[i].data)); in s2io_add_isr()
6972 "%s:MSI-X-%d registration " in s2io_add_isr()
6973 "failed\n", dev->name, i); in s2io_add_isr()
6977 dev->name); in s2io_add_isr()
6978 sp->config.intr_type = INTA; in s2io_add_isr()
6981 sp->s2io_entries[i].in_use = in s2io_add_isr()
6986 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt); in s2io_add_isr()
6988 "MSI-X-TX entries enabled through alarm vector\n"); in s2io_add_isr()
6991 if (sp->config.intr_type == INTA) { in s2io_add_isr()
6992 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED, in s2io_add_isr()
6993 sp->name, dev); in s2io_add_isr()
6996 dev->name); in s2io_add_isr()
6997 return -1; in s2io_add_isr()
7005 if (sp->config.intr_type == MSI_X) in s2io_rem_isr()
7014 struct XENA_dev_config __iomem *bar0 = sp->bar0; in do_s2io_card_down()
7017 config = &sp->config; in do_s2io_card_down()
7022 del_timer_sync(&sp->alarm_timer); in do_s2io_card_down()
7024 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) in do_s2io_card_down()
7026 clear_bit(__S2IO_STATE_CARD_UP, &sp->state); in do_s2io_card_down()
7029 if (sp->config.napi) { in do_s2io_card_down()
7031 if (config->intr_type == MSI_X) { in do_s2io_card_down()
7032 for (; off < sp->config.rx_ring_num; off++) in do_s2io_card_down()
7033 napi_disable(&sp->mac_control.rings[off].napi); in do_s2io_card_down()
7036 napi_disable(&sp->napi); in do_s2io_card_down()
7059 val64 = readq(&bar0->adapter_status); in do_s2io_card_down()
7061 if (verify_pcc_quiescent(sp, sp->device_enabled_once)) in do_s2io_card_down()
7068 DBG_PRINT(ERR_DBG, "Device not Quiescent - " in do_s2io_card_down()
7083 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state)); in do_s2io_card_down()
7096 struct net_device *dev = sp->dev; in s2io_card_up()
7103 dev->name); in s2io_card_up()
7104 if (ret != -EIO) in s2io_card_up()
7113 config = &sp->config; in s2io_card_up()
7114 mac_control = &sp->mac_control; in s2io_card_up()
7116 for (i = 0; i < config->rx_ring_num; i++) { in s2io_card_up()
7117 struct ring_info *ring = &mac_control->rings[i]; in s2io_card_up()
7119 ring->mtu = dev->mtu; in s2io_card_up()
7120 ring->lro = !!(dev->features & NETIF_F_LRO); in s2io_card_up()
7124 dev->name); in s2io_card_up()
7125 ret = -ENOMEM; in s2io_card_up()
7129 ring->rx_bufs_left); in s2io_card_up()
7133 if (config->napi) { in s2io_card_up()
7134 if (config->intr_type == MSI_X) { in s2io_card_up()
7135 for (i = 0; i < sp->config.rx_ring_num; i++) in s2io_card_up()
7136 napi_enable(&sp->mac_control.rings[i].napi); in s2io_card_up()
7138 napi_enable(&sp->napi); in s2io_card_up()
7143 if (sp->promisc_flg) in s2io_card_up()
7144 sp->promisc_flg = 0; in s2io_card_up()
7145 if (sp->m_cast_flg) { in s2io_card_up()
7146 sp->m_cast_flg = 0; in s2io_card_up()
7147 sp->all_multi_pos = 0; in s2io_card_up()
7153 if (dev->features & NETIF_F_LRO) { in s2io_card_up()
7155 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; in s2io_card_up()
7157 if (lro_max_pkts < sp->lro_max_aggr_per_sess) in s2io_card_up()
7158 sp->lro_max_aggr_per_sess = lro_max_pkts; in s2io_card_up()
7163 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); in s2io_card_up()
7164 ret = -ENODEV; in s2io_card_up()
7170 if (sp->config.intr_type == MSI_X) in s2io_card_up()
7172 ret = -ENODEV; in s2io_card_up()
7176 timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0); in s2io_card_up()
7177 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); in s2io_card_up()
7179 set_bit(__S2IO_STATE_CARD_UP, &sp->state); in s2io_card_up()
7183 if (sp->config.intr_type != INTA) { in s2io_card_up()
7195 if (config->napi) { in s2io_card_up()
7196 if (config->intr_type == MSI_X) { in s2io_card_up()
7197 for (i = 0; i < sp->config.rx_ring_num; i++) in s2io_card_up()
7198 napi_disable(&sp->mac_control.rings[i].napi); in s2io_card_up()
7200 napi_disable(&sp->napi); in s2io_card_up()
7210 * s2io_restart_nic - Resets the NIC.
7222 struct net_device *dev = sp->dev; in s2io_restart_nic()
7231 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name); in s2io_restart_nic()
7234 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name); in s2io_restart_nic()
7240 * s2io_tx_watchdog - Watchdog for transmit side.
7245 * for a pre-defined amount of time when the Interface is still up.
7256 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; in s2io_tx_watchdog()
7259 swstats->watchdog_timer_cnt++; in s2io_tx_watchdog()
7260 schedule_work(&sp->rst_timer_task); in s2io_tx_watchdog()
7261 swstats->soft_reset_cnt++; in s2io_tx_watchdog()
7266 * rx_osm_handler - To perform some OS related operations on SKB.
7277 * SUCCESS on success and -1 on failure.
7281 struct s2io_nic *sp = ring_data->nic; in rx_osm_handler()
7282 struct net_device *dev = ring_data->dev; in rx_osm_handler()
7284 ((unsigned long)rxdp->Host_Control); in rx_osm_handler()
7285 int ring_no = ring_data->ring_no; in rx_osm_handler()
7287 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; in rx_osm_handler()
7290 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; in rx_osm_handler()
7292 skb->dev = dev; in rx_osm_handler()
7297 swstats->parity_err_cnt++; in rx_osm_handler()
7302 swstats->rx_parity_err_cnt++; in rx_osm_handler()
7306 swstats->rx_abort_cnt++; in rx_osm_handler()
7310 swstats->rx_parity_abort_cnt++; in rx_osm_handler()
7314 swstats->rx_rda_fail_cnt++; in rx_osm_handler()
7318 swstats->rx_unkn_prot_cnt++; in rx_osm_handler()
7322 swstats->rx_fcs_err_cnt++; in rx_osm_handler()
7326 swstats->rx_buf_size_err_cnt++; in rx_osm_handler()
7330 swstats->rx_rxd_corrupt_cnt++; in rx_osm_handler()
7334 swstats->rx_unkn_err_cnt++; in rx_osm_handler()
7346 dev->name, err_mask); in rx_osm_handler()
7347 dev->stats.rx_crc_errors++; in rx_osm_handler()
7348 swstats->mem_freed in rx_osm_handler()
7349 += skb->truesize; in rx_osm_handler()
7351 ring_data->rx_bufs_left -= 1; in rx_osm_handler()
7352 rxdp->Host_Control = 0; in rx_osm_handler()
7357 rxdp->Host_Control = 0; in rx_osm_handler()
7358 if (sp->rxd_mode == RXD_MODE_1) { in rx_osm_handler()
7359 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); in rx_osm_handler()
7362 } else if (sp->rxd_mode == RXD_MODE_3B) { in rx_osm_handler()
7363 int get_block = ring_data->rx_curr_get_info.block_index; in rx_osm_handler()
7364 int get_off = ring_data->rx_curr_get_info.offset; in rx_osm_handler()
7365 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2); in rx_osm_handler()
7366 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2); in rx_osm_handler()
7368 struct buffAdd *ba = &ring_data->ba[get_block][get_off]; in rx_osm_handler()
7369 skb_put_data(skb, ba->ba_0, buf0_len); in rx_osm_handler()
7373 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && in rx_osm_handler()
7374 ((!ring_data->lro) || in rx_osm_handler()
7375 (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) && in rx_osm_handler()
7376 (dev->features & NETIF_F_RXCSUM)) { in rx_osm_handler()
7377 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); in rx_osm_handler()
7378 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); in rx_osm_handler()
7385 skb->ip_summed = CHECKSUM_UNNECESSARY; in rx_osm_handler()
7386 if (ring_data->lro) { in rx_osm_handler()
7392 skb->data, &tcp, in rx_osm_handler()
7397 lro->parent = skb; in rx_osm_handler()
7404 queue_rx_frame(lro->parent, in rx_osm_handler()
7405 lro->vlan_tag); in rx_osm_handler()
7407 swstats->flush_max_pkts++; in rx_osm_handler()
7410 lro->parent->data_len = lro->frags_len; in rx_osm_handler()
7411 swstats->sending_both++; in rx_osm_handler()
7412 queue_rx_frame(lro->parent, in rx_osm_handler()
7413 lro->vlan_tag); in rx_osm_handler()
7417 case -1: /* non-TCP or not L2 aggregatable */ in rx_osm_handler()
7440 swstats->mem_freed += skb->truesize; in rx_osm_handler()
7443 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); in rx_osm_handler()
7445 sp->mac_control.rings[ring_no].rx_bufs_left -= 1; in rx_osm_handler()
7450 * s2io_link - stops/starts the Tx queue.
7464 struct net_device *dev = sp->dev; in s2io_link()
7465 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; in s2io_link()
7467 if (link != sp->last_link_state) { in s2io_link()
7470 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); in s2io_link()
7473 if (swstats->link_up_cnt) in s2io_link()
7474 swstats->link_up_time = in s2io_link()
7475 jiffies - sp->start_time; in s2io_link()
7476 swstats->link_down_cnt++; in s2io_link()
7478 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name); in s2io_link()
7479 if (swstats->link_down_cnt) in s2io_link()
7480 swstats->link_down_time = in s2io_link()
7481 jiffies - sp->start_time; in s2io_link()
7482 swstats->link_up_cnt++; in s2io_link()
7487 sp->last_link_state = link; in s2io_link()
7488 sp->start_time = jiffies; in s2io_link()
7492 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7496 * This function initializes a few of the PCI and PCI-X configuration registers
7506 /* Enable Data Parity Error Recovery in PCI-X command register. */ in s2io_init_pci()
7507 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, in s2io_init_pci()
7509 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, in s2io_init_pci()
7511 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, in s2io_init_pci()
7515 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); in s2io_init_pci()
7516 pci_write_config_word(sp->pdev, PCI_COMMAND, in s2io_init_pci()
7518 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); in s2io_init_pci()
7572 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && in s2io_verify_parm()
7573 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { in s2io_verify_parm()
7581 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n"); in s2io_verify_parm()
7597 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
7604 * '-1' on failure (endian settings incorrect).
7608 struct XENA_dev_config __iomem *bar0 = nic->bar0; in rts_ds_steer()
7615 writeq(val64, &bar0->rts_ds_mem_data); in rts_ds_steer()
7621 writeq(val64, &bar0->rts_ds_mem_ctrl); in rts_ds_steer()
7623 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl, in rts_ds_steer()
7646 * s2io_init_nic - Initialization of the adapter .
7686 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { in s2io_init_nic()
7690 return -ENOMEM; in s2io_init_nic()
7694 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n", in s2io_init_nic()
7697 return -ENODEV; in s2io_init_nic()
7706 return -ENODEV; in s2io_init_nic()
7711 SET_NETDEV_DEV(dev, &pdev->dev); in s2io_init_nic()
7715 sp->dev = dev; in s2io_init_nic()
7716 sp->pdev = pdev; in s2io_init_nic()
7717 sp->device_enabled_once = false; in s2io_init_nic()
7719 sp->rxd_mode = RXD_MODE_1; in s2io_init_nic()
7721 sp->rxd_mode = RXD_MODE_3B; in s2io_init_nic()
7723 sp->config.intr_type = dev_intr_type; in s2io_init_nic()
7725 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || in s2io_init_nic()
7726 (pdev->device == PCI_DEVICE_ID_HERC_UNI)) in s2io_init_nic()
7727 sp->device_type = XFRAME_II_DEVICE; in s2io_init_nic()
7729 sp->device_type = XFRAME_I_DEVICE; in s2io_init_nic()
7732 /* Initialize some PCI/PCI-X fields of the NIC. */ in s2io_init_nic()
7742 config = &sp->config; in s2io_init_nic()
7743 mac_control = &sp->mac_control; in s2io_init_nic()
7745 config->napi = napi; in s2io_init_nic()
7746 config->tx_steering_type = tx_steering_type; in s2io_init_nic()
7749 if (config->tx_steering_type == TX_PRIORITY_STEERING) in s2io_init_nic()
7750 config->tx_fifo_num = MAX_TX_FIFOS; in s2io_init_nic()
7752 config->tx_fifo_num = tx_fifo_num; in s2io_init_nic()
7755 if (config->tx_fifo_num < 5) { in s2io_init_nic()
7756 if (config->tx_fifo_num == 1) in s2io_init_nic()
7757 sp->total_tcp_fifos = 1; in s2io_init_nic()
7759 sp->total_tcp_fifos = config->tx_fifo_num - 1; in s2io_init_nic()
7760 sp->udp_fifo_idx = config->tx_fifo_num - 1; in s2io_init_nic()
7761 sp->total_udp_fifos = 1; in s2io_init_nic()
7762 sp->other_fifo_idx = sp->total_tcp_fifos - 1; in s2io_init_nic()
7764 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM - in s2io_init_nic()
7766 sp->udp_fifo_idx = sp->total_tcp_fifos; in s2io_init_nic()
7767 sp->total_udp_fifos = FIFO_UDP_MAX_NUM; in s2io_init_nic()
7768 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM; in s2io_init_nic()
7771 config->multiq = dev_multiq; in s2io_init_nic()
7772 for (i = 0; i < config->tx_fifo_num; i++) { in s2io_init_nic()
7773 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in s2io_init_nic()
7775 tx_cfg->fifo_len = tx_fifo_len[i]; in s2io_init_nic()
7776 tx_cfg->fifo_priority = i; in s2io_init_nic()
7781 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i]; in s2io_init_nic()
7784 for (i = 0; i < config->tx_fifo_num; i++) in s2io_init_nic()
7785 sp->fifo_selector[i] = fifo_selector[i]; in s2io_init_nic()
7788 config->tx_intr_type = TXD_INT_TYPE_UTILZ; in s2io_init_nic()
7789 for (i = 0; i < config->tx_fifo_num; i++) { in s2io_init_nic()
7790 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in s2io_init_nic()
7792 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER); in s2io_init_nic()
7793 if (tx_cfg->fifo_len < 65) { in s2io_init_nic()
7794 config->tx_intr_type = TXD_INT_TYPE_PER_LIST; in s2io_init_nic()
7798 /* + 2 because one Txd for skb->data and one Txd for UFO */ in s2io_init_nic()
7799 config->max_txds = MAX_SKB_FRAGS + 2; in s2io_init_nic()
7802 config->rx_ring_num = rx_ring_num; in s2io_init_nic()
7803 for (i = 0; i < config->rx_ring_num; i++) { in s2io_init_nic()
7804 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in s2io_init_nic()
7805 struct ring_info *ring = &mac_control->rings[i]; in s2io_init_nic()
7807 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1); in s2io_init_nic()
7808 rx_cfg->ring_priority = i; in s2io_init_nic()
7809 ring->rx_bufs_left = 0; in s2io_init_nic()
7810 ring->rxd_mode = sp->rxd_mode; in s2io_init_nic()
7811 ring->rxd_count = rxd_count[sp->rxd_mode]; in s2io_init_nic()
7812 ring->pdev = sp->pdev; in s2io_init_nic()
7813 ring->dev = sp->dev; in s2io_init_nic()
7817 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in s2io_init_nic()
7819 rx_cfg->ring_org = RING_ORG_BUFF1; in s2io_init_nic()
7820 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER); in s2io_init_nic()
7824 mac_control->rmac_pause_time = rmac_pause_time; in s2io_init_nic()
7825 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3; in s2io_init_nic()
7826 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; in s2io_init_nic()
7831 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name); in s2io_init_nic()
7832 ret = -ENOMEM; in s2io_init_nic()
7836 sp->bar0 = pci_ioremap_bar(pdev, 0); in s2io_init_nic()
7837 if (!sp->bar0) { in s2io_init_nic()
7839 dev->name); in s2io_init_nic()
7840 ret = -ENOMEM; in s2io_init_nic()
7844 sp->bar1 = pci_ioremap_bar(pdev, 2); in s2io_init_nic()
7845 if (!sp->bar1) { in s2io_init_nic()
7847 dev->name); in s2io_init_nic()
7848 ret = -ENOMEM; in s2io_init_nic()
7854 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000); in s2io_init_nic()
7858 dev->netdev_ops = &s2io_netdev_ops; in s2io_init_nic()
7859 dev->ethtool_ops = &netdev_ethtool_ops; in s2io_init_nic()
7860 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | in s2io_init_nic()
7863 dev->features |= dev->hw_features | in s2io_init_nic()
7866 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; in s2io_init_nic()
7867 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); in s2io_init_nic()
7868 INIT_WORK(&sp->set_link_task, s2io_set_link); in s2io_init_nic()
7870 pci_save_state(sp->pdev); in s2io_init_nic()
7875 dev->name); in s2io_init_nic()
7876 ret = -EAGAIN; in s2io_init_nic()
7881 if (sp->device_type & XFRAME_II_DEVICE) { in s2io_init_nic()
7886 ret = -EBADSLT; in s2io_init_nic()
7891 if (sp->config.intr_type == MSI_X) { in s2io_init_nic()
7892 sp->num_entries = config->rx_ring_num + 1; in s2io_init_nic()
7897 /* rollback MSI-X, will re-enable during add_isr() */ in s2io_init_nic()
7903 "MSI-X requested but failed to enable\n"); in s2io_init_nic()
7904 sp->config.intr_type = INTA; in s2io_init_nic()
7908 if (config->intr_type == MSI_X) { in s2io_init_nic()
7909 for (i = 0; i < config->rx_ring_num ; i++) { in s2io_init_nic()
7910 struct ring_info *ring = &mac_control->rings[i]; in s2io_init_nic()
7912 netif_napi_add(dev, &ring->napi, s2io_poll_msix); in s2io_init_nic()
7915 netif_napi_add(dev, &sp->napi, s2io_poll_inta); in s2io_init_nic()
7919 if (sp->device_type & XFRAME_I_DEVICE) { in s2io_init_nic()
7932 bar0 = sp->bar0; in s2io_init_nic()
7935 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_init_nic()
7936 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_init_nic()
7939 tmp64 = readq(&bar0->rmac_addr_data0_mem); in s2io_init_nic()
7943 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up); in s2io_init_nic()
7944 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8); in s2io_init_nic()
7945 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16); in s2io_init_nic()
7946 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24); in s2io_init_nic()
7947 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16); in s2io_init_nic()
7948 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24); in s2io_init_nic()
7951 dev->addr_len = ETH_ALEN; in s2io_init_nic()
7952 eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr); in s2io_init_nic()
7955 if (sp->device_type == XFRAME_I_DEVICE) { in s2io_init_nic()
7956 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES; in s2io_init_nic()
7957 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES; in s2io_init_nic()
7958 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET; in s2io_init_nic()
7959 } else if (sp->device_type == XFRAME_II_DEVICE) { in s2io_init_nic()
7960 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES; in s2io_init_nic()
7961 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES; in s2io_init_nic()
7962 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET; in s2io_init_nic()
7965 /* MTU range: 46 - 9600 */ in s2io_init_nic()
7966 dev->min_mtu = MIN_MTU; in s2io_init_nic()
7967 dev->max_mtu = S2IO_JUMBO_SIZE; in s2io_init_nic()
7973 if ((sp->device_type == XFRAME_II_DEVICE) && in s2io_init_nic()
7974 (config->intr_type == MSI_X)) in s2io_init_nic()
7975 sp->num_entries = config->rx_ring_num + 1; in s2io_init_nic()
7986 sp->state = 0; in s2io_init_nic()
7989 for (i = 0; i < sp->config.tx_fifo_num; i++) { in s2io_init_nic()
7990 struct fifo_info *fifo = &mac_control->fifos[i]; in s2io_init_nic()
7992 spin_lock_init(&fifo->tx_lock); in s2io_init_nic()
7996 * SXE-002: Configure link and activity LED to init state in s2io_init_nic()
7999 subid = sp->pdev->subsystem_device; in s2io_init_nic()
8001 val64 = readq(&bar0->gpio_control); in s2io_init_nic()
8003 writeq(val64, &bar0->gpio_control); in s2io_init_nic()
8006 val64 = readq(&bar0->gpio_control); in s2io_init_nic()
8009 sp->rx_csum = 1; /* Rx chksum verify enabled by default */ in s2io_init_nic()
8013 ret = -ENODEV; in s2io_init_nic()
8017 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n"); in s2io_init_nic()
8018 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name, in s2io_init_nic()
8019 sp->product_name, pdev->revision); in s2io_init_nic()
8020 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, in s2io_init_nic()
8022 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr); in s2io_init_nic()
8023 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num); in s2io_init_nic()
8024 if (sp->device_type & XFRAME_II_DEVICE) { in s2io_init_nic()
8027 ret = -EBADSLT; in s2io_init_nic()
8032 switch (sp->rxd_mode) { in s2io_init_nic()
8034 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n", in s2io_init_nic()
8035 dev->name); in s2io_init_nic()
8038 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", in s2io_init_nic()
8039 dev->name); in s2io_init_nic()
8043 switch (sp->config.napi) { in s2io_init_nic()
8045 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); in s2io_init_nic()
8048 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); in s2io_init_nic()
8052 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, in s2io_init_nic()
8053 sp->config.tx_fifo_num); in s2io_init_nic()
8055 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name, in s2io_init_nic()
8056 sp->config.rx_ring_num); in s2io_init_nic()
8058 switch (sp->config.intr_type) { in s2io_init_nic()
8060 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); in s2io_init_nic()
8063 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); in s2io_init_nic()
8066 if (sp->config.multiq) { in s2io_init_nic()
8067 for (i = 0; i < sp->config.tx_fifo_num; i++) { in s2io_init_nic()
8068 struct fifo_info *fifo = &mac_control->fifos[i]; in s2io_init_nic()
8070 fifo->multiq = config->multiq; in s2io_init_nic()
8073 dev->name); in s2io_init_nic()
8076 dev->name); in s2io_init_nic()
8078 switch (sp->config.tx_steering_type) { in s2io_init_nic()
8081 dev->name); in s2io_init_nic()
8086 dev->name); in s2io_init_nic()
8091 dev->name); in s2io_init_nic()
8095 dev->name); in s2io_init_nic()
8097 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, in s2io_init_nic()
8098 sp->product_name); in s2io_init_nic()
8101 sp->vlan_strip_flag = 1; in s2io_init_nic()
8103 sp->vlan_strip_flag = 0; in s2io_init_nic()
8116 iounmap(sp->bar1); in s2io_init_nic()
8118 iounmap(sp->bar0); in s2io_init_nic()
8130 * s2io_rem_nic - Free the PCI device
8150 cancel_work_sync(&sp->rst_timer_task); in s2io_rem_nic()
8151 cancel_work_sync(&sp->set_link_task); in s2io_rem_nic()
8156 iounmap(sp->bar0); in s2io_rem_nic()
8157 iounmap(sp->bar1); in s2io_rem_nic()
8170 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; in check_L2_lro_capable()
8172 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { in check_L2_lro_capable()
8174 "%s: Non-TCP frames not supported for LRO\n", in check_L2_lro_capable()
8176 return -1; in check_L2_lro_capable()
8186 if ((!sp->vlan_strip_flag) && in check_L2_lro_capable()
8187 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) in check_L2_lro_capable()
8190 /* LLC, SNAP etc are considered non-mergeable */ in check_L2_lro_capable()
8191 return -1; in check_L2_lro_capable()
8195 ip_len = (u8)((*ip)->ihl); in check_L2_lro_capable()
8206 if ((lro->iph->saddr != ip->saddr) || in check_for_socket_match()
8207 (lro->iph->daddr != ip->daddr) || in check_for_socket_match()
8208 (lro->tcph->source != tcp->source) || in check_for_socket_match()
8209 (lro->tcph->dest != tcp->dest)) in check_for_socket_match()
8210 return -1; in check_for_socket_match()
8216 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2); in get_l4_pyld_length()
8224 lro->l2h = l2h; in initiate_new_session()
8225 lro->iph = ip; in initiate_new_session()
8226 lro->tcph = tcp; in initiate_new_session()
8227 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); in initiate_new_session()
8228 lro->tcp_ack = tcp->ack_seq; in initiate_new_session()
8229 lro->sg_num = 1; in initiate_new_session()
8230 lro->total_len = ntohs(ip->tot_len); in initiate_new_session()
8231 lro->frags_len = 0; in initiate_new_session()
8232 lro->vlan_tag = vlan_tag; in initiate_new_session()
8237 if (tcp->doff == 8) { in initiate_new_session()
8240 lro->saw_ts = 1; in initiate_new_session()
8241 lro->cur_tsval = ntohl(*(ptr+1)); in initiate_new_session()
8242 lro->cur_tsecr = *(ptr+2); in initiate_new_session()
8244 lro->in_use = 1; in initiate_new_session()
8249 struct iphdr *ip = lro->iph; in update_L3L4_header()
8250 struct tcphdr *tcp = lro->tcph; in update_L3L4_header()
8251 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; in update_L3L4_header()
8256 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len)); in update_L3L4_header()
8257 ip->tot_len = htons(lro->total_len); in update_L3L4_header()
8260 tcp->ack_seq = lro->tcp_ack; in update_L3L4_header()
8261 tcp->window = lro->window; in update_L3L4_header()
8264 if (lro->saw_ts) { in update_L3L4_header()
8266 *(ptr+2) = lro->cur_tsecr; in update_L3L4_header()
8272 swstats->sum_avg_pkts_aggregated += lro->sg_num; in update_L3L4_header()
8273 swstats->num_aggregations++; in update_L3L4_header()
8280 lro->total_len += l4_pyld; in aggregate_new_rx()
8281 lro->frags_len += l4_pyld; in aggregate_new_rx()
8282 lro->tcp_next_seq += l4_pyld; in aggregate_new_rx()
8283 lro->sg_num++; in aggregate_new_rx()
8286 lro->tcp_ack = tcp->ack_seq; in aggregate_new_rx()
8287 lro->window = tcp->window; in aggregate_new_rx()
8289 if (lro->saw_ts) { in aggregate_new_rx()
8293 lro->cur_tsval = ntohl(*(ptr+1)); in aggregate_new_rx()
8294 lro->cur_tsecr = *(ptr + 2); in aggregate_new_rx()
8307 return -1; in verify_l3_l4_lro_capable()
8310 if (ip->ihl != 5) /* IP has options */ in verify_l3_l4_lro_capable()
8311 return -1; in verify_l3_l4_lro_capable()
8315 return -1; in verify_l3_l4_lro_capable()
8318 if (tcp->urg || tcp->psh || tcp->rst || in verify_l3_l4_lro_capable()
8319 tcp->syn || tcp->fin || in verify_l3_l4_lro_capable()
8320 tcp->ece || tcp->cwr || !tcp->ack) { in verify_l3_l4_lro_capable()
8326 return -1; in verify_l3_l4_lro_capable()
8333 if (tcp->doff != 5 && tcp->doff != 8) in verify_l3_l4_lro_capable()
8334 return -1; in verify_l3_l4_lro_capable()
8336 if (tcp->doff == 8) { in verify_l3_l4_lro_capable()
8341 return -1; in verify_l3_l4_lro_capable()
8345 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2)))) in verify_l3_l4_lro_capable()
8346 return -1; in verify_l3_l4_lro_capable()
8348 /* timestamp echo reply should be non-zero */ in verify_l3_l4_lro_capable()
8350 return -1; in verify_l3_l4_lro_capable()
8364 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; in s2io_club_tcp_session()
8371 DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr); in s2io_club_tcp_session()
8373 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2); in s2io_club_tcp_session()
8377 struct lro *l_lro = &ring_data->lro0_n[i]; in s2io_club_tcp_session()
8378 if (l_lro->in_use) { in s2io_club_tcp_session()
8384 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) { in s2io_club_tcp_session()
8388 (*lro)->tcp_next_seq, in s2io_club_tcp_session()
8389 ntohl(tcph->seq)); in s2io_club_tcp_session()
8391 swstats->outof_sequence_pkts++; in s2io_club_tcp_session()
8415 struct lro *l_lro = &ring_data->lro0_n[i]; in s2io_club_tcp_session()
8416 if (!(l_lro->in_use)) { in s2io_club_tcp_session()
8441 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) { in s2io_club_tcp_session()
8463 struct net_device *dev = skb->dev; in queue_rx_frame()
8466 skb->protocol = eth_type_trans(skb, dev); in queue_rx_frame()
8467 if (vlan_tag && sp->vlan_strip_flag) in queue_rx_frame()
8469 if (sp->config.napi) in queue_rx_frame()
8478 struct sk_buff *first = lro->parent; in lro_append_pkt()
8479 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; in lro_append_pkt()
8481 first->len += tcp_len; in lro_append_pkt()
8482 first->data_len = lro->frags_len; in lro_append_pkt()
8483 skb_pull(skb, (skb->len - tcp_len)); in lro_append_pkt()
8484 if (skb_shinfo(first)->frag_list) in lro_append_pkt()
8485 lro->last_frag->next = skb; in lro_append_pkt()
8487 skb_shinfo(first)->frag_list = skb; in lro_append_pkt()
8488 first->truesize += skb->truesize; in lro_append_pkt()
8489 lro->last_frag = skb; in lro_append_pkt()
8490 swstats->clubbed_frms_cnt++; in lro_append_pkt()
8494 * s2io_io_error_detected - called when PCI error is detected
8522 * s2io_io_slot_reset - called after the pci bus has been reset.
8525 * Restart the card from scratch, as if from a cold-boot.
8536 pr_err("Cannot re-enable PCI device after reset.\n"); in s2io_io_slot_reset()
8547 * s2io_io_resume - called when traffic can start flowing again.
8564 if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) { in s2io_io_resume()