Lines matching "wake-on-lan"
1 // SPDX-License-Identifier: GPL-2.0-only
3 * drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
5 * Copyright (c) 2009-2010 Micrel, Inc.
271 #define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
293 #define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
482 (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
587 #define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
588 #define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
589 #define STATIC_MAC_TABLE_VALID 00-00080000-00000000
590 #define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
591 #define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
592 #define STATIC_MAC_TABLE_FID 00-03C00000-00000000
606 #define VLAN_TABLE_VID 00-00000000-00000FFF
607 #define VLAN_TABLE_FID 00-00000000-0000F000
608 #define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
609 #define VLAN_TABLE_VALID 00-00000000-00080000
621 #define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
622 #define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
623 #define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
624 #define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
625 #define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
626 #define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
627 #define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
628 #define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
649 #define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
650 #define MIB_COUNTER_VALID 00-00000000-40000000
651 #define MIB_COUNTER_OVERFLOW 00-00000000-80000000
670 #define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
686 #define HW_TO_DEV_PORT(port) (port - 1)
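The two macros above are plain index arithmetic: MAC_ADDR_ORDER() reverses the byte order because, as hw_set_addr() below shows, the chip's address registers hold the station address last byte first, and HW_TO_DEV_PORT() maps 1-based hardware ports to 0-based device indices. A standalone check of the byte reversal (ETH_ALEN redefined locally for the example):

#include <assert.h>

#define ETH_ALEN 6
#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))

int main(void)
{
	/* first register byte corresponds to the last MAC byte */
	assert(MAC_ADDR_ORDER(0) == 5);
	assert(MAC_ADDR_ORDER(5) == 0);
	return 0;
}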
865 * struct ksz_hw_desc - Hardware descriptor data structure
879 * struct ksz_sw_desc - Software descriptor data structure
891 * struct ksz_dma_buf - OS dependent DMA buffer data structure
903 * struct ksz_desc - Descriptor structure
916 #define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
919 * struct ksz_desc_info - Descriptor information data structure
959 * struct ksz_mac_table - Static MAC table data structure
981 * struct ksz_vlan_table - VLAN table data structure
1045 * struct ksz_port_mib - Port MIB data structure
1050 * @counter: 64-bit MIB counter value.
1074 * struct ksz_port_cfg - Port configuration data structure
1092 * struct ksz_switch - KSZ8842 switch data structure
1096 * @diffserv: DiffServ priority settings. Possible values from the 6-bit ToS
1098 * @p_802_1p: 802.1P priority settings. Possible values from the 3-bit 802.1p
1123 * struct ksz_port_info - Port information data structure
1127 * @advertised: Advertised auto-negotiation setting. Used to determine link.
1128 * @partner: Auto-negotiation partner setting. Used to determine link.
1156 * struct ksz_hw - KSZ884X hardware data structure
1248 * struct ksz_port - Virtual port data structure
1254 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
1283 * struct ksz_timer_info - Timer information data structure
1286 * @max: Number of times to run timer; -1 for infinity.
1297 * struct ksz_shared_mem - OS dependent shared memory data structure
1313 * struct ksz_counter_info - OS dependent counter information data structure
1325 * struct dev_info - Network device information data structure
1344 * @wol_enable: Wake-on-LAN enable set by ethtool.
1345 * @wol_support: Wake-on-LAN support used by ethtool.
1380 * struct dev_priv - Network device private data structure
1426 writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS); in hw_ack_intr()
1431 hw->intr_blocked = hw->intr_mask; in hw_dis_intr()
1432 writel(0, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_dis_intr()
1433 hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE); in hw_dis_intr()
1438 hw->intr_set = interrupt; in hw_set_intr()
1439 writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_set_intr()
1444 hw->intr_blocked = 0; in hw_ena_intr()
1445 hw_set_intr(hw, hw->intr_mask); in hw_ena_intr()
1450 hw->intr_mask &= ~(bit); in hw_dis_intr_bit()
1457 read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE); in hw_turn_off_intr()
1458 hw->intr_set = read_intr & ~interrupt; in hw_turn_off_intr()
1459 writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_turn_off_intr()
1464 * hw_turn_on_intr - turn on specified interrupts
1466 * @bit: The interrupt bits to be on.
1468 * This routine turns on the specified interrupts in the interrupt mask so that
1473 hw->intr_mask |= bit; in hw_turn_on_intr()
1475 if (!hw->intr_blocked) in hw_turn_on_intr()
1476 hw_set_intr(hw, hw->intr_mask); in hw_turn_on_intr()
1481 *status = readl(hw->io + KS884X_INTERRUPTS_STATUS); in hw_read_intr()
1482 *status = *status & hw->intr_set; in hw_read_intr()
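hw_set_intr(), hw_dis_intr() and hw_turn_off_intr() all revolve around a cached copy of the enable register (hw->intr_set), so interrupt bits can be masked off without an extra register read. A minimal sketch of that pattern; intr_state and enable_reg are illustrative stand-ins, not driver symbols:

/* Cached-mask pattern from hw_set_intr()/hw_turn_off_intr(); the struct
 * field stands in for the memory-mapped KS884X_INTERRUPTS_ENABLE register. */
struct intr_state {
	unsigned int enable_reg;	/* mock hardware enable register */
	unsigned int intr_set;		/* cached copy of the last value written */
};

static void intr_write(struct intr_state *s, unsigned int mask)
{
	s->intr_set = mask;		/* cache first ... */
	s->enable_reg = mask;		/* ... then write the "register" */
}

static void intr_turn_off(struct intr_state *s, unsigned int bits)
{
	/* read-modify-write against the cached copy, as hw_turn_off_intr() does */
	intr_write(s, s->intr_set & ~bits);
}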
1492 * hw_block_intr - block hardware interrupts
1504 if (!hw->intr_blocked) { in hw_block_intr()
1506 interrupt = hw->intr_blocked; in hw_block_intr()
1518 desc->phw->ctrl.data = cpu_to_le32(status.data); in reset_desc()
1523 desc->sw.ctrl.tx.hw_owned = 1; in release_desc()
1524 if (desc->sw.buf_size != desc->sw.buf.data) { in release_desc()
1525 desc->sw.buf_size = desc->sw.buf.data; in release_desc()
1526 desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data); in release_desc()
1528 desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data); in release_desc()
1533 *desc = &info->ring[info->last]; in get_rx_pkt()
1534 info->last++; in get_rx_pkt()
1535 info->last &= info->mask; in get_rx_pkt()
1536 info->avail--; in get_rx_pkt()
1537 (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK; in get_rx_pkt()
1542 desc->phw->addr = cpu_to_le32(addr); in set_rx_buf()
1547 desc->sw.buf.rx.buf_size = len; in set_rx_len()
1553 *desc = &info->ring[info->next]; in get_tx_pkt()
1554 info->next++; in get_tx_pkt()
1555 info->next &= info->mask; in get_tx_pkt()
1556 info->avail--; in get_tx_pkt()
1557 (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK; in get_tx_pkt()
1562 desc->phw->addr = cpu_to_le32(addr); in set_tx_buf()
1567 desc->sw.buf.tx.buf_size = len; in set_tx_len()
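get_rx_pkt() and get_tx_pkt() advance their ring index with an AND against info->mask, which wraps correctly only when the ring size is a power of two (ksz_check_desc_num() enforces this and sets mask = alloc - 1). A standalone sketch of that invariant, with alloc = 64 as an assumed ring size:

#include <assert.h>

/* Power-of-two ring: advancing with "& mask" wraps for free. */
static unsigned int ring_advance(unsigned int index, unsigned int mask)
{
	return (index + 1) & mask;
}

int main(void)
{
	unsigned int alloc = 64, mask = alloc - 1, i = alloc - 1;

	assert((alloc & (alloc - 1)) == 0);	/* must be a power of two */
	assert(ring_advance(i, mask) == 0);	/* last slot wraps back to 0 */
	return 0;
}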
1577 readw(hw->io + reg); \
1581 * sw_r_table - read 4 bytes of data from switch table
1599 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in sw_r_table()
1601 *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in sw_r_table()
1607 * sw_w_table_64 - write 8 bytes of data to the switch table
1627 writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET); in sw_w_table_64()
1628 writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET); in sw_w_table_64()
1630 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in sw_w_table_64()
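sw_r_table() and sw_w_table_64() use indirect access: the payload travels through the ACC_DATA registers while a control word written to IACR selects the table and entry and triggers the operation, so on writes the data must be loaded first. A plain-C mock of that ordering; the struct fields stand in for the real readl()/writel() accesses on hw->io:

struct mock_sw {
	unsigned short iacr;	/* stands in for KS884X_IACR_OFFSET */
	unsigned int data_lo;	/* KS884X_ACC_DATA_0_OFFSET */
	unsigned int data_hi;	/* KS884X_ACC_DATA_4_OFFSET */
};

static void table_write64(struct mock_sw *sw, unsigned short ctrl_addr,
			  unsigned int hi, unsigned int lo)
{
	sw->data_hi = hi;	/* load the payload first ... */
	sw->data_lo = lo;
	sw->iacr = ctrl_addr;	/* ... then trigger with the control word */
}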
1637 * sw_w_sta_mac_table - write to the static MAC table
1675 * sw_r_vlan_table - read from the VLAN table
1685 * Return 0 if the entry is valid; otherwise -1.
1700 return -1; in sw_r_vlan_table()
1704 * port_r_mib_cnt - read MIB counter
1725 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in port_r_mib_cnt()
1728 for (timeout = 100; timeout > 0; timeout--) { in port_r_mib_cnt()
1729 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in port_r_mib_cnt()
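port_r_mib_cnt() polls the data register up to 100 times until the hardware flags the counter as valid (MIB_COUNTER_VALID). The shape of that bounded-poll loop, with a mock register-read callback standing in for readl():

static int poll_valid(unsigned int (*read_reg)(void), unsigned int valid_bit,
		      unsigned int *out)
{
	int timeout;

	for (timeout = 100; timeout > 0; timeout--) {
		unsigned int data = read_reg();

		if (data & valid_bit) {
			*out = data;
			return 0;
		}
	}
	return -1;	/* counter never became valid */
}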
1743 * port_r_mib_pkt - read dropped packet counts
1767 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in port_r_mib_pkt()
1769 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in port_r_mib_pkt()
1779 data -= cur; in port_r_mib_pkt()
1784 index -= KS_MIB_PACKET_DROPPED_TX - in port_r_mib_pkt()
1790 * port_r_cnt - read MIB counters periodically
1798 * Return non-zero when not all counters are read.
1802 struct ksz_port_mib *mib = &hw->port_mib[port]; in port_r_cnt()
1804 if (mib->mib_start < PORT_COUNTER_NUM) in port_r_cnt()
1805 while (mib->cnt_ptr < PORT_COUNTER_NUM) { in port_r_cnt()
1806 port_r_mib_cnt(hw, port, mib->cnt_ptr, in port_r_cnt()
1807 &mib->counter[mib->cnt_ptr]); in port_r_cnt()
1808 ++mib->cnt_ptr; in port_r_cnt()
1810 if (hw->mib_cnt > PORT_COUNTER_NUM) in port_r_cnt()
1811 port_r_mib_pkt(hw, port, mib->dropped, in port_r_cnt()
1812 &mib->counter[PORT_COUNTER_NUM]); in port_r_cnt()
1813 mib->cnt_ptr = 0; in port_r_cnt()
1818 * port_init_cnt - initialize MIB counter values
1827 struct ksz_port_mib *mib = &hw->port_mib[port]; in port_init_cnt()
1829 mib->cnt_ptr = 0; in port_init_cnt()
1830 if (mib->mib_start < PORT_COUNTER_NUM) in port_init_cnt()
1832 port_r_mib_cnt(hw, port, mib->cnt_ptr, in port_init_cnt()
1833 &mib->counter[mib->cnt_ptr]); in port_init_cnt()
1834 ++mib->cnt_ptr; in port_init_cnt()
1835 } while (mib->cnt_ptr < PORT_COUNTER_NUM); in port_init_cnt()
1836 if (hw->mib_cnt > PORT_COUNTER_NUM) in port_init_cnt()
1837 port_r_mib_pkt(hw, port, mib->dropped, in port_init_cnt()
1838 &mib->counter[PORT_COUNTER_NUM]); in port_init_cnt()
1839 memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM); in port_init_cnt()
1840 mib->cnt_ptr = 0; in port_init_cnt()
1848 * port_cfg - set port register bits
1865 data = readw(hw->io + addr); in port_cfg()
1870 writew(data, hw->io + addr); in port_cfg()
1874 * port_r8 - read byte from port register
1888 *data = readb(hw->io + addr); in port_r8()
1892 * port_r16 - read word from port register.
1906 *data = readw(hw->io + addr); in port_r16()
1910 * port_w16 - write word to port register.
1924 writew(data, hw->io + addr); in port_w16()
1928 * sw_chk - check switch register bits
1942 data = readw(hw->io + addr); in sw_chk()
1947 * sw_cfg - set switch register bits
1959 data = readw(hw->io + addr); in sw_cfg()
1964 writew(data, hw->io + addr); in sw_cfg()
1982 * sw_cfg_broad_storm - configure broadcast storm threshold
1996 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_cfg_broad_storm()
1999 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_cfg_broad_storm()
2003 * sw_get_broad_storm - get broadcast storm threshold
2014 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_get_broad_storm()
2023 * sw_dis_broad_storm - disable broadcast storm
2035 * sw_ena_broad_storm - enable broadcast storm
2043 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); in sw_ena_broad_storm()
2048 * sw_init_broad_storm - initialize broadcast storm
2057 hw->ksz_switch->broad_per = 1; in sw_init_broad_storm()
2058 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); in sw_init_broad_storm()
2065 * hw_cfg_broad_storm - configure broadcast storm
2079 hw->ksz_switch->broad_per = percent; in hw_cfg_broad_storm()
2083 * sw_dis_prio_rate - disable switch priority rate
2095 writel(0, hw->io + addr); in sw_dis_prio_rate()
2099 * sw_init_prio_rate - initialize switch priority rate
2108 struct ksz_switch *sw = hw->ksz_switch; in sw_init_prio_rate()
2112 sw->port_cfg[port].rx_rate[prio] = in sw_init_prio_rate()
2113 sw->port_cfg[port].tx_rate[prio] = 0; in sw_init_prio_rate()
2191 * sw_dis_diffserv - disable switch DiffServ priority
2203 * sw_dis_802_1p - disable switch 802.1p priority
2215 * sw_cfg_replace_null_vid - enable switch null VID replacement
2226 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
2231 * This routine enables the 802.1p priority re-mapping function of the switch.
2242 * sw_cfg_port_based - configure switch port based priority
2256 hw->ksz_switch->port_cfg[port].port_prio = prio; in sw_cfg_port_based()
2265 * sw_dis_multi_queue - disable transmit multiple queues
2270 * port. Only a single transmit queue is used on the port.
2278 * sw_init_prio - initialize switch priority
2287 struct ksz_switch *sw = hw->ksz_switch; in sw_init_prio()
2293 sw->p_802_1p[0] = 0; in sw_init_prio()
2294 sw->p_802_1p[1] = 0; in sw_init_prio()
2295 sw->p_802_1p[2] = 1; in sw_init_prio()
2296 sw->p_802_1p[3] = 1; in sw_init_prio()
2297 sw->p_802_1p[4] = 2; in sw_init_prio()
2298 sw->p_802_1p[5] = 2; in sw_init_prio()
2299 sw->p_802_1p[6] = 3; in sw_init_prio()
2300 sw->p_802_1p[7] = 3; in sw_init_prio()
2307 sw->diffserv[tos] = 0; in sw_init_prio()
2316 sw->port_cfg[port].port_prio = 0; in sw_init_prio()
2317 sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio); in sw_init_prio()
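The eight assignments in sw_init_prio() above collapse the 3-bit 802.1p tag onto four priority levels; the table is equivalent to priority = tag >> 1. A standalone check:

#include <assert.h>

int main(void)
{
	static const int p_802_1p[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	int tag;

	for (tag = 0; tag < 8; tag++)
		assert(p_802_1p[tag] == tag >> 1);	/* drop the low bit */
	return 0;
}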
2323 * port_get_def_vid - get port default VID.
2336 *vid = readw(hw->io + addr); in port_get_def_vid()
2340 * sw_init_vlan - initialize switch VLAN
2349 struct ksz_switch *sw = hw->ksz_switch; in sw_init_vlan()
2354 &sw->vlan_table[entry].vid, in sw_init_vlan()
2355 &sw->vlan_table[entry].fid, in sw_init_vlan()
2356 &sw->vlan_table[entry].member); in sw_init_vlan()
2360 port_get_def_vid(hw, port, &sw->port_cfg[port].vid); in sw_init_vlan()
2361 sw->port_cfg[port].member = PORT_MASK; in sw_init_vlan()
2366 * sw_cfg_port_base_vlan - configure port-based VLAN membership
2369 * @member: The port-based VLAN membership.
2371 * This routine configures the port-based VLAN membership of the port.
2381 data = readb(hw->io + addr); in sw_cfg_port_base_vlan()
2384 writeb(data, hw->io + addr); in sw_cfg_port_base_vlan()
2386 hw->ksz_switch->port_cfg[port].member = member; in sw_cfg_port_base_vlan()
2390 * sw_set_addr - configure switch MAC address
2401 writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i); in sw_set_addr()
2402 writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i); in sw_set_addr()
2407 * sw_set_global_ctrl - set switch global control
2417 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_set_global_ctrl()
2419 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_set_global_ctrl()
2421 data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET); in sw_set_global_ctrl()
2430 if (hw->overrides & FAST_AGING) in sw_set_global_ctrl()
2434 writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET); in sw_set_global_ctrl()
2436 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in sw_set_global_ctrl()
2440 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in sw_set_global_ctrl()
2453 * port_set_stp_state - configure port spanning tree state
2472 * No need to turn on transmit because of port direct mode. in port_set_stp_state()
2473 * Turning on receive is required if the static MAC table is not set up. in port_set_stp_state()
2502 hw->ksz_switch->port_cfg[port].stp_state = state; in port_set_stp_state()
2511 * sw_clr_sta_mac_table - clear static MAC table
2522 entry = &hw->ksz_switch->mac_table[i]; in sw_clr_sta_mac_table()
2524 entry->mac_addr, entry->ports, in sw_clr_sta_mac_table()
2525 entry->override, 0, in sw_clr_sta_mac_table()
2526 entry->use_fid, entry->fid); in sw_clr_sta_mac_table()
2531 * sw_init_stp - initialize switch spanning tree support
2540 entry = &hw->ksz_switch->mac_table[STP_ENTRY]; in sw_init_stp()
2541 entry->mac_addr[0] = 0x01; in sw_init_stp()
2542 entry->mac_addr[1] = 0x80; in sw_init_stp()
2543 entry->mac_addr[2] = 0xC2; in sw_init_stp()
2544 entry->mac_addr[3] = 0x00; in sw_init_stp()
2545 entry->mac_addr[4] = 0x00; in sw_init_stp()
2546 entry->mac_addr[5] = 0x00; in sw_init_stp()
2547 entry->ports = HOST_MASK; in sw_init_stp()
2548 entry->override = 1; in sw_init_stp()
2549 entry->valid = 1; in sw_init_stp()
2551 entry->mac_addr, entry->ports, in sw_init_stp()
2552 entry->override, entry->valid, in sw_init_stp()
2553 entry->use_fid, entry->fid); in sw_init_stp()
2557 * sw_block_addr - block certain packets from the host port
2568 entry = &hw->ksz_switch->mac_table[i]; in sw_block_addr()
2569 entry->valid = 0; in sw_block_addr()
2571 entry->mac_addr, entry->ports, in sw_block_addr()
2572 entry->override, entry->valid, in sw_block_addr()
2573 entry->use_fid, entry->fid); in sw_block_addr()
2579 *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_r_phy_ctrl()
2584 writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_w_phy_ctrl()
2588 * hw_r_phy - read data from PHY register
2601 *val = readw(hw->io + phy); in hw_r_phy()
2605 * hw_w_phy - write data to PHY register
2618 writew(val, hw->io + phy); in hw_w_phy()
2641 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in drop_gpio()
2643 writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET); in drop_gpio()
2650 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in raise_gpio()
2652 writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET); in raise_gpio()
2659 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in state_gpio()
2676 for (i = 15; i >= 0; i--) { in spi_r()
2692 for (i = 15; i >= 0; i--) { in spi_w()
2708 for (i = 1; i >= 0; i--) { in spi_reg()
2715 for (i = 5; i >= 0; i--) { in spi_reg()
2734 * eeprom_read - read from AT93C46 EEPROM
2757 * eeprom_write - write to AT93C46 EEPROM
2787 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout); in eeprom_write()
2804 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout); in eeprom_write()
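spi_r() and spi_w() bit-bang the AT93C46 EEPROM interface MSB first over the GPIO helpers. A sketch of the 16-bit read loop, with callbacks standing in for raise_gpio()/drop_gpio()/state_gpio():

static unsigned short bitbang_read16(void (*clk_hi)(void), void (*clk_lo)(void),
				     int (*data_in)(void))
{
	unsigned short data = 0;
	int i;

	for (i = 15; i >= 0; i--) {	/* MSB first, as in spi_r() */
		clk_hi();
		if (data_in())
			data |= 1 << i;	/* sample on the high phase */
		clk_lo();
	}
	return data;
}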
2822 switch (port->flow_ctrl) { in advertised_flow_ctrl()
2840 rx_cfg = hw->rx_cfg; in set_flow_ctrl()
2841 tx_cfg = hw->tx_cfg; in set_flow_ctrl()
2843 hw->rx_cfg |= DMA_RX_FLOW_ENABLE; in set_flow_ctrl()
2845 hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE; in set_flow_ctrl()
2847 hw->tx_cfg |= DMA_TX_FLOW_ENABLE; in set_flow_ctrl()
2849 hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE; in set_flow_ctrl()
2850 if (hw->enabled) { in set_flow_ctrl()
2851 if (rx_cfg != hw->rx_cfg) in set_flow_ctrl()
2852 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); in set_flow_ctrl()
2853 if (tx_cfg != hw->tx_cfg) in set_flow_ctrl()
2854 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in set_flow_ctrl()
2864 if (hw->overrides & PAUSE_FLOW_CTRL) in determine_flow_ctrl()
2868 if (port->force_link) in determine_flow_ctrl()
2884 if (!hw->ksz_switch) in determine_flow_ctrl()
2891 if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) && in port_cfg_change()
2892 !(hw->overrides & PAUSE_FLOW_CTRL)) { in port_cfg_change()
2893 u32 cfg = hw->tx_cfg; in port_cfg_change()
2896 if (1 == info->duplex) in port_cfg_change()
2897 hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE; in port_cfg_change()
2898 if (hw->enabled && cfg != hw->tx_cfg) in port_cfg_change()
2899 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in port_cfg_change()
2904 * port_get_link_speed - get current link status
2915 struct ksz_hw *hw = port->hw; in port_get_link_speed()
2925 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_get_link_speed()
2926 info = &hw->port_info[p]; in port_get_link_speed()
2939 if (local == info->advertised && remote == info->partner) in port_get_link_speed()
2942 info->advertised = local; in port_get_link_speed()
2943 info->partner = remote; in port_get_link_speed()
2950 info->tx_rate = 10 * TX_RATE_UNIT; in port_get_link_speed()
2952 info->tx_rate = 100 * TX_RATE_UNIT; in port_get_link_speed()
2954 info->duplex = 1; in port_get_link_speed()
2956 info->duplex = 2; in port_get_link_speed()
2958 if (media_connected != info->state) { in port_get_link_speed()
2964 if (hw->ksz_switch) { in port_get_link_speed()
2966 (1 == info->duplex)); in port_get_link_speed()
2970 info->state = media_connected; in port_get_link_speed()
2973 if (media_disconnected != info->state) in port_get_link_speed()
2974 hw->port_mib[p].link_down = 1; in port_get_link_speed()
2976 info->state = media_disconnected; in port_get_link_speed()
2978 hw->port_mib[p].state = (u8) info->state; in port_get_link_speed()
2981 if (linked && media_disconnected == port->linked->state) in port_get_link_speed()
2982 port->linked = linked; in port_get_link_speed()
2990 * port_set_link_speed - set port speed
2997 struct ksz_hw *hw = port->hw; in port_set_link_speed()
3004 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_set_link_speed()
3019 if (port->speed || port->duplex) { in port_set_link_speed()
3020 if (10 == port->speed) in port_set_link_speed()
3023 else if (100 == port->speed) in port_set_link_speed()
3026 if (1 == port->duplex) in port_set_link_speed()
3029 else if (2 == port->duplex) in port_set_link_speed()
3041 * port_force_link_speed - force port speed
3048 struct ksz_hw *hw = port->hw; in port_force_link_speed()
3054 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_force_link_speed()
3060 if (10 == port->speed) in port_force_link_speed()
3062 else if (100 == port->speed) in port_force_link_speed()
3064 if (1 == port->duplex) in port_force_link_speed()
3066 else if (2 == port->duplex) in port_force_link_speed()
3074 struct ksz_hw *hw = port->hw; in port_set_power_saving()
3078 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) in port_set_power_saving()
3088 * hw_chk_wol_pme_status - check PMEN pin
3098 struct pci_dev *pdev = hw_priv->pdev; in hw_chk_wol_pme_status()
3101 if (!pdev->pm_cap) in hw_chk_wol_pme_status()
3103 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_chk_wol_pme_status()
3108 * hw_clr_wol_pme_status - clear PMEN pin
3116 struct pci_dev *pdev = hw_priv->pdev; in hw_clr_wol_pme_status()
3119 if (!pdev->pm_cap) in hw_clr_wol_pme_status()
3123 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_clr_wol_pme_status()
3125 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data); in hw_clr_wol_pme_status()
3129 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
3133 * This routine is used to enable or disable Wake-on-LAN.
3138 struct pci_dev *pdev = hw_priv->pdev; in hw_cfg_wol_pme()
3141 if (!pdev->pm_cap) in hw_cfg_wol_pme()
3143 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_cfg_wol_pme()
3149 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data); in hw_cfg_wol_pme()
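hw_cfg_wol_pme() does a read-modify-write of the PCI power-management control word at pm_cap + PCI_PM_CTRL. The exact bits it sets fall in the elided lines, so the sketch below shows only the assumed PME_ENABLE handling (constant value from the PCI PM spec):

#define PM_CTRL_PME_ENABLE 0x0100	/* PCI_PM_CTRL_PME_ENABLE */

static unsigned short pme_cfg(unsigned short pm_ctrl, int enable)
{
	if (enable)
		pm_ctrl |= PM_CTRL_PME_ENABLE;
	else
		pm_ctrl &= ~PM_CTRL_PME_ENABLE;
	return pm_ctrl;		/* written back with pci_write_config_word() */
}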
3153 * hw_cfg_wol - configure Wake-on-LAN features
3158 * This routine is used to enable or disable certain Wake-on-LAN features.
3164 data = readw(hw->io + KS8841_WOL_CTRL_OFFSET); in hw_cfg_wol()
3169 writew(data, hw->io + KS8841_WOL_CTRL_OFFSET); in hw_cfg_wol()
3173 * hw_set_wol_frame - program Wake-on-LAN pattern
3181 * This routine is used to program a Wake-on-LAN pattern.
3200 writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i); in hw_set_wol_frame()
3201 writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i); in hw_set_wol_frame()
3210 --bits; in hw_set_wol_frame()
3213 writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i in hw_set_wol_frame()
3223 bits = mask[len - 1]; in hw_set_wol_frame()
3226 writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len - in hw_set_wol_frame()
3230 writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i); in hw_set_wol_frame()
3234 * hw_add_wol_arp - add ARP pattern
3258 * hw_add_wol_bcast - add broadcast pattern
3272 * hw_add_wol_mcast - add multicast pattern
3279 * multicast hash table, so not all multicast packets can wake up the host.
3286 memcpy(&pattern[3], &hw->override_addr[3], 3); in hw_add_wol_mcast()
3291 * hw_add_wol_ucast - add unicast pattern
3303 hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr); in hw_add_wol_ucast()
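Wake-frame masks carry one bit per pattern byte: bit n of the mask selects byte n of the frame for matching, so a 0x3F mask matches the six-byte destination MAC, which is how hw_add_wol_ucast() wakes on the station address. The single-byte 0x3f value is an assumption inferred from the ETH_ALEN pattern length; a standalone check of the bit indexing:

#include <assert.h>

static int byte_selected(const unsigned char *mask, int byte)
{
	return (mask[byte >> 3] >> (byte & 7)) & 1;
}

int main(void)
{
	static const unsigned char ucast_mask[] = { 0x3f };	/* assumed value */
	int i;

	for (i = 0; i < 6; i++)
		assert(byte_selected(ucast_mask, i));	/* MAC bytes match */
	assert(!byte_selected(ucast_mask, 6));		/* payload ignored */
	return 0;
}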
3307 * hw_enable_wol - enable Wake-on-LAN
3309 * @wol_enable: The Wake-on-LAN settings.
3312 * This routine is used to enable Wake-on-LAN depending on driver settings.
3327 * hw_init - check driver is correct for the hardware
3342 writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET); in hw_init()
3345 data = readw(hw->io + KS884X_CHIP_ID_OFFSET); in hw_init()
3358 hw->features |= SMALL_PACKET_TX_BUG; in hw_init()
3360 hw->features |= HALF_DUPLEX_SIGNAL_BUG; in hw_init()
3366 * hw_reset - reset the hardware
3373 writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET); in hw_reset()
3379 writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET); in hw_reset()
3383 * hw_setup - setup the hardware
3394 data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET); in hw_setup()
3397 writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET); in hw_setup()
3401 hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE | in hw_setup()
3405 hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST | in hw_setup()
3407 hw->rx_cfg |= KS884X_DMA_RX_MULTICAST; in hw_setup()
3410 hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP); in hw_setup()
3412 if (hw->all_multi) in hw_setup()
3413 hw->rx_cfg |= DMA_RX_ALL_MULTICAST; in hw_setup()
3414 if (hw->promiscuous) in hw_setup()
3415 hw->rx_cfg |= DMA_RX_PROMISCUOUS; in hw_setup()
3419 * hw_setup_intr - setup interrupt mask
3426 hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN; in hw_setup_intr()
3433 int alloc = info->alloc; in ksz_check_desc_num()
3450 info->alloc = alloc; in ksz_check_desc_num()
3452 info->mask = info->alloc - 1; in ksz_check_desc_num()
3458 u32 phys = desc_info->ring_phys; in hw_init_desc()
3459 struct ksz_hw_desc *desc = desc_info->ring_virt; in hw_init_desc()
3460 struct ksz_desc *cur = desc_info->ring; in hw_init_desc()
3463 for (i = 0; i < desc_info->alloc; i++) { in hw_init_desc()
3464 cur->phw = desc++; in hw_init_desc()
3465 phys += desc_info->size; in hw_init_desc()
3467 previous->phw->next = cpu_to_le32(phys); in hw_init_desc()
3469 previous->phw->next = cpu_to_le32(desc_info->ring_phys); in hw_init_desc()
3470 previous->sw.buf.rx.end_of_ring = 1; in hw_init_desc()
3471 previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data); in hw_init_desc()
3473 desc_info->avail = desc_info->alloc; in hw_init_desc()
3474 desc_info->last = desc_info->next = 0; in hw_init_desc()
3476 desc_info->cur = desc_info->ring; in hw_init_desc()
3480 * hw_set_desc_base - set descriptor base addresses
3490 writel(tx_addr, hw->io + KS_DMA_TX_ADDR); in hw_set_desc_base()
3491 writel(rx_addr, hw->io + KS_DMA_RX_ADDR); in hw_set_desc_base()
3496 info->cur = info->ring; in hw_reset_pkts()
3497 info->avail = info->alloc; in hw_reset_pkts()
3498 info->last = info->next = 0; in hw_reset_pkts()
3503 writel(DMA_START, hw->io + KS_DMA_RX_START); in hw_resume_rx()
3507 * hw_start_rx - start receiving
3514 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); in hw_start_rx()
3517 hw->intr_mask |= KS884X_INT_RX_STOPPED; in hw_start_rx()
3519 writel(DMA_START, hw->io + KS_DMA_RX_START); in hw_start_rx()
3521 hw->rx_stop++; in hw_start_rx()
3524 if (0 == hw->rx_stop) in hw_start_rx()
3525 hw->rx_stop = 2; in hw_start_rx()
3529 * hw_stop_rx - stop receiving
3536 hw->rx_stop = 0; in hw_stop_rx()
3538 writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL); in hw_stop_rx()
3542 * hw_start_tx - start transmitting
3549 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in hw_start_tx()
3553 * hw_stop_tx - stop transmitting
3560 writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL); in hw_stop_tx()
3564 * hw_disable - disable hardware
3573 hw->enabled = 0; in hw_disable()
3577 * hw_enable - enable hardware
3586 hw->enabled = 1; in hw_enable()
3590 * hw_alloc_pkt - allocate enough descriptors for transmission
3602 if (hw->tx_desc_info.avail <= 1) in hw_alloc_pkt()
3606 get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur); in hw_alloc_pkt()
3607 hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1; in hw_alloc_pkt()
3610 ++hw->tx_int_cnt; in hw_alloc_pkt()
3611 hw->tx_size += length; in hw_alloc_pkt()
3613 /* Cannot hold too much data. */ in hw_alloc_pkt()
3614 if (hw->tx_size >= MAX_TX_HELD_SIZE) in hw_alloc_pkt()
3615 hw->tx_int_cnt = hw->tx_int_mask + 1; in hw_alloc_pkt()
3617 if (physical > hw->tx_desc_info.avail) in hw_alloc_pkt()
3620 return hw->tx_desc_info.avail; in hw_alloc_pkt()
3624 * hw_send_pkt - mark packet for transmission
3631 struct ksz_desc *cur = hw->tx_desc_info.cur; in hw_send_pkt()
3633 cur->sw.buf.tx.last_seg = 1; in hw_send_pkt()
3636 if (hw->tx_int_cnt > hw->tx_int_mask) { in hw_send_pkt()
3637 cur->sw.buf.tx.intr = 1; in hw_send_pkt()
3638 hw->tx_int_cnt = 0; in hw_send_pkt()
3639 hw->tx_size = 0; in hw_send_pkt()
3643 cur->sw.buf.tx.dest_port = hw->dst_ports; in hw_send_pkt()
3647 writel(0, hw->io + KS_DMA_TX_START); in hw_send_pkt()
3659 * hw_set_addr - set MAC address
3670 writeb(hw->override_addr[MAC_ADDR_ORDER(i)], in hw_set_addr()
3671 hw->io + KS884X_ADDR_0_OFFSET + i); in hw_set_addr()
3673 sw_set_addr(hw, hw->override_addr); in hw_set_addr()
3677 * hw_read_addr - read MAC address
3687 hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io + in hw_read_addr()
3690 if (!hw->mac_override) { in hw_read_addr()
3691 memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN); in hw_read_addr()
3692 if (empty_addr(hw->override_addr)) { in hw_read_addr()
3693 memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN); in hw_read_addr()
3694 memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS, in hw_read_addr()
3696 hw->override_addr[5] += hw->id; in hw_read_addr()
3721 writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO); in hw_ena_add_addr()
3722 writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI); in hw_ena_add_addr()
3730 if (empty_addr(hw->address[i])) in hw_set_add_addr()
3731 writel(0, hw->io + ADD_ADDR_INCR * i + in hw_set_add_addr()
3734 hw_ena_add_addr(hw, i, hw->address[i]); in hw_set_add_addr()
3743 if (ether_addr_equal(hw->override_addr, mac_addr)) in hw_add_addr()
3745 for (i = 0; i < hw->addr_list_size; i++) { in hw_add_addr()
3746 if (ether_addr_equal(hw->address[i], mac_addr)) in hw_add_addr()
3748 if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i])) in hw_add_addr()
3752 memcpy(hw->address[j], mac_addr, ETH_ALEN); in hw_add_addr()
3753 hw_ena_add_addr(hw, j, hw->address[j]); in hw_add_addr()
3756 return -1; in hw_add_addr()
3763 for (i = 0; i < hw->addr_list_size; i++) { in hw_del_addr()
3764 if (ether_addr_equal(hw->address[i], mac_addr)) { in hw_del_addr()
3765 eth_zero_addr(hw->address[i]); in hw_del_addr()
3766 writel(0, hw->io + ADD_ADDR_INCR * i + in hw_del_addr()
3771 return -1; in hw_del_addr()
3775 * hw_clr_multicast - clear multicast addresses
3785 hw->multi_bits[i] = 0; in hw_clr_multicast()
3787 writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i); in hw_clr_multicast()
3792 * hw_set_grp_addr - set multicast addresses
3805 memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE); in hw_set_grp_addr()
3807 for (i = 0; i < hw->multi_list_size; i++) { in hw_set_grp_addr()
3808 position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f; in hw_set_grp_addr()
3811 hw->multi_bits[index] |= (u8) value; in hw_set_grp_addr()
3815 writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET + in hw_set_grp_addr()
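hw_set_grp_addr() hashes each address into a 64-bin filter: the top six bits of the Ethernet CRC pick the bin, which then splits into a byte index and a bit value for hw->multi_bits[]. A standalone version of the arithmetic, with a fixed value standing in for ether_crc():

#include <assert.h>

int main(void)
{
	unsigned char multi_bits[8] = { 0 };
	unsigned int crc = 0xDEADBEEF;			/* mock ether_crc() result */
	unsigned int position = (crc >> 26) & 0x3f;	/* bin 0..63 */
	unsigned int index = position >> 3;		/* which byte */
	unsigned int value = 1 << (position & 7);	/* which bit */

	multi_bits[index] |= (unsigned char) value;
	assert(index < 8 && multi_bits[index]);
	return 0;
}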
3820 * hw_set_multicast - enable or disable all multicast receiving
3822 * @multicast: To turn the all-multicast feature on or off.
3832 hw->rx_cfg |= DMA_RX_ALL_MULTICAST; in hw_set_multicast()
3834 hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST; in hw_set_multicast()
3836 if (hw->enabled) in hw_set_multicast()
3841 * hw_set_promiscuous - enable or disable promiscuous receiving
3843 * @prom: To turn the promiscuous feature on or off.
3853 hw->rx_cfg |= DMA_RX_PROMISCUOUS; in hw_set_promiscuous()
3855 hw->rx_cfg &= ~DMA_RX_PROMISCUOUS; in hw_set_promiscuous()
3857 if (hw->enabled) in hw_set_promiscuous()
3862 * sw_enable - enable the switch
3873 if (hw->dev_count > 1) { in sw_enable()
3874 /* Set port-based VLAN membership with host port. */ in sw_enable()
3883 if (hw->dev_count > 1) in sw_enable()
3890 writew(enable, hw->io + KS884X_CHIP_ID_OFFSET); in sw_enable()
3894 * sw_setup - setup the switch
3919 if (hw->features & STP_SUPPORT) in sw_setup()
3923 hw->overrides |= PAUSE_FLOW_CTRL; in sw_setup()
3928 * ksz_start_timer - start kernel timer
3936 info->cnt = 0; in ksz_start_timer()
3937 info->timer.expires = jiffies + time; in ksz_start_timer()
3938 add_timer(&info->timer); in ksz_start_timer()
3941 info->max = -1; in ksz_start_timer()
3945 * ksz_stop_timer - stop kernel timer
3952 if (info->max) { in ksz_stop_timer()
3953 info->max = 0; in ksz_stop_timer()
3954 del_timer_sync(&info->timer); in ksz_stop_timer()
3961 info->max = 0; in ksz_init_timer()
3962 info->period = period; in ksz_init_timer()
3963 timer_setup(&info->timer, function, 0); in ksz_init_timer()
3968 ++info->cnt; in ksz_update_timer()
3969 if (info->max > 0) { in ksz_update_timer()
3970 if (info->cnt < info->max) { in ksz_update_timer()
3971 info->timer.expires = jiffies + info->period; in ksz_update_timer()
3972 add_timer(&info->timer); in ksz_update_timer()
3974 info->max = 0; in ksz_update_timer()
3975 } else if (info->max < 0) { in ksz_update_timer()
3976 info->timer.expires = jiffies + info->period; in ksz_update_timer()
3977 add_timer(&info->timer); in ksz_update_timer()
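ksz_update_timer() re-arms the timer while runs remain: a positive max bounds the count, max == -1 (set by ksz_start_timer()) runs forever, and max == 0 stops. The decision logic extracted into plain C; tick/tick_should_rearm are illustrative names, and the kernel code re-arms with the expires + add_timer() pair, which today would normally be written as mod_timer():

struct tick {
	int cnt;
	int max;	/* runs remaining; -1 for infinity */
};

static int tick_should_rearm(struct tick *t)
{
	++t->cnt;
	if (t->max > 0 && t->cnt >= t->max) {
		t->max = 0;
		return 0;	/* bounded run finished */
	}
	return t->max != 0;	/* re-arm for max > 0 or max == -1 */
}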
3982 * ksz_alloc_soft_desc - allocate software descriptors
3993 desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc), in ksz_alloc_soft_desc()
3995 if (!desc_info->ring) in ksz_alloc_soft_desc()
4002 * ksz_alloc_desc - allocate hardware descriptors
4012 struct ksz_hw *hw = &adapter->hw; in ksz_alloc_desc()
4016 adapter->desc_pool.alloc_size = in ksz_alloc_desc()
4017 hw->rx_desc_info.size * hw->rx_desc_info.alloc + in ksz_alloc_desc()
4018 hw->tx_desc_info.size * hw->tx_desc_info.alloc + in ksz_alloc_desc()
4021 adapter->desc_pool.alloc_virt = in ksz_alloc_desc()
4022 dma_alloc_coherent(&adapter->pdev->dev, in ksz_alloc_desc()
4023 adapter->desc_pool.alloc_size, in ksz_alloc_desc()
4024 &adapter->desc_pool.dma_addr, GFP_KERNEL); in ksz_alloc_desc()
4025 if (adapter->desc_pool.alloc_virt == NULL) { in ksz_alloc_desc()
4026 adapter->desc_pool.alloc_size = 0; in ksz_alloc_desc()
4031 offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ? in ksz_alloc_desc()
4032 (DESC_ALIGNMENT - in ksz_alloc_desc()
4033 ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0); in ksz_alloc_desc()
4034 adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset; in ksz_alloc_desc()
4035 adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset; in ksz_alloc_desc()
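The offset computation above slides the descriptor pool base up to the next DESC_ALIGNMENT boundary inside the over-allocated block. A standalone check of the arithmetic; the alignment of 16 and the start address are assumptions for the example:

#include <assert.h>

int main(void)
{
	unsigned long align = 16, virt = 0x1004;	/* misaligned start */
	unsigned long offset = (virt % align) ? align - (virt % align) : 0;

	assert((virt + offset) % align == 0);	/* base is now aligned */
	assert(offset == 12);
	return 0;
}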
4038 hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *) in ksz_alloc_desc()
4039 adapter->desc_pool.virt; in ksz_alloc_desc()
4040 hw->rx_desc_info.ring_phys = adapter->desc_pool.phys; in ksz_alloc_desc()
4041 offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size; in ksz_alloc_desc()
4042 hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *) in ksz_alloc_desc()
4043 (adapter->desc_pool.virt + offset); in ksz_alloc_desc()
4044 hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset; in ksz_alloc_desc()
4046 if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0)) in ksz_alloc_desc()
4048 if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1)) in ksz_alloc_desc()
4055 * free_dma_buf - release DMA buffer resources
4065 dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len, in free_dma_buf()
4067 dev_kfree_skb(dma_buf->skb); in free_dma_buf()
4068 dma_buf->skb = NULL; in free_dma_buf()
4069 dma_buf->dma = 0; in free_dma_buf()
4073 * ksz_init_rx_buffers - initialize receive descriptors
4083 struct ksz_hw *hw = &adapter->hw; in ksz_init_rx_buffers()
4084 struct ksz_desc_info *info = &hw->rx_desc_info; in ksz_init_rx_buffers()
4086 for (i = 0; i < hw->rx_desc_info.alloc; i++) { in ksz_init_rx_buffers()
4090 if (dma_buf->skb && dma_buf->len != adapter->mtu) in ksz_init_rx_buffers()
4092 dma_buf->len = adapter->mtu; in ksz_init_rx_buffers()
4093 if (!dma_buf->skb) in ksz_init_rx_buffers()
4094 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); in ksz_init_rx_buffers()
4095 if (dma_buf->skb && !dma_buf->dma) in ksz_init_rx_buffers()
4096 dma_buf->dma = dma_map_single(&adapter->pdev->dev, in ksz_init_rx_buffers()
4097 skb_tail_pointer(dma_buf->skb), in ksz_init_rx_buffers()
4098 dma_buf->len, in ksz_init_rx_buffers()
4102 set_rx_buf(desc, dma_buf->dma); in ksz_init_rx_buffers()
4103 set_rx_len(desc, dma_buf->len); in ksz_init_rx_buffers()
4109 * ksz_alloc_mem - allocate memory for hardware descriptors
4119 struct ksz_hw *hw = &adapter->hw; in ksz_alloc_mem()
4122 hw->rx_desc_info.alloc = NUM_OF_RX_DESC; in ksz_alloc_mem()
4123 hw->tx_desc_info.alloc = NUM_OF_TX_DESC; in ksz_alloc_mem()
4126 hw->tx_int_cnt = 0; in ksz_alloc_mem()
4127 hw->tx_int_mask = NUM_OF_TX_DESC / 4; in ksz_alloc_mem()
4128 if (hw->tx_int_mask > 8) in ksz_alloc_mem()
4129 hw->tx_int_mask = 8; in ksz_alloc_mem()
4130 while (hw->tx_int_mask) { in ksz_alloc_mem()
4131 hw->tx_int_cnt++; in ksz_alloc_mem()
4132 hw->tx_int_mask >>= 1; in ksz_alloc_mem()
4134 if (hw->tx_int_cnt) { in ksz_alloc_mem()
4135 hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1; in ksz_alloc_mem()
4136 hw->tx_int_cnt = 0; in ksz_alloc_mem()
4140 hw->rx_desc_info.size = in ksz_alloc_mem()
4141 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / in ksz_alloc_mem()
4143 hw->tx_desc_info.size = in ksz_alloc_mem()
4144 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / in ksz_alloc_mem()
4146 if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) in ksz_alloc_mem()
4148 ksz_check_desc_num(&hw->rx_desc_info); in ksz_alloc_mem()
4149 ksz_check_desc_num(&hw->tx_desc_info); in ksz_alloc_mem()
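The shift loop in ksz_alloc_mem() above rounds the TX interrupt batching threshold down to a power of two minus one, so hw_send_pkt() can compare tx_int_cnt against a simple mask. An equivalent standalone form:

#include <assert.h>

static unsigned int round_down_pow2_mask(unsigned int n)
{
	unsigned int bits = 0;

	while (n) {		/* count the magnitude of n */
		bits++;
		n >>= 1;
	}
	return bits ? (1u << (bits - 1)) - 1 : 0;
}

int main(void)
{
	assert(round_down_pow2_mask(8) == 7);	/* NUM_OF_TX_DESC / 4 == 8 case */
	assert(round_down_pow2_mask(5) == 3);	/* non-power-of-two rounds down */
	return 0;
}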
4159 * ksz_free_desc - free software and hardware descriptors
4167 struct ksz_hw *hw = &adapter->hw; in ksz_free_desc()
4170 hw->rx_desc_info.ring_virt = NULL; in ksz_free_desc()
4171 hw->tx_desc_info.ring_virt = NULL; in ksz_free_desc()
4172 hw->rx_desc_info.ring_phys = 0; in ksz_free_desc()
4173 hw->tx_desc_info.ring_phys = 0; in ksz_free_desc()
4176 if (adapter->desc_pool.alloc_virt) in ksz_free_desc()
4177 dma_free_coherent(&adapter->pdev->dev, in ksz_free_desc()
4178 adapter->desc_pool.alloc_size, in ksz_free_desc()
4179 adapter->desc_pool.alloc_virt, in ksz_free_desc()
4180 adapter->desc_pool.dma_addr); in ksz_free_desc()
4183 adapter->desc_pool.alloc_size = 0; in ksz_free_desc()
4184 adapter->desc_pool.alloc_virt = NULL; in ksz_free_desc()
4186 kfree(hw->rx_desc_info.ring); in ksz_free_desc()
4187 hw->rx_desc_info.ring = NULL; in ksz_free_desc()
4188 kfree(hw->tx_desc_info.ring); in ksz_free_desc()
4189 hw->tx_desc_info.ring = NULL; in ksz_free_desc()
4193 * ksz_free_buffers - free buffers used in the descriptors
4205 struct ksz_desc *desc = desc_info->ring; in ksz_free_buffers()
4207 for (i = 0; i < desc_info->alloc; i++) { in ksz_free_buffers()
4209 if (dma_buf->skb) in ksz_free_buffers()
4216 * ksz_free_mem - free all resources used by descriptors
4224 ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE); in ksz_free_mem()
4227 ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE); in ksz_free_mem()
4243 port_mib = &hw->port_mib[port]; in get_mib_counters()
4244 for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++) in get_mib_counters()
4245 counter[mib] += port_mib->counter[mib]; in get_mib_counters()
4250 * send_packet - send packet
4261 struct dev_info *hw_priv = priv->adapter; in send_packet()
4262 struct ksz_hw *hw = &hw_priv->hw; in send_packet()
4263 struct ksz_desc_info *info = &hw->tx_desc_info; in send_packet()
4266 int last_frag = skb_shinfo(skb)->nr_frags; in send_packet()
4272 if (hw->dev_count > 1) in send_packet()
4273 hw->dst_ports = 1 << priv->port.first_port; in send_packet()
4276 len = skb->len; in send_packet()
4279 first = info->cur; in send_packet()
4287 dma_buf->len = skb_headlen(skb); in send_packet()
4289 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, in send_packet()
4290 dma_buf->len, DMA_TO_DEVICE); in send_packet()
4291 set_tx_buf(desc, dma_buf->dma); in send_packet()
4292 set_tx_len(desc, dma_buf->len); in send_packet()
4296 this_frag = &skb_shinfo(skb)->frags[frag]; in send_packet()
4302 ++hw->tx_int_cnt; in send_packet()
4305 dma_buf->len = skb_frag_size(this_frag); in send_packet()
4307 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, in send_packet()
4309 dma_buf->len, in send_packet()
4311 set_tx_buf(desc, dma_buf->dma); in send_packet()
4312 set_tx_len(desc, dma_buf->len); in send_packet()
4323 info->cur = desc; in send_packet()
4328 dma_buf->len = len; in send_packet()
4330 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, in send_packet()
4331 dma_buf->len, DMA_TO_DEVICE); in send_packet()
4332 set_tx_buf(desc, dma_buf->dma); in send_packet()
4333 set_tx_len(desc, dma_buf->len); in send_packet()
4336 if (skb->ip_summed == CHECKSUM_PARTIAL) { in send_packet()
4337 (desc)->sw.buf.tx.csum_gen_tcp = 1; in send_packet()
4338 (desc)->sw.buf.tx.csum_gen_udp = 1; in send_packet()
4345 dma_buf->skb = skb; in send_packet()
4350 dev->stats.tx_packets++; in send_packet()
4351 dev->stats.tx_bytes += len; in send_packet()
4355 * transmit_cleanup - clean up transmit descriptors
4365 struct ksz_hw *hw = &hw_priv->hw; in transmit_cleanup()
4366 struct ksz_desc_info *info = &hw->tx_desc_info; in transmit_cleanup()
4371 spin_lock_irq(&hw_priv->hwlock); in transmit_cleanup()
4372 last = info->last; in transmit_cleanup()
4374 while (info->avail < info->alloc) { in transmit_cleanup()
4376 desc = &info->ring[last]; in transmit_cleanup()
4377 status.data = le32_to_cpu(desc->phw->ctrl.data); in transmit_cleanup()
4386 dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma, in transmit_cleanup()
4387 dma_buf->len, DMA_TO_DEVICE); in transmit_cleanup()
4390 if (dma_buf->skb) { in transmit_cleanup()
4391 dev = dma_buf->skb->dev; in transmit_cleanup()
4394 dev_kfree_skb_irq(dma_buf->skb); in transmit_cleanup()
4395 dma_buf->skb = NULL; in transmit_cleanup()
4400 last &= info->mask; in transmit_cleanup()
4401 info->avail++; in transmit_cleanup()
4403 info->last = last; in transmit_cleanup()
4404 spin_unlock_irq(&hw_priv->hwlock); in transmit_cleanup()
4412 * tx_done - transmit done processing
4420 struct ksz_hw *hw = &hw_priv->hw; in tx_done()
4425 for (port = 0; port < hw->dev_count; port++) { in tx_done()
4426 struct net_device *dev = hw->port_info[port].pdev; in tx_done()
4435 skb->dev = old->dev; in copy_old_skb()
4436 skb->protocol = old->protocol; in copy_old_skb()
4437 skb->ip_summed = old->ip_summed; in copy_old_skb()
4438 skb->csum = old->csum; in copy_old_skb()
4445 * netdev_tx - send out packet
4456 struct dev_info *hw_priv = priv->adapter; in netdev_tx()
4457 struct ksz_hw *hw = &hw_priv->hw; in netdev_tx()
4462 if (hw->features & SMALL_PACKET_TX_BUG) { in netdev_tx()
4465 if (skb->len <= 48) { in netdev_tx()
4466 if (skb_end_pointer(skb) - skb->data >= 50) { in netdev_tx()
4467 memset(&skb->data[skb->len], 0, 50 - skb->len); in netdev_tx()
4468 skb->len = 50; in netdev_tx()
4473 memcpy(skb->data, org_skb->data, org_skb->len); in netdev_tx()
4474 memset(&skb->data[org_skb->len], 0, in netdev_tx()
4475 50 - org_skb->len); in netdev_tx()
4476 skb->len = 50; in netdev_tx()
4482 spin_lock_irq(&hw_priv->hwlock); in netdev_tx()
4484 num = skb_shinfo(skb)->nr_frags + 1; in netdev_tx()
4485 left = hw_alloc_pkt(hw, skb->len, num); in netdev_tx()
4488 (CHECKSUM_PARTIAL == skb->ip_summed && in netdev_tx()
4489 skb->protocol == htons(ETH_P_IPV6))) { in netdev_tx()
4492 skb = netdev_alloc_skb(dev, org_skb->len); in netdev_tx()
4497 skb_copy_and_csum_dev(org_skb, skb->data); in netdev_tx()
4498 org_skb->ip_summed = CHECKSUM_NONE; in netdev_tx()
4499 skb->len = org_skb->len; in netdev_tx()
4511 spin_unlock_irq(&hw_priv->hwlock); in netdev_tx()
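The SMALL_PACKET_TX_BUG branch above pads short frames out to 50 bytes, either in place when room allows or by copying into a fresh skb. A hypothetical rewrite using the generic helper, assuming skb_put_padto() semantics (zero-fill, length update, skb freed on failure); this is a sketch, not the driver's code:

#include <linux/skbuff.h>

static int pad_small_tx(struct sk_buff *skb)
{
	/* skb_put_padto() zero-fills up to 50 bytes, extends skb->len,
	 * and frees the skb on allocation failure, covering both the
	 * in-place and the copy branch of the open-coded version. */
	if (skb->len <= 48)
		return skb_put_padto(skb, 50);
	return 0;
}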
4517 * netdev_tx_timeout - transmit timeout processing
4531 struct dev_info *hw_priv = priv->adapter; in netdev_tx_timeout()
4532 struct ksz_hw *hw = &hw_priv->hw; in netdev_tx_timeout()
4535 if (hw->dev_count > 1) { in netdev_tx_timeout()
4540 if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo)) in netdev_tx_timeout()
4550 hw_reset_pkts(&hw->rx_desc_info); in netdev_tx_timeout()
4551 hw_reset_pkts(&hw->tx_desc_info); in netdev_tx_timeout()
4557 hw->tx_desc_info.ring_phys, in netdev_tx_timeout()
4558 hw->rx_desc_info.ring_phys); in netdev_tx_timeout()
4560 if (hw->all_multi) in netdev_tx_timeout()
4561 hw_set_multicast(hw, hw->all_multi); in netdev_tx_timeout()
4562 else if (hw->multi_list_size) in netdev_tx_timeout()
4565 if (hw->dev_count > 1) { in netdev_tx_timeout()
4573 port_dev = hw->port_info[port].pdev; in netdev_tx_timeout()
4593 protocol = skb->protocol; in csum_verified()
4597 protocol = iph->tot_len; in csum_verified()
4602 if (iph->protocol == IPPROTO_TCP) in csum_verified()
4603 skb->ip_summed = CHECKSUM_UNNECESSARY; in csum_verified()
4612 struct dev_info *hw_priv = priv->adapter; in rx_proc()
4616 /* Received length includes 4-byte CRC. */ in rx_proc()
4617 packet_len = status.rx.frame_len - 4; in rx_proc()
4620 dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma, in rx_proc()
4624 /* skb->data != skb->head */ in rx_proc()
4627 dev->stats.rx_dropped++; in rx_proc()
4628 return -ENOMEM; in rx_proc()
4632 * Align socket buffer on a 4-byte boundary for better in rx_proc()
4637 skb_put_data(skb, dma_buf->skb->data, packet_len); in rx_proc()
4640 skb->protocol = eth_type_trans(skb, dev); in rx_proc()
4642 if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP)) in rx_proc()
4646 dev->stats.rx_packets++; in rx_proc()
4647 dev->stats.rx_bytes += packet_len; in rx_proc()
4659 struct ksz_hw *hw = &hw_priv->hw; in dev_rcv_packets()
4660 struct net_device *dev = hw->port_info[0].pdev; in dev_rcv_packets()
4661 struct ksz_desc_info *info = &hw->rx_desc_info; in dev_rcv_packets()
4662 int left = info->alloc; in dev_rcv_packets()
4666 next = info->next; in dev_rcv_packets()
4667 while (left--) { in dev_rcv_packets()
4669 desc = &info->ring[next]; in dev_rcv_packets()
4670 status.data = le32_to_cpu(desc->phw->ctrl.data); in dev_rcv_packets()
4684 next &= info->mask; in dev_rcv_packets()
4686 info->next = next; in dev_rcv_packets()
4695 struct ksz_hw *hw = &hw_priv->hw; in port_rcv_packets()
4696 struct net_device *dev = hw->port_info[0].pdev; in port_rcv_packets()
4697 struct ksz_desc_info *info = &hw->rx_desc_info; in port_rcv_packets()
4698 int left = info->alloc; in port_rcv_packets()
4702 next = info->next; in port_rcv_packets()
4703 while (left--) { in port_rcv_packets()
4705 desc = &info->ring[next]; in port_rcv_packets()
4706 status.data = le32_to_cpu(desc->phw->ctrl.data); in port_rcv_packets()
4710 if (hw->dev_count > 1) { in port_rcv_packets()
4714 dev = hw->port_info[p].pdev; in port_rcv_packets()
4729 next &= info->mask; in port_rcv_packets()
4731 info->next = next; in port_rcv_packets()
4740 struct ksz_hw *hw = &hw_priv->hw; in dev_rcv_special()
4741 struct net_device *dev = hw->port_info[0].pdev; in dev_rcv_special()
4742 struct ksz_desc_info *info = &hw->rx_desc_info; in dev_rcv_special()
4743 int left = info->alloc; in dev_rcv_special()
4747 next = info->next; in dev_rcv_special()
4748 while (left--) { in dev_rcv_special()
4750 desc = &info->ring[next]; in dev_rcv_special()
4751 status.data = le32_to_cpu(desc->phw->ctrl.data); in dev_rcv_special()
4755 if (hw->dev_count > 1) { in dev_rcv_special()
4759 dev = hw->port_info[p].pdev; in dev_rcv_special()
4781 priv->port.counter[OID_COUNTER_RCV_ERROR]++; in dev_rcv_special()
4788 next &= info->mask; in dev_rcv_special()
4790 info->next = next; in dev_rcv_special()
4798 struct ksz_hw *hw = &hw_priv->hw; in rx_proc_task()
4800 if (!hw->enabled) in rx_proc_task()
4802 if (unlikely(!hw_priv->dev_rcv(hw_priv))) { in rx_proc_task()
4808 spin_lock_irq(&hw_priv->hwlock); in rx_proc_task()
4810 spin_unlock_irq(&hw_priv->hwlock); in rx_proc_task()
4813 tasklet_schedule(&hw_priv->rx_tasklet); in rx_proc_task()
4820 struct ksz_hw *hw = &hw_priv->hw; in tx_proc_task()
4827 spin_lock_irq(&hw_priv->hwlock); in tx_proc_task()
4829 spin_unlock_irq(&hw_priv->hwlock); in tx_proc_task()
4835 if (0 == hw->rx_stop) in handle_rx_stop()
4836 hw->intr_mask &= ~KS884X_INT_RX_STOPPED; in handle_rx_stop()
4837 else if (hw->rx_stop > 1) { in handle_rx_stop()
4838 if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) { in handle_rx_stop()
4841 hw->intr_mask &= ~KS884X_INT_RX_STOPPED; in handle_rx_stop()
4842 hw->rx_stop = 0; in handle_rx_stop()
4846 hw->rx_stop++; in handle_rx_stop()
4850 * netdev_intr - interrupt handling
4863 struct dev_info *hw_priv = priv->adapter; in netdev_intr()
4864 struct ksz_hw *hw = &hw_priv->hw; in netdev_intr()
4866 spin_lock(&hw_priv->hwlock); in netdev_intr()
4872 spin_unlock(&hw_priv->hwlock); in netdev_intr()
4878 int_enable &= hw->intr_mask; in netdev_intr()
4882 tasklet_schedule(&hw_priv->tx_tasklet); in netdev_intr()
4887 tasklet_schedule(&hw_priv->rx_tasklet); in netdev_intr()
4891 dev->stats.rx_fifo_errors++; in netdev_intr()
4896 struct ksz_port *port = &priv->port; in netdev_intr()
4898 hw->features |= LINK_INT_WORKING; in netdev_intr()
4910 hw->intr_mask &= ~KS884X_INT_TX_STOPPED; in netdev_intr()
4912 data = readl(hw->io + KS_DMA_TX_CTRL); in netdev_intr()
4921 spin_unlock(&hw_priv->hwlock); in netdev_intr()
4935 struct dev_info *hw_priv = priv->adapter; in netdev_netpoll()
4937 hw_dis_intr(&hw_priv->hw); in netdev_netpoll()
4938 netdev_intr(dev->irq, dev); in netdev_netpoll()
4946 struct ksz_switch *sw = hw->ksz_switch; in bridge_change()
4949 if (!sw->member) { in bridge_change()
4954 if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state) in bridge_change()
4955 member = HOST_MASK | sw->member; in bridge_change()
4958 if (member != sw->port_cfg[port].member) in bridge_change()
4964 * netdev_close - close network device
4975 struct dev_info *hw_priv = priv->adapter; in netdev_close()
4976 struct ksz_port *port = &priv->port; in netdev_close()
4977 struct ksz_hw *hw = &hw_priv->hw; in netdev_close()
4982 ksz_stop_timer(&priv->monitor_timer_info); in netdev_close()
4985 if (hw->dev_count > 1) { in netdev_close()
4986 port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED); in netdev_close()
4989 if (hw->features & STP_SUPPORT) { in netdev_close()
4990 pi = 1 << port->first_port; in netdev_close()
4991 if (hw->ksz_switch->member & pi) { in netdev_close()
4992 hw->ksz_switch->member &= ~pi; in netdev_close()
4997 if (port->first_port > 0) in netdev_close()
4998 hw_del_addr(hw, dev->dev_addr); in netdev_close()
4999 if (!hw_priv->wol_enable) in netdev_close()
5002 if (priv->multicast) in netdev_close()
5003 --hw->all_multi; in netdev_close()
5004 if (priv->promiscuous) in netdev_close()
5005 --hw->promiscuous; in netdev_close()
5007 hw_priv->opened--; in netdev_close()
5008 if (!(hw_priv->opened)) { in netdev_close()
5009 ksz_stop_timer(&hw_priv->mib_timer_info); in netdev_close()
5010 flush_work(&hw_priv->mib_read); in netdev_close()
5019 tasklet_kill(&hw_priv->rx_tasklet); in netdev_close()
5020 tasklet_kill(&hw_priv->tx_tasklet); in netdev_close()
5021 free_irq(dev->irq, hw_priv->dev); in netdev_close()
5024 hw_reset_pkts(&hw->rx_desc_info); in netdev_close()
5025 hw_reset_pkts(&hw->tx_desc_info); in netdev_close()
5028 if (hw->features & STP_SUPPORT) in netdev_close()
5037 if (hw->ksz_switch) { in hw_cfg_huge_frame()
5040 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in hw_cfg_huge_frame()
5041 if (hw->features & RX_HUGE_FRAME) in hw_cfg_huge_frame()
5045 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in hw_cfg_huge_frame()
5047 if (hw->features & RX_HUGE_FRAME) { in hw_cfg_huge_frame()
5048 hw->rx_cfg |= DMA_RX_ERROR; in hw_cfg_huge_frame()
5049 hw_priv->dev_rcv = dev_rcv_special; in hw_cfg_huge_frame()
5051 hw->rx_cfg &= ~DMA_RX_ERROR; in hw_cfg_huge_frame()
5052 if (hw->dev_count > 1) in hw_cfg_huge_frame()
5053 hw_priv->dev_rcv = port_rcv_packets; in hw_cfg_huge_frame()
5055 hw_priv->dev_rcv = dev_rcv_packets; in hw_cfg_huge_frame()
5062 struct dev_info *hw_priv = priv->adapter; in prepare_hardware()
5063 struct ksz_hw *hw = &hw_priv->hw; in prepare_hardware()
5067 hw_priv->dev = dev; in prepare_hardware()
5068 rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); in prepare_hardware()
5071 tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task); in prepare_hardware()
5072 tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task); in prepare_hardware()
5074 hw->promiscuous = 0; in prepare_hardware()
5075 hw->all_multi = 0; in prepare_hardware()
5076 hw->multi_list_size = 0; in prepare_hardware()
5081 hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys); in prepare_hardware()
5092 if (media_state == priv->media_state) in set_media_state()
5097 media_state == priv->media_state ? "on" : "off"); in set_media_state()
5101 * netdev_open - open network device
5112 struct dev_info *hw_priv = priv->adapter; in netdev_open()
5113 struct ksz_hw *hw = &hw_priv->hw; in netdev_open()
5114 struct ksz_port *port = &priv->port; in netdev_open()
5121 priv->multicast = 0; in netdev_open()
5122 priv->promiscuous = 0; in netdev_open()
5125 memset(&dev->stats, 0, sizeof(struct net_device_stats)); in netdev_open()
5126 memset((void *) port->counter, 0, in netdev_open()
5129 if (!(hw_priv->opened)) { in netdev_open()
5133 for (i = 0; i < hw->mib_port_cnt; i++) { in netdev_open()
5135 hw_priv->counter[i].time = next_jiffies; in netdev_open()
5136 hw->port_mib[i].state = media_disconnected; in netdev_open()
5139 if (hw->ksz_switch) in netdev_open()
5140 hw->port_mib[HOST_PORT].state = media_connected; in netdev_open()
5144 hw_clr_wol_pme_status(&hw_priv->hw); in netdev_open()
5149 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in netdev_open()
5154 hw->port_info[p].partner = 0xFF; in netdev_open()
5155 hw->port_info[p].state = media_disconnected; in netdev_open()
5159 if (hw->dev_count > 1) { in netdev_open()
5160 port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE); in netdev_open()
5161 if (port->first_port > 0) in netdev_open()
5162 hw_add_addr(hw, dev->dev_addr); in netdev_open()
5166 if (port->force_link) in netdev_open()
5171 if (!(hw_priv->opened)) { in netdev_open()
5176 if (hw->mib_port_cnt) in netdev_open()
5177 ksz_start_timer(&hw_priv->mib_timer_info, in netdev_open()
5178 hw_priv->mib_timer_info.period); in netdev_open()
5181 hw_priv->opened++; in netdev_open()
5183 ksz_start_timer(&priv->monitor_timer_info, in netdev_open()
5184 priv->monitor_timer_info.period); in netdev_open()
5186 priv->media_state = port->linked->state; in netdev_open()
5205 * netdev_query_statistics - query network device statistics
5216 struct ksz_port *port = &priv->port; in netdev_query_statistics()
5217 struct ksz_hw *hw = &priv->adapter->hw; in netdev_query_statistics()
5222 dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR]; in netdev_query_statistics()
5223 dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR]; in netdev_query_statistics()
5226 dev->stats.multicast = 0; in netdev_query_statistics()
5227 dev->stats.collisions = 0; in netdev_query_statistics()
5228 dev->stats.rx_length_errors = 0; in netdev_query_statistics()
5229 dev->stats.rx_crc_errors = 0; in netdev_query_statistics()
5230 dev->stats.rx_frame_errors = 0; in netdev_query_statistics()
5231 dev->stats.tx_window_errors = 0; in netdev_query_statistics()
5233 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { in netdev_query_statistics()
5234 mib = &hw->port_mib[p]; in netdev_query_statistics()
5236 dev->stats.multicast += (unsigned long) in netdev_query_statistics()
5237 mib->counter[MIB_COUNTER_RX_MULTICAST]; in netdev_query_statistics()
5239 dev->stats.collisions += (unsigned long) in netdev_query_statistics()
5240 mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION]; in netdev_query_statistics()
5242 dev->stats.rx_length_errors += (unsigned long)( in netdev_query_statistics()
5243 mib->counter[MIB_COUNTER_RX_UNDERSIZE] + in netdev_query_statistics()
5244 mib->counter[MIB_COUNTER_RX_FRAGMENT] + in netdev_query_statistics()
5245 mib->counter[MIB_COUNTER_RX_OVERSIZE] + in netdev_query_statistics()
5246 mib->counter[MIB_COUNTER_RX_JABBER]); in netdev_query_statistics()
5247 dev->stats.rx_crc_errors += (unsigned long) in netdev_query_statistics()
5248 mib->counter[MIB_COUNTER_RX_CRC_ERR]; in netdev_query_statistics()
5249 dev->stats.rx_frame_errors += (unsigned long)( in netdev_query_statistics()
5250 mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] + in netdev_query_statistics()
5251 mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]); in netdev_query_statistics()
5253 dev->stats.tx_window_errors += (unsigned long) in netdev_query_statistics()
5254 mib->counter[MIB_COUNTER_TX_LATE_COLLISION]; in netdev_query_statistics()
5257 return &dev->stats; in netdev_query_statistics()
5261 * netdev_set_mac_address - set network device MAC address
5272 struct dev_info *hw_priv = priv->adapter; in netdev_set_mac_address()
5273 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_mac_address()
5277 if (priv->port.first_port > 0) in netdev_set_mac_address()
5278 hw_del_addr(hw, dev->dev_addr); in netdev_set_mac_address()
5280 hw->mac_override = 1; in netdev_set_mac_address()
5281 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); in netdev_set_mac_address()
5284 eth_hw_addr_set(dev, mac->sa_data); in netdev_set_mac_address()
5288 if (priv->port.first_port > 0) in netdev_set_mac_address()
5289 hw_add_addr(hw, dev->dev_addr); in netdev_set_mac_address()
5300 if (promiscuous != priv->promiscuous) { in dev_set_promiscuous()
5301 u8 prev_state = hw->promiscuous; in dev_set_promiscuous()
5304 ++hw->promiscuous; in dev_set_promiscuous()
5306 --hw->promiscuous; in dev_set_promiscuous()
5307 priv->promiscuous = promiscuous; in dev_set_promiscuous()
5309 /* Turn on/off promiscuous mode. */ in dev_set_promiscuous()
5310 if (hw->promiscuous <= 1 && prev_state <= 1) in dev_set_promiscuous()
5311 hw_set_promiscuous(hw, hw->promiscuous); in dev_set_promiscuous()
5317 if ((hw->features & STP_SUPPORT) && !promiscuous && in dev_set_promiscuous()
5319 struct ksz_switch *sw = hw->ksz_switch; in dev_set_promiscuous()
5320 int port = priv->port.first_port; in dev_set_promiscuous()
5324 if (sw->member & port) { in dev_set_promiscuous()
5325 sw->member &= ~port; in dev_set_promiscuous()
/* dev_set_multicast() - same reference-count pattern for all-multicast */
	if (multicast != priv->multicast) {
		u8 all_multi = hw->all_multi;

		if (multicast)
			++hw->all_multi;
		else
			--hw->all_multi;
		priv->multicast = multicast;

		/* Turn on/off all multicast mode. */
		if (hw->all_multi <= 1 && all_multi <= 1)
			hw_set_multicast(hw, hw->all_multi);
	}
/* netdev_set_rx_mode() */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct netdev_hw_addr *ha;
	int multicast = (dev->flags & IFF_ALLMULTI);

	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));

	if (hw_priv->hw.dev_count > 1)
		multicast |= (dev->flags & IFF_MULTICAST);
	dev_set_multicast(dev, priv, hw, multicast);

	/* Cannot use different hashes in multiple device interfaces mode. */
	if (hw_priv->hw.dev_count > 1)
		return;

	if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		int i = 0;

		/* List too big to support so turn on all multicast mode. */
		if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
				hw->multi_list_size = MAX_MULTICAST_LIST;
				++hw->all_multi;
				hw_set_multicast(hw, hw->all_multi);
			}
			return;
		}

		netdev_for_each_mc_addr(ha, dev)
			memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
		hw->multi_list_size = (u8) i;
		/* ... program the exact group-address filter ... */
	} else {
		if (MAX_MULTICAST_LIST == hw->multi_list_size) {
			--hw->all_multi;
			hw_set_multicast(hw, hw->all_multi);
		}
		hw->multi_list_size = 0;
	}
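/*
 * Editor's sketch (not driver code): the receive-mode path filters an exact
 * multicast list only while it fits the hardware table; one group past the
 * limit it latches "all multicast" instead, and drops that vote again when
 * the list empties. The MAX of 32 is an assumption for this model:
 */
#include <stdio.h>

#define MAX_MC_LIST 32			/* assumed table size */

struct rx_mode {
	unsigned char list_size;	/* programmed exact entries */
	int all_multi;			/* all-multicast vote count */
};

static void set_mc_list(struct rx_mode *rx, int mc_count)
{
	if (mc_count > MAX_MC_LIST) {
		if (rx->list_size != MAX_MC_LIST) {
			rx->list_size = MAX_MC_LIST;	/* latch overflow */
			++rx->all_multi;
		}
	} else if (mc_count > 0) {
		rx->list_size = (unsigned char)mc_count;
	} else {
		if (rx->list_size == MAX_MC_LIST)
			--rx->all_multi;		/* release the latch */
		rx->list_size = 0;
	}
}

int main(void)
{
	struct rx_mode rx = { 0, 0 };

	set_mc_list(&rx, 40);	/* overflow: all_multi becomes 1 */
	set_mc_list(&rx, 0);	/* empty: all_multi back to 0 */
	printf("all_multi = %d\n", rx.all_multi);
	return 0;
}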
/* netdev_change_mtu() */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int hw_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* Cannot use different MTU in multiple device interfaces mode. */
	if (hw->dev_count > 1)
		if (dev != hw_priv->dev)
			return 0;

	/* hw_mtu = new MTU plus header and FCS (computation elided); switch
	 * huge-frame support when it exceeds the regular buffer size. */
	if (hw_mtu > REGULAR_RX_BUF_SIZE)
		hw->features |= RX_HUGE_FRAME;
	else
		hw->features &= ~RX_HUGE_FRAME;
	/* ... */
	hw_priv->mtu = hw_mtu;
	WRITE_ONCE(dev->mtu, new_mtu);

	return 0;
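/*
 * Editor's sketch (not driver code): the elided arithmetic sizes the receive
 * buffer from the requested MTU. Assuming the usual 14-byte Ethernet header
 * plus 4-byte FCS and illustrative buffer-size constants (the real
 * REGULAR_RX_BUF_SIZE / MAX_RX_BUF_SIZE values live elsewhere in this file):
 */
#include <stdio.h>

#define ETH_HDR_SIZE	14
#define REGULAR_RX_BUF	1520	/* assumed */
#define HUGE_RX_BUF	1912	/* assumed; yields a 1894 max MTU */

static int mtu_to_buf(int new_mtu, int *huge_frame)
{
	int hw_mtu = new_mtu + ETH_HDR_SIZE + 4;	/* header + FCS */

	*huge_frame = hw_mtu > REGULAR_RX_BUF;
	hw_mtu = *huge_frame ? HUGE_RX_BUF : REGULAR_RX_BUF;
	return (hw_mtu + 3) & ~3;	/* round up to a 4-byte multiple */
}

int main(void)
{
	int huge;

	printf("MTU 1500 -> buf %d (huge=%d)\n", mtu_to_buf(1500, &huge), huge);
	printf("MTU 1894 -> buf %d (huge=%d)\n", mtu_to_buf(1894, &huge), huge);
	return 0;
}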
/**
 * netdev_ioctl - I/O control processing
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	struct mii_ioctl_data *data = if_mii(ifr);
	int result = 0;

	if (down_interruptible(&priv->proc_sem))
		return -ERESTARTSYS;

	switch (cmd) {
	/* Get address of MII PHY in use. */
	case SIOCGMIIPHY:
		data->phy_id = priv->id;
		fallthrough;

	/* Read MII PHY register. */
	case SIOCGMIIREG:
		if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_r_phy(hw, port->linked->port_id, data->reg_num,
				&data->val_out);
		break;

	/* Write MII PHY register. */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			result = -EPERM;
		else if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_w_phy(hw, port->linked->port_id, data->reg_num,
				data->val_in);
		break;

	default:
		result = -EOPNOTSUPP;
	}

	up(&priv->proc_sem);

	return result;
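/*
 * Editor's sketch (user-space, not driver code): how the MII ioctls above
 * are reached. SIOCGMIIPHY fills in phy_id, which the driver then requires
 * to match on SIOCGMIIREG. The interface name "eth0" is only an example.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */

	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills mii->phy_id */
		mii->reg_num = MII_BMSR;		/* register 1, < 6 */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = 0x%04x\n", mii->val_out);
	}
	close(fd);
	return 0;
}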
/**
 * mdio_read - read PHY register
 */
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;
	u16 val_out;

	hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
	return val_out;
/**
 * mdio_write - set PHY register
 */
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;

	/* Apply the write to every port behind this interface. */
	for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
		hw_w_phy(hw, pi, reg_num << 1, val);
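/*
 * Editor's note: the reg_num << 1 above appears to turn the generic MII
 * register index (0, 1, 2, ...) into a byte offset, since the chip exposes
 * one 16-bit PHY register per two bytes of register space. Illustration
 * only; the helper name is hypothetical:
 */
static inline int mii_reg_to_byte_offset(int reg_num)
{
	return reg_num << 1;	/* e.g. BMSR (register 1) -> offset 2 */
}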
/**
 * netdev_get_link_ksettings - get network device settings
 */
	struct dev_info *hw_priv = priv->adapter;

	mutex_lock(&hw_priv->lock);
	mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
	mutex_unlock(&hw_priv->lock);

	/* Save advertised settings for the workaround in the set function. */
	ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
		cmd->link_modes.advertising);

	return 0;
/**
 * netdev_set_link_ksettings - set network device settings
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	u32 speed = cmd->base.speed;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
		cmd->link_modes.advertising);

	/* ethtool leaves the advertised modes untouched unless requested
	 * explicitly, so rebuild them here from speed and duplex. */
	if (cmd->base.autoneg && priv->advertising == advertising) {
		advertising = 0;
		/* ... */
		if (0 == cmd->base.duplex)
			/* ... advertise the half-duplex modes ... */;
		else if (1 == cmd->base.duplex)
			/* ... advertise the full-duplex modes ... */;
	}

	mutex_lock(&hw_priv->lock);
	if (cmd->base.autoneg &&
	    /* ... everything is advertised ... */) {
		port->duplex = 0;
		port->speed = 0;
		port->force_link = 0;
	} else {
		port->duplex = cmd->base.duplex + 1;
		port->speed = speed;
		if (cmd->base.autoneg)
			port->force_link = 0;
		else
			port->force_link = 1;
	}
	/* ... */
	rc = mii_ethtool_set_link_ksettings(
		&priv->mii_if,
		/* ... */);
	mutex_unlock(&hw_priv->lock);
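/*
 * Editor's sketch: the duplex handling above stores cmd->base.duplex + 1,
 * shifting ethtool's encoding (0 = half, 1 = full) so that 0 can mean
 * "not forced". Hypothetical names for the resulting values:
 */
enum forced_duplex {
	DUPLEX_NOT_FORCED = 0,	/* left to auto-negotiation */
	DUPLEX_FORCED_HALF = 1,	/* ethtool DUPLEX_HALF + 1 */
	DUPLEX_FORCED_FULL = 2,	/* ethtool DUPLEX_FULL + 1 */
};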
/**
 * netdev_nway_reset - restart auto-negotiation
 *
 * This function restarts the PHY for auto-negotiation.
 */
	struct dev_info *hw_priv = priv->adapter;
	int rc;

	mutex_lock(&hw_priv->lock);
	rc = mii_nway_restart(&priv->mii_if);
	mutex_unlock(&hw_priv->lock);
	return rc;

/**
 * netdev_get_link - get network device link status
 */
	rc = mii_link_ok(&priv->mii_if);
	return rc;

/**
 * netdev_get_drvinfo - get network driver information
 */
	struct dev_info *hw_priv = priv->adapter;

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(hw_priv->pdev),
		sizeof(info->bus_info));
/**
 * netdev_get_regs_len - get length of register dump
 */
	while (range->end > range->start) {
		regs_len += (range->end - range->start + 3) / 4 * 4;
		range++;
	}
	return regs_len;

/**
 * netdev_get_regs - get register dump
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	mutex_lock(&hw_priv->lock);
	regs->version = 0;
	/* PCI configuration space first ... */
	pci_read_config_dword(hw_priv->pdev, len, buf);
	/* ... then each memory-mapped register range. */
	while (range->end > range->start) {
		for (len = range->start; len < range->end; len += 4) {
			*buf = readl(hw->io + len);
			buf++;
		}
		range++;
	}
	mutex_unlock(&hw_priv->lock);
/**
 * netdev_get_wol - get Wake-on-LAN support
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This procedure returns Wake-on-LAN support.
 */
	struct dev_info *hw_priv = priv->adapter;

	wol->supported = hw_priv->wol_support;
	wol->wolopts = hw_priv->wol_enable;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
/**
 * netdev_set_wol - set Wake-on-LAN support
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This function sets Wake-on-LAN support.
 */
	struct dev_info *hw_priv = priv->adapter;

	/* Need to find a way to retrieve the device IP address. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	if (wol->wolopts & ~hw_priv->wol_support)
		return -EINVAL;

	hw_priv->wol_enable = wol->wolopts;

	/* Link-up wake is always implied once any wake source is enabled. */
	if (wol->wolopts)
		hw_priv->wol_enable |= WAKE_PHY;
	hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
	return 0;
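/*
 * Editor's sketch (not driver code): set_wol rejects unsupported options
 * with -EINVAL, then quietly adds WAKE_PHY whenever any wake source is set,
 * so a link-up event can always bring the chip out of PME. Standalone model
 * (the WAKE_* values mirror linux/ethtool.h):
 */
#define WAKE_PHY	(1 << 0)
#define WAKE_UCAST	(1 << 1)
#define WAKE_MAGIC	(1 << 5)

static int set_wol(unsigned int *enabled, unsigned int supported,
		   unsigned int wolopts)
{
	if (wolopts & ~supported)
		return -1;			/* -EINVAL in the driver */
	*enabled = wolopts;
	if (wolopts)
		*enabled |= WAKE_PHY;		/* implied wake source */
	return 0;
}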
/**
 * netdev_get_msglevel - get debug message level
 */
	return priv->msg_enable;

/**
 * netdev_set_msglevel - set debug message level
 */
	priv->msg_enable = value;

/**
 * netdev_get_eeprom_len - get EEPROM length
 */
/**
 * netdev_get_eeprom - get EEPROM data
 */
	struct dev_info *hw_priv = priv->adapter;
	u8 *eeprom_byte = (u8 *) eeprom_data;	/* byte view of the word buffer */

	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	eeprom->magic = EEPROM_MAGIC;
	memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
	return 0;
/**
 * netdev_set_eeprom - write EEPROM data
 */
	struct dev_info *hw_priv = priv->adapter;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Read-modify-write: fetch the covered 16-bit words, patch the
	 * requested bytes, then write the merged words back. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
	return 0;
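/*
 * Editor's sketch (not driver code): the EEPROM is addressed in 16-bit
 * words, so the byte range [offset, offset + len) is widened to whole words
 * before the read-modify-write. The index math above, stand-alone:
 */
#include <assert.h>

static void byte_range_to_words(int offset, int len, int *first, int *last)
{
	*first = offset / 2;		 /* word holding the first byte */
	*last = (offset + len + 1) / 2;	 /* word past the last byte */
}

int main(void)
{
	int first, last;

	byte_range_to_words(3, 4, &first, &last);	/* bytes 3..6 */
	assert(first == 1 && last == 4);		/* words 1, 2, 3 */
	return 0;
}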
/**
 * netdev_get_pauseparam - get flow control parameters
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
	if (!hw->ksz_switch) {
		pause->rx_pause =
			(hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
		pause->tx_pause =
			(hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
	} else {
		pause->rx_pause =
			/* ... switch SWITCH_RX_FLOW_CTRL bit ... */;
		pause->tx_pause =
			/* ... switch SWITCH_TX_FLOW_CTRL bit ... */;
	}
/**
 * netdev_set_pauseparam - set flow control parameters
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	mutex_lock(&hw_priv->lock);
	if (pause->autoneg) {
		if (!pause->rx_pause && !pause->tx_pause)
			port->flow_ctrl = PHY_NO_FLOW_CTRL;
		else
			port->flow_ctrl = PHY_FLOW_CTRL;
		hw->overrides &= ~PAUSE_FLOW_CTRL;
		port->force_link = 0;
		if (hw->ksz_switch) {
			/* ... re-enable the switch auto flow control ... */
		}
		/* ... */
	} else {
		hw->overrides |= PAUSE_FLOW_CTRL;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, pause->rx_pause);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, pause->tx_pause);
		} else
			set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
	}
	mutex_unlock(&hw_priv->lock);
/**
 * netdev_get_ringparam - get tx/rx ring parameters
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	ring->tx_max_pending = (1 << 9);
	ring->tx_pending = hw->tx_desc_info.alloc;
	ring->rx_max_pending = (1 << 9);
	ring->rx_pending = hw->rx_desc_info.alloc;
/**
 * netdev_get_strings - get statistics identity strings
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	if (ETH_SS_STATS == stringset)
		memcpy(buf, &ethtool_stats_keys,
			ETH_GSTRING_LEN * hw->mib_cnt);

/**
 * netdev_get_sset_count - get statistics size
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	switch (sset) {
	case ETH_SS_STATS:
		return hw->mib_cnt;
	default:
		return -EOPNOTSUPP;
	}
/**
 * netdev_get_ethtool_stats - get network device statistics
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int n_stats = stats->n_stats;
	int n = SWITCH_PORT_NUM;
	u64 counter[TOTAL_PORT_COUNTER_NUM];

	/* Request a fresh counter read for every connected port. */
	mutex_lock(&hw_priv->lock);
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		if (media_connected == hw->port_mib[p].state) {
			hw_priv->counter[p].read = 1;

			/* Remember the first port that requests a read. */
			if (n == SWITCH_PORT_NUM)
				n = p;
		}
	}
	mutex_unlock(&hw_priv->lock);

	if (n < SWITCH_PORT_NUM)
		schedule_work(&hw_priv->mib_read);

	/* Wait for the work item to finish the reads. */
	if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
		p = n;
		wait_event_interruptible_timeout(
			hw_priv->counter[p].counter,
			2 == hw_priv->counter[p].read,
			/* ... */);
	} else
		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
			if (0 == i) {
				wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					/* ... */);
			} else if (hw->port_mib[p].cnt_ptr) {
				wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					/* ... */);
			}
		}

	get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
	n = hw->mib_cnt;
	if (n > n_stats)
		n = n_stats;
	n_stats -= n;
	memcpy(data, counter, sizeof(u64) * n);
/**
 * netdev_set_features - set receive checksum support
 */
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	mutex_lock(&hw_priv->lock);

	/* Turn RX checksum offload on or off in the hardware. */
	if (features & NETIF_F_RXCSUM)
		hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
	else
		hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	if (hw->enabled)
		writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	mutex_unlock(&hw_priv->lock);

	return 0;
/* update_link() */
	if (priv->media_state != port->linked->state) {
		priv->media_state = port->linked->state;
		/* ... report the new link state if the device is running ... */
	}
/* mib_read_work() */
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port_mib *mib;

	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters, or a read was requested. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
			/* ... read the next batch of counters ... */
			hw_priv->counter[i].read = 0;

			/* Finished reading: publish and wake any waiters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port was just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after the link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
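/*
 * Editor's sketch: the counter[i].read field above acts as a small state
 * machine coordinating ethtool readers with this work item. Hypothetical
 * names for the values used in the code (1, 0, 2):
 */
enum mib_read_state {
	MIB_READ_IDLE = 0,	/* read in progress or nothing pending */
	MIB_READ_REQUESTED = 1,	/* set by ethtool, the timer, or link loss */
	MIB_READ_DONE = 2,	/* counters fresh; wait_event callers wake */
};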
/* mib_monitor() - timer callback driving the MIB reads */
	mib_read_work(&hw_priv->mib_read);

	/* This is used to verify Wake-on-LAN is working. */
	if (hw_priv->pme_wait) {
		if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
			hw_clr_wol_pme_status(&hw_priv->hw);
			hw_priv->pme_wait = 0;
		}
	} else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
		/* PME is asserted; wait two seconds before clearing it. */
		hw_priv->pme_wait = jiffies + HZ * 2;
	}

	ksz_update_timer(&hw_priv->mib_timer_info);
/**
 * dev_monitor - periodic monitoring
 */
	struct net_device *dev = priv->mii_if.dev;
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	/* Poll the link only when the link interrupt is not usable. */
	if (!(hw->features & LINK_INT_WORKING))
		port_get_link_speed(port);
	update_link(dev, priv, port);

	ksz_update_timer(&priv->monitor_timer_info);

/*
 * Enabling STP support also turns on multiple network device mode.
 */
/**
 * netdev_init - initialize network device.
 */
	/* 500 ms timeout */
	ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
		dev_monitor, dev);

	dev->watchdog_timeo = HZ / 2;

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;

	/*
	 * Hardware does not really support IPv6 checksum generation, but
	 * the driver actually runs faster with this on.
	 */
	dev->hw_features |= NETIF_F_IPV6_CSUM;

	dev->features |= dev->hw_features;

	sema_init(&priv->proc_sem, 1);

	priv->mii_if.phy_id_mask = 0x1;
	priv->mii_if.reg_num_mask = 0x7;
	priv->mii_if.dev = dev;
	priv->mii_if.mdio_read = mdio_read;
	priv->mii_if.mdio_write = mdio_write;
	priv->mii_if.phy_id = priv->port.first_port + 1;

	priv->msg_enable = netif_msg_init(msg_enable,
		/* ... default message flags ... */);

/* netdev_free() - watchdog_timeo doubles as the "registered" flag */
	if (dev->watchdog_timeo)
		unregister_netdev(dev);
/* get_mac_addr() - parse a MAC address given as a module parameter */
			if (/* address is for the main device */) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
	/* ... */
	/* All six octets parsed: honor the override address. */
		hw_priv->hw.mac_override = 1;
/* read_other_addr() - read the second MAC address from the EEPROM */
	struct ksz_switch *sw = hw->ksz_switch;

	/* Three 16-bit EEPROM words hold the address, last octet first. */
	sw->other_addr[5] = (u8) data[0];
	sw->other_addr[4] = (u8)(data[0] >> 8);
	sw->other_addr[3] = (u8) data[1];
	sw->other_addr[2] = (u8)(data[1] >> 8);
	sw->other_addr[1] = (u8) data[2];
	sw->other_addr[0] = (u8)(data[2] >> 8);
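/*
 * Editor's sketch (not driver code): standalone version of the unpacking
 * above, showing how the three little-endian words map onto the address:
 */
#include <stdint.h>
#include <stdio.h>

static void words_to_mac(const uint16_t data[3], uint8_t mac[6])
{
	mac[5] = (uint8_t)data[0];
	mac[4] = (uint8_t)(data[0] >> 8);
	mac[3] = (uint8_t)data[1];
	mac[2] = (uint8_t)(data[1] >> 8);
	mac[1] = (uint8_t)data[2];
	mac[0] = (uint8_t)(data[2] >> 8);
}

int main(void)
{
	const uint16_t words[3] = { 0x5544, 0x3322, 0x0011 };
	uint8_t mac[6];

	words_to_mac(words, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 00:11:22:33:44:55 */
	return 0;
}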
/* pcidev_init() */
	result = -ENODEV;
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return result;

	/* ... reserve the PCI region and allocate the adapter structure ... */
	result = -ENOMEM;

	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;

	hw = &hw_priv->hw;

	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		/* ... bail out ... */

	/* ... verify the chip ID ... */
	result = -ENODEV;

	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

	/* Assume a single-port KSZ8841 by default. */
	hw->dev_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;

	/* The KSZ8842 variant adds a two-port switch. */
	if (/* chip is a KSZ8842 */) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;

		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

		/* One network device per switch port, if requested. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}

		/* A single network device drives all switch ports. */
		if (1 == hw->dev_count) {
			/* ... port_count covers every switch port ... */
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
		if (!hw->ksz_switch)
			/* ... bail out ... */

		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;

	hw->parent = hw_priv;

	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

	/* ... */
	hw_priv->hw.id = net_device_present;

	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);

	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);

	/* ... determine the MAC addresses ... */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
		read_other_addr(hw);
	}

	/* ... set up the hardware ... */
	if (hw->ksz_switch)
		/* ... set up the switch ... */;

	hw_priv->wol_support = WOL_SUPPORT;
	hw_priv->wol_enable = 0;

	INIT_WORK(&hw_priv->mib_read, mib_read_work);

	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor, hw_priv);

	for (i = 0; i < hw->dev_count; i++) {
		/* ... allocate the net device ... */
		SET_NETDEV_DEV(dev, &pdev->dev);
		info->netdev[i] = dev;

		priv->adapter = hw_priv;
		priv->id = net_device_present++;

		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;

		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];

		for (/* each port behind this device */) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}

		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;
		if (0 == i)	/* main device gets the override address */
			eth_hw_addr_set(dev, hw_priv->hw.override_addr);
		else {
			u8 addr[ETH_ALEN];

			ether_addr_copy(addr, sw->other_addr);
			if (ether_addr_equal(sw->other_addr,
					     hw->override_addr))
				addr[5] += port->first_port;
			eth_hw_addr_set(dev, addr);
		}

		dev->netdev_ops = &netdev_ops;
		dev->ethtool_ops = &netdev_ethtool_ops;

		/* MTU range: 60 - 1894 */
		dev->min_mtu = ETH_ZLEN;
		dev->max_mtu = MAX_RX_BUF_SIZE -
			(ETH_HLEN + ETH_FCS_LEN);

		/* ... register the net device ... */
	}

	pci_dev_get(hw_priv->pdev);
	return 0;

	/* Error path: release everything acquired above. */
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}
	kfree(hw->ksz_switch);
	iounmap(hw->io);
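/*
 * Editor's sketch (not driver code): in multiple-device mode each port
 * needs its own MAC. When no distinct second address was configured, the
 * code above derives one by bumping the last octet with the port index:
 */
#include <stdint.h>
#include <string.h>

static void derive_port_mac(uint8_t mac[6], const uint8_t base[6],
			    const uint8_t override[6], int first_port)
{
	memcpy(mac, base, 6);
	/* Only synthesize a unique address if none was provided. */
	if (!memcmp(base, override, 6))
		mac[5] += first_port;	/* e.g. ...:00 -> ...:01 for port 1 */
}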
/* pcidev_exit() */
	struct dev_info *hw_priv = &info->dev_info;

	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	kfree(hw_priv->hw.ksz_switch);
	pci_dev_put(hw_priv->pdev);
/* pcidev_resume() */
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	if (hw_priv->wol_enable)
		hw_clr_wol_pme_status(hw);
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			/* ... reopen the device if it was running ... */
		}
	}
/* pcidev_suspend() */
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Need to find a way to retrieve the device IP address. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			/* ... detach and close the device ... */
		}
	}
	if (hw_priv->wol_enable) {
		hw_enable_wol(hw, hw_priv->wol_enable, net_addr);