Lines matching full:pp (whole-identifier matches for pp in drivers/net/ethernet/marvell/mvneta.c)

484 	struct mvneta_port	*pp;  member
762 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) in mvreg_write() argument
764 writel(data, pp->base + offset); in mvreg_write()
768 static u32 mvreg_read(struct mvneta_port *pp, u32 offset) in mvreg_read() argument
770 return readl(pp->base + offset); in mvreg_read()
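These two accessors underpin nearly every other hit in this listing: they wrap readl()/writel() on the port's memory-mapped register window at pp->base. Callers almost always use them in a read-modify-write sequence; a minimal sketch of that pattern (the helper name is illustrative, mvneta itself open-codes this at each call site):

static void mvneta_reg_set_bits(struct mvneta_port *pp, u32 offset, u32 bits)
{
        u32 val;

        val = mvreg_read(pp, offset);   /* readl(pp->base + offset) */
        val |= bits;
        mvreg_write(pp, offset, val);   /* writel(val, pp->base + offset) */
}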
791 static void mvneta_mib_counters_clear(struct mvneta_port *pp) in mvneta_mib_counters_clear() argument
797 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); in mvneta_mib_counters_clear()
798 mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); in mvneta_mib_counters_clear()
799 mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); in mvneta_mib_counters_clear()
807 struct mvneta_port *pp = netdev_priv(dev); in mvneta_get_stats64() local
820 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
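mvneta keeps its counters per CPU; .ndo_get_stats64 folds them together, and the per_cpu_ptr() hit above sits inside the usual u64_stats retry loop. A sketch of that aggregation for the RX side, assuming the conventional syncp layout (field names abbreviated from the driver's per-CPU stats struct):

for_each_possible_cpu(cpu) {
        struct mvneta_pcpu_stats *cpu_stats;
        u64 rx_packets, rx_bytes;
        unsigned int start;

        cpu_stats = per_cpu_ptr(pp->stats, cpu);
        do {
                /* Retry if a writer updated the counters mid-read. */
                start = u64_stats_fetch_begin(&cpu_stats->syncp);
                rx_packets = cpu_stats->es.ps.rx_packets;
                rx_bytes = cpu_stats->es.ps.rx_bytes;
        } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

        stats->rx_packets += rx_packets;
        stats->rx_bytes += rx_bytes;
}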
856 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, in mvneta_rxq_non_occup_desc_add() argument
864 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
870 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
875 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, in mvneta_rxq_busy_desc_num_get() argument
880 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
887 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, in mvneta_rxq_desc_num_update() argument
896 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
916 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
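The two MVNETA_RXQ_STATUS_UPDATE_REG writes above exist because the hardware accepts at most 255 processed and 255 newly refilled descriptors per write; larger updates are chunked. A sketch of mvneta_rxq_desc_num_update() reconstructed from these hits and that constraint:

static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
                                       struct mvneta_rx_queue *rxq,
                                       int rx_done, int rx_filled)
{
        u32 val;

        if (rx_done <= 0xff && rx_filled <= 0xff) {
                val = rx_done | (rx_filled << 16);
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                return;
        }

        /* Only 255 descriptors can be reported per write. */
        while (rx_done > 0 || rx_filled > 0) {
                if (rx_done <= 0xff) {
                        val = rx_done;
                        rx_done = 0;
                } else {
                        val = 0xff;
                        rx_done -= 0xff;
                }
                if (rx_filled <= 0xff) {
                        val |= rx_filled << 16;
                        rx_filled = 0;
                } else {
                        val |= 0xff << 16;
                        rx_filled -= 0xff;
                }
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
        }
}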
932 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) in mvneta_max_rx_size_set() argument
936 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_max_rx_size_set()
940 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_max_rx_size_set()
945 static void mvneta_rxq_offset_set(struct mvneta_port *pp, in mvneta_rxq_offset_set() argument
951 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_offset_set()
956 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_offset_set()
963 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, in mvneta_txq_pend_desc_add() argument
974 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
1002 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, in mvneta_rxq_buf_size_set() argument
1008 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); in mvneta_rxq_buf_size_set()
1013 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); in mvneta_rxq_buf_size_set()
1017 static void mvneta_rxq_bm_disable(struct mvneta_port *pp, in mvneta_rxq_bm_disable() argument
1022 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_disable()
1024 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_disable()
1028 static void mvneta_rxq_bm_enable(struct mvneta_port *pp, in mvneta_rxq_bm_enable() argument
1033 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_enable()
1035 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_enable()
1039 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, in mvneta_rxq_long_pool_set() argument
1044 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_long_pool_set()
1046 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); in mvneta_rxq_long_pool_set()
1048 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_long_pool_set()
1052 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, in mvneta_rxq_short_pool_set() argument
1057 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_short_pool_set()
1059 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); in mvneta_rxq_short_pool_set()
1061 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_short_pool_set()
1065 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, in mvneta_bm_pool_bufsize_set() argument
1072 dev_warn(pp->dev->dev.parent, in mvneta_bm_pool_bufsize_set()
1078 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); in mvneta_bm_pool_bufsize_set()
1080 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); in mvneta_bm_pool_bufsize_set()
1084 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, in mvneta_mbus_io_win_set() argument
1090 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); in mvneta_mbus_io_win_set()
1092 if (pp->bm_win_id < 0) { in mvneta_mbus_io_win_set()
1096 pp->bm_win_id = i; in mvneta_mbus_io_win_set()
1103 i = pp->bm_win_id; in mvneta_mbus_io_win_set()
1106 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); in mvneta_mbus_io_win_set()
1107 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); in mvneta_mbus_io_win_set()
1110 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); in mvneta_mbus_io_win_set()
1112 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | in mvneta_mbus_io_win_set()
1115 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); in mvneta_mbus_io_win_set()
1117 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); in mvneta_mbus_io_win_set()
1119 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); in mvneta_mbus_io_win_set()
1122 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); in mvneta_mbus_io_win_set()
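mvneta_mbus_io_win_set() claims an MBus window for the buffer manager. In MVNETA_BASE_ADDR_ENABLE a set bit means the window is still disabled, i.e. free to claim. A sketch of the slot-claiming step visible above (MVNETA_MAX_WIN is the driver's window count):

win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

if (pp->bm_win_id < 0) {
        /* Find the first window that is still disabled (free). */
        for (i = 0; i < MVNETA_MAX_WIN; i++) {
                if (win_enable & (1 << i)) {
                        pp->bm_win_id = i;
                        break;
                }
        }
        if (i == MVNETA_MAX_WIN)
                return -ENOMEM;
} else {
        i = pp->bm_win_id;
}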
1127 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) in mvneta_bm_port_mbus_init() argument
1134 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, in mvneta_bm_port_mbus_init()
1139 pp->bm_win_id = -1; in mvneta_bm_port_mbus_init()
1142 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, in mvneta_bm_port_mbus_init()
1145 netdev_info(pp->dev, "fail to configure mbus window to BM\n"); in mvneta_bm_port_mbus_init()
1155 struct mvneta_port *pp) in mvneta_bm_port_init() argument
1160 if (!pp->neta_armada3700) { in mvneta_bm_port_init()
1163 ret = mvneta_bm_port_mbus_init(pp); in mvneta_bm_port_init()
1169 netdev_info(pp->dev, "missing long pool id\n"); in mvneta_bm_port_init()
1174 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, in mvneta_bm_port_init()
1175 MVNETA_BM_LONG, pp->id, in mvneta_bm_port_init()
1176 MVNETA_RX_PKT_SIZE(pp->dev->mtu)); in mvneta_bm_port_init()
1177 if (!pp->pool_long) { in mvneta_bm_port_init()
1178 netdev_info(pp->dev, "fail to obtain long pool for port\n"); in mvneta_bm_port_init()
1182 pp->pool_long->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1184 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, in mvneta_bm_port_init()
1185 pp->pool_long->id); in mvneta_bm_port_init()
1192 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, in mvneta_bm_port_init()
1193 MVNETA_BM_SHORT, pp->id, in mvneta_bm_port_init()
1195 if (!pp->pool_short) { in mvneta_bm_port_init()
1196 netdev_info(pp->dev, "fail to obtain short pool for port\n"); in mvneta_bm_port_init()
1197 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_port_init()
1202 pp->pool_short->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1203 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, in mvneta_bm_port_init()
1204 pp->pool_short->id); in mvneta_bm_port_init()
1211 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) in mvneta_bm_update_mtu() argument
1213 struct mvneta_bm_pool *bm_pool = pp->pool_long; in mvneta_bm_update_mtu()
1218 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); in mvneta_bm_update_mtu()
1237 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); in mvneta_bm_update_mtu()
1242 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_update_mtu()
1243 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); in mvneta_bm_update_mtu()
1245 pp->bm_priv = NULL; in mvneta_bm_update_mtu()
1246 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_bm_update_mtu()
1247 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); in mvneta_bm_update_mtu()
1248 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); in mvneta_bm_update_mtu()
1252 static void mvneta_port_up(struct mvneta_port *pp) in mvneta_port_up() argument
1260 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up()
1264 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); in mvneta_port_up()
1269 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up()
1274 mvreg_write(pp, MVNETA_RXQ_CMD, q_map); in mvneta_port_up()
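mvneta_port_up() builds a bitmap of the queues that actually have descriptor rings and enables them all with one register write. A sketch of the TX side (the MVNETA_RXQ_CMD write above is the symmetric RX case):

/* Enable every TX queue whose ring was allocated. */
q_map = 0;
for (queue = 0; queue < txq_number; queue++) {
        struct mvneta_tx_queue *txq = &pp->txqs[queue];

        if (txq->descs)
                q_map |= (1 << queue);
}
mvreg_write(pp, MVNETA_TXQ_CMD, q_map);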
1278 static void mvneta_port_down(struct mvneta_port *pp) in mvneta_port_down() argument
1284 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; in mvneta_port_down()
1288 mvreg_write(pp, MVNETA_RXQ_CMD, in mvneta_port_down()
1295 netdev_warn(pp->dev, in mvneta_port_down()
1302 val = mvreg_read(pp, MVNETA_RXQ_CMD); in mvneta_port_down()
1308 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; in mvneta_port_down()
1311 mvreg_write(pp, MVNETA_TXQ_CMD, in mvneta_port_down()
1318 netdev_warn(pp->dev, in mvneta_port_down()
1326 val = mvreg_read(pp, MVNETA_TXQ_CMD); in mvneta_port_down()
1334 netdev_warn(pp->dev, in mvneta_port_down()
1341 val = mvreg_read(pp, MVNETA_PORT_STATUS); in mvneta_port_down()
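mvneta_port_down() writes a disable command and then polls the same register until the hardware reports the queues idle, warning after a bounded number of 1 ms waits. A sketch of the RX poll loop (timeout constant per the driver; warning text abbreviated):

count = 0;
do {
        if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
                netdev_warn(pp->dev,
                            "TIMEOUT for RX stopped, cmd=0x%08x\n", val);
                break;
        }
        mdelay(1);

        val = mvreg_read(pp, MVNETA_RXQ_CMD);
} while (val & MVNETA_RXQ_ENABLE_MASK);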
1349 static void mvneta_port_enable(struct mvneta_port *pp) in mvneta_port_enable() argument
1354 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_enable()
1356 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_enable()
1360 static void mvneta_port_disable(struct mvneta_port *pp) in mvneta_port_disable() argument
1365 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_disable()
1367 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_disable()
1375 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) in mvneta_set_ucast_table() argument
1388 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); in mvneta_set_ucast_table()
1392 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_special_mcast_table() argument
1405 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); in mvneta_set_special_mcast_table()
1410 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_other_mcast_table() argument
1416 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1419 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1425 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); in mvneta_set_other_mcast_table()
1430 struct mvneta_port *pp = arg; in mvneta_percpu_unmask_interrupt() local
1435 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_percpu_unmask_interrupt()
1443 struct mvneta_port *pp = arg; in mvneta_percpu_mask_interrupt() local
1448 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_percpu_mask_interrupt()
1449 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_percpu_mask_interrupt()
1450 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_percpu_mask_interrupt()
1455 struct mvneta_port *pp = arg; in mvneta_percpu_clear_intr_cause() local
1460 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
1461 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
1462 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); in mvneta_percpu_clear_intr_cause()
1474 static void mvneta_defaults_set(struct mvneta_port *pp) in mvneta_defaults_set() argument
1482 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); in mvneta_defaults_set()
1485 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_defaults_set()
1486 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); in mvneta_defaults_set()
1489 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); in mvneta_defaults_set()
1499 if (!pp->neta_armada3700) { in mvneta_defaults_set()
1513 txq_map = (cpu == pp->rxq_def) ? in mvneta_defaults_set()
1521 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); in mvneta_defaults_set()
1525 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_defaults_set()
1526 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_defaults_set()
1529 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); in mvneta_defaults_set()
1531 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); in mvneta_defaults_set()
1532 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); in mvneta_defaults_set()
1535 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_defaults_set()
1536 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_defaults_set()
1539 if (pp->bm_priv) in mvneta_defaults_set()
1545 mvreg_write(pp, MVNETA_ACC_MODE, val); in mvneta_defaults_set()
1547 if (pp->bm_priv) in mvneta_defaults_set()
1548 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); in mvneta_defaults_set()
1551 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_defaults_set()
1552 mvreg_write(pp, MVNETA_PORT_CONFIG, val); in mvneta_defaults_set()
1555 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); in mvneta_defaults_set()
1556 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); in mvneta_defaults_set()
1571 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); in mvneta_defaults_set()
1576 val = mvreg_read(pp, MVNETA_UNIT_CONTROL); in mvneta_defaults_set()
1578 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); in mvneta_defaults_set()
1580 mvneta_set_ucast_table(pp, -1); in mvneta_defaults_set()
1581 mvneta_set_special_mcast_table(pp, -1); in mvneta_defaults_set()
1582 mvneta_set_other_mcast_table(pp, -1); in mvneta_defaults_set()
1585 mvreg_write(pp, MVNETA_INTR_ENABLE, in mvneta_defaults_set()
1589 mvneta_mib_counters_clear(pp); in mvneta_defaults_set()
1593 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) in mvneta_txq_max_tx_size_set() argument
1604 val = mvreg_read(pp, MVNETA_TX_MTU); in mvneta_txq_max_tx_size_set()
1607 mvreg_write(pp, MVNETA_TX_MTU, val); in mvneta_txq_max_tx_size_set()
1610 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); in mvneta_txq_max_tx_size_set()
1617 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); in mvneta_txq_max_tx_size_set()
1620 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); in mvneta_txq_max_tx_size_set()
1627 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); in mvneta_txq_max_tx_size_set()
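mvneta_txq_max_tx_size_set() ensures the global TX MTU, the global token size, and every queue's token size are at least the requested frame size; values are only ever raised. A sketch of the per-queue clamp, assuming the driver's MVNETA_TXQ_TOKEN_SIZE_MAX field mask:

for (queue = 0; queue < txq_number; queue++) {
        val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

        size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
        if (size < mtu) {
                size = mtu;     /* raise, never lower */
                val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
                val |= size;
                mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
        }
}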
1633 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, in mvneta_set_ucast_addr() argument
1649 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); in mvneta_set_ucast_addr()
1659 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); in mvneta_set_ucast_addr()
1663 static void mvneta_mac_addr_set(struct mvneta_port *pp, in mvneta_mac_addr_set() argument
1674 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); in mvneta_mac_addr_set()
1675 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); in mvneta_mac_addr_set()
1679 mvneta_set_ucast_addr(pp, addr[5], queue); in mvneta_mac_addr_set()
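mvneta_mac_addr_set() splits the six-byte address across two registers before installing the unicast-table entry keyed on the address's last nibble. A sketch of the packing behind the LOW/HIGH writes above:

/* Bytes 0-3 go to the HIGH register, bytes 4-5 to the LOW register. */
mac_l = (addr[4] << 8) | addr[5];
mac_h = (addr[0] << 24) | (addr[1] << 16) |
        (addr[2] << 8)  |  addr[3];

mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);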
1685 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, in mvneta_rx_pkts_coal_set() argument
1688 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), in mvneta_rx_pkts_coal_set()
1695 static void mvneta_rx_time_coal_set(struct mvneta_port *pp, in mvneta_rx_time_coal_set() argument
1701 clk_rate = clk_get_rate(pp->clk); in mvneta_rx_time_coal_set()
1704 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); in mvneta_rx_time_coal_set()
1708 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, in mvneta_tx_done_pkts_coal_set() argument
1713 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1718 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
1734 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, in mvneta_txq_sent_desc_dec() argument
1743 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1748 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1752 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, in mvneta_txq_sent_desc_num_get() argument
1758 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1768 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, in mvneta_txq_sent_desc_proc() argument
1774 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); in mvneta_txq_sent_desc_proc()
1778 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); in mvneta_txq_sent_desc_proc()
1813 static void mvneta_rx_error(struct mvneta_port *pp, in mvneta_rx_error() argument
1816 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_error()
1826 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", in mvneta_rx_error()
1830 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", in mvneta_rx_error()
1834 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", in mvneta_rx_error()
1838 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", in mvneta_rx_error()
1845 static int mvneta_rx_csum(struct mvneta_port *pp, u32 status) in mvneta_rx_csum() argument
1847 if ((pp->dev->features & NETIF_F_RXCSUM) && in mvneta_rx_csum()
1859 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, in mvneta_tx_done_policy() argument
1864 return &pp->txqs[queue]; in mvneta_tx_done_policy()
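mvneta_tx_done_policy() is small enough to reconstruct from its two hits: it services the highest-numbered queue whose TX-done cause bit is set, which is what fls() gives. A plausible reconstruction:

/* fls() returns the 1-based index of the highest set bit, so the
 * highest-numbered pending queue is serviced first. */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
                                                     u32 cause)
{
        int queue = fls(cause) - 1;

        return &pp->txqs[queue];
}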
1868 static void mvneta_txq_bufs_free(struct mvneta_port *pp, in mvneta_txq_bufs_free() argument
1889 dma_unmap_single(pp->dev->dev.parent, in mvneta_txq_bufs_free()
1913 static void mvneta_txq_done(struct mvneta_port *pp, in mvneta_txq_done() argument
1916 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1919 tx_done = mvneta_txq_sent_desc_proc(pp, txq); in mvneta_txq_done()
1923 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); in mvneta_txq_done()
1935 static int mvneta_rx_refill(struct mvneta_port *pp, in mvneta_rx_refill() argument
1948 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; in mvneta_rx_refill()
1986 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, in mvneta_rxq_drop_pkts() argument
1991 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rxq_drop_pkts()
1993 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rxq_drop_pkts()
1995 if (pp->bm_priv) { in mvneta_rxq_drop_pkts()
2002 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rxq_drop_pkts()
2004 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rxq_drop_pkts()
2025 mvneta_update_stats(struct mvneta_port *pp, in mvneta_update_stats() argument
2028 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_update_stats()
2041 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) in mvneta_rx_refill_queue() argument
2050 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { in mvneta_rx_refill_queue()
2056 stats = this_cpu_ptr(pp->stats); in mvneta_rx_refill_queue()
2072 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_xdp_put_buff() argument
2091 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, in mvneta_xdp_submit_frame() argument
2095 struct device *dev = pp->dev->dev.parent; in mvneta_xdp_submit_frame()
2174 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) in mvneta_xdp_xmit_back() argument
2176 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit_back()
2188 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit_back()
2189 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit_back()
2192 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false); in mvneta_xdp_xmit_back()
2200 mvneta_txq_pend_desc_add(pp, txq, 0); in mvneta_xdp_xmit_back()
2215 struct mvneta_port *pp = netdev_priv(dev); in mvneta_xdp_xmit() local
2216 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit()
2223 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) in mvneta_xdp_xmit()
2229 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit()
2230 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit()
2234 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte, in mvneta_xdp_xmit()
2243 mvneta_txq_pend_desc_add(pp, txq, 0); in mvneta_xdp_xmit()
2257 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_run_xdp() argument
2264 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2269 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2279 err = xdp_do_redirect(pp->dev, xdp, prog); in mvneta_run_xdp()
2281 mvneta_xdp_put_buff(pp, rxq, xdp, sync); in mvneta_run_xdp()
2290 ret = mvneta_xdp_xmit_back(pp, xdp); in mvneta_run_xdp()
2292 mvneta_xdp_put_buff(pp, rxq, xdp, sync); in mvneta_run_xdp()
2295 bpf_warn_invalid_xdp_action(pp->dev, prog, act); in mvneta_run_xdp()
2298 trace_xdp_exception(pp->dev, prog, act); in mvneta_run_xdp()
2301 mvneta_xdp_put_buff(pp, rxq, xdp, sync); in mvneta_run_xdp()
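mvneta_run_xdp() dispatches on the BPF program's verdict, recycling the buffer through mvneta_xdp_put_buff() on every path that does not consume it. A sketch of the verdict switch implied by the hits above (return codes and stats bookkeeping omitted):

switch (act) {
case XDP_PASS:
        break;          /* continue to the normal skb build path */
case XDP_REDIRECT:
        if (xdp_do_redirect(pp->dev, xdp, prog))
                mvneta_xdp_put_buff(pp, rxq, xdp, sync);
        break;
case XDP_TX:
        if (mvneta_xdp_xmit_back(pp, xdp) != MVNETA_XDP_TX)
                mvneta_xdp_put_buff(pp, rxq, xdp, sync);
        break;
default:
        bpf_warn_invalid_xdp_action(pp->dev, prog, act);
        fallthrough;
case XDP_ABORTED:
        trace_xdp_exception(pp->dev, prog, act);
        fallthrough;
case XDP_DROP:
        mvneta_xdp_put_buff(pp, rxq, xdp, sync);
        break;
}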
2314 mvneta_swbm_rx_frame(struct mvneta_port *pp, in mvneta_swbm_rx_frame() argument
2322 struct net_device *dev = pp->dev; in mvneta_swbm_rx_frame()
2344 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, in mvneta_swbm_rx_frame()
2349 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, in mvneta_swbm_add_rx_fragment() argument
2356 struct net_device *dev = pp->dev; in mvneta_swbm_add_rx_fragment()
2380 pp->rx_offset_correction, data_len); in mvneta_swbm_add_rx_fragment()
2395 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, in mvneta_swbm_build_skb() argument
2413 skb->ip_summed = mvneta_rx_csum(pp, desc_status); in mvneta_swbm_build_skb()
2426 struct mvneta_port *pp, int budget, in mvneta_rx_swbm() argument
2430 struct net_device *dev = pp->dev; in mvneta_rx_swbm()
2440 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx_swbm()
2442 xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_rx_swbm()
2461 mvneta_rx_error(pp, rx_desc); in mvneta_rx_swbm()
2469 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, in mvneta_rx_swbm()
2479 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, in mvneta_rx_swbm()
2488 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); in mvneta_rx_swbm()
2493 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) in mvneta_rx_swbm()
2496 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); in mvneta_rx_swbm()
2498 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_swbm()
2500 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); in mvneta_rx_swbm()
2520 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); in mvneta_rx_swbm()
2526 mvneta_update_stats(pp, &ps); in mvneta_rx_swbm()
2529 refill = mvneta_rx_refill_queue(pp, rxq); in mvneta_rx_swbm()
2532 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); in mvneta_rx_swbm()
2539 struct mvneta_port *pp, int rx_todo, in mvneta_rx_hwbm() argument
2542 struct net_device *dev = pp->dev; in mvneta_rx_hwbm()
2548 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx_hwbm()
2572 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rx_hwbm()
2578 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2581 mvneta_rx_error(pp, rx_desc); in mvneta_rx_hwbm()
2592 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, in mvneta_rx_hwbm()
2601 skb->ip_summed = mvneta_rx_csum(pp, rx_status); in mvneta_rx_hwbm()
2608 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2622 stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2637 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, in mvneta_rx_hwbm()
2650 skb->ip_summed = mvneta_rx_csum(pp, rx_status); in mvneta_rx_hwbm()
2656 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2665 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rx_hwbm()
2670 static void mvneta_free_tso_hdrs(struct mvneta_port *pp, in mvneta_free_tso_hdrs() argument
2673 struct device *dev = pp->dev->dev.parent; in mvneta_free_tso_hdrs()
2686 static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp, in mvneta_alloc_tso_hdrs() argument
2689 struct device *dev = pp->dev->dev.parent; in mvneta_alloc_tso_hdrs()
2698 mvneta_free_tso_hdrs(pp, txq); in mvneta_alloc_tso_hdrs()
2775 static void mvneta_release_descs(struct mvneta_port *pp, in mvneta_release_descs() argument
2790 dma_unmap_single(pp->dev->dev.parent, in mvneta_release_descs()
2808 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx_tso() local
2857 mvneta_release_descs(pp, txq, first_desc, desc_count - 1); in mvneta_tx_tso()
2862 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, in mvneta_tx_frag_process() argument
2878 dma_map_single(pp->dev->dev.parent, addr, in mvneta_tx_frag_process()
2881 if (dma_mapping_error(pp->dev->dev.parent, in mvneta_tx_frag_process()
2906 mvneta_release_descs(pp, txq, first_desc, i - 1); in mvneta_tx_frag_process()
2913 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx() local
2915 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx()
2963 if (mvneta_tx_frag_process(pp, skb, txq)) { in mvneta_tx()
2977 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_tx()
2987 mvneta_txq_pend_desc_add(pp, txq, frags); in mvneta_tx()
3005 static void mvneta_txq_done_force(struct mvneta_port *pp, in mvneta_txq_done_force() argument
3009 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done_force()
3012 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); in mvneta_txq_done_force()
3023 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) in mvneta_tx_done_gbe() argument
3030 txq = mvneta_tx_done_policy(pp, cause_tx_done); in mvneta_tx_done_gbe()
3032 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
3036 mvneta_txq_done(pp, txq); in mvneta_tx_done_gbe()
3071 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, in mvneta_set_special_mcast_addr() argument
3084 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST in mvneta_set_special_mcast_addr()
3094 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, in mvneta_set_special_mcast_addr()
3106 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, in mvneta_set_other_mcast_addr() argument
3117 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); in mvneta_set_other_mcast_addr()
3127 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); in mvneta_set_other_mcast_addr()
3139 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, in mvneta_mcast_addr_set() argument
3145 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); in mvneta_mcast_addr_set()
3151 if (pp->mcast_count[crc_result] == 0) { in mvneta_mcast_addr_set()
3152 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", in mvneta_mcast_addr_set()
3157 pp->mcast_count[crc_result]--; in mvneta_mcast_addr_set()
3158 if (pp->mcast_count[crc_result] != 0) { in mvneta_mcast_addr_set()
3159 netdev_info(pp->dev, in mvneta_mcast_addr_set()
3161 pp->mcast_count[crc_result], crc_result); in mvneta_mcast_addr_set()
3165 pp->mcast_count[crc_result]++; in mvneta_mcast_addr_set()
3167 mvneta_set_other_mcast_addr(pp, crc_result, queue); in mvneta_mcast_addr_set()
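For "other" multicast addresses the driver hashes each address to an 8-bit CRC and refcounts the bucket in pp->mcast_count[], so a filter entry is cleared only when the last address mapping to that CRC is removed. A sketch of the bookkeeping reconstructed from the hits above (queue == -1 requests deletion):

if (queue == -1) {
        if (pp->mcast_count[crc_result] == 0) {
                netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
                            crc_result);
                return -EINVAL;
        }

        pp->mcast_count[crc_result]--;
        if (pp->mcast_count[crc_result] != 0) {
                netdev_info(pp->dev,
                            "After delete there are %d valid Mcast for crc8=0x%02x\n",
                            pp->mcast_count[crc_result], crc_result);
                return 0;
        }
} else {
        pp->mcast_count[crc_result]++;
}

mvneta_set_other_mcast_addr(pp, crc_result, queue);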
3173 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, in mvneta_rx_unicast_promisc_set() argument
3178 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); in mvneta_rx_unicast_promisc_set()
3180 val = mvreg_read(pp, MVNETA_TYPE_PRIO); in mvneta_rx_unicast_promisc_set()
3187 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); in mvneta_rx_unicast_promisc_set()
3188 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); in mvneta_rx_unicast_promisc_set()
3195 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); in mvneta_rx_unicast_promisc_set()
3196 mvreg_write(pp, MVNETA_TYPE_PRIO, val); in mvneta_rx_unicast_promisc_set()
3202 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_rx_mode() local
3207 mvneta_rx_unicast_promisc_set(pp, 1); in mvneta_set_rx_mode()
3208 mvneta_set_ucast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3209 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3210 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3213 mvneta_rx_unicast_promisc_set(pp, 0); in mvneta_set_rx_mode()
3214 mvneta_set_ucast_table(pp, -1); in mvneta_set_rx_mode()
3215 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); in mvneta_set_rx_mode()
3219 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3220 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3223 mvneta_set_special_mcast_table(pp, -1); in mvneta_set_rx_mode()
3224 mvneta_set_other_mcast_table(pp, -1); in mvneta_set_rx_mode()
3228 mvneta_mcast_addr_set(pp, ha->addr, in mvneta_set_rx_mode()
3229 pp->rxq_def); in mvneta_set_rx_mode()
3239 struct mvneta_port *pp = (struct mvneta_port *)dev_id; in mvneta_isr() local
3241 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_isr()
3242 napi_schedule(&pp->napi); in mvneta_isr()
3252 disable_percpu_irq(port->pp->dev->irq); in mvneta_percpu_isr()
3258 static void mvneta_link_change(struct mvneta_port *pp) in mvneta_link_change() argument
3260 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); in mvneta_link_change()
3262 phylink_pcs_change(&pp->phylink_pcs, in mvneta_link_change()
3278 struct mvneta_port *pp = netdev_priv(napi->dev); in mvneta_poll() local
3279 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); in mvneta_poll()
3281 if (!netif_running(pp->dev)) { in mvneta_poll()
3287 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); in mvneta_poll()
3289 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); in mvneta_poll()
3291 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_poll()
3295 mvneta_link_change(pp); in mvneta_poll()
3300 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); in mvneta_poll()
3307 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : in mvneta_poll()
3313 if (pp->bm_priv) in mvneta_poll()
3314 rx_done = mvneta_rx_hwbm(napi, pp, budget, in mvneta_poll()
3315 &pp->rxqs[rx_queue]); in mvneta_poll()
3317 rx_done = mvneta_rx_swbm(napi, pp, budget, in mvneta_poll()
3318 &pp->rxqs[rx_queue]); in mvneta_poll()
3325 if (pp->neta_armada3700) { in mvneta_poll()
3329 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_poll()
3335 enable_percpu_irq(pp->dev->irq, 0); in mvneta_poll()
3339 if (pp->neta_armada3700) in mvneta_poll()
3340 pp->cause_rx_tx = cause_rx_tx; in mvneta_poll()
3347 static int mvneta_create_page_pool(struct mvneta_port *pp, in mvneta_create_page_pool() argument
3350 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_create_page_pool()
3356 .dev = pp->dev->dev.parent, in mvneta_create_page_pool()
3358 .offset = pp->rx_offset_correction, in mvneta_create_page_pool()
3370 err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0, in mvneta_create_page_pool()
3391 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_rxq_fill() argument
3396 err = mvneta_create_page_pool(pp, rxq, num); in mvneta_rxq_fill()
3402 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, in mvneta_rxq_fill()
3404 netdev_err(pp->dev, in mvneta_rxq_fill()
3414 mvneta_rxq_non_occup_desc_add(pp, rxq, i); in mvneta_rxq_fill()
3420 static void mvneta_tx_reset(struct mvneta_port *pp) in mvneta_tx_reset() argument
3426 mvneta_txq_done_force(pp, &pp->txqs[queue]); in mvneta_tx_reset()
3428 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_tx_reset()
3429 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_tx_reset()
3432 static void mvneta_rx_reset(struct mvneta_port *pp) in mvneta_rx_reset() argument
3434 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_rx_reset()
3435 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_rx_reset()
3440 static int mvneta_rxq_sw_init(struct mvneta_port *pp, in mvneta_rxq_sw_init() argument
3443 rxq->size = pp->rx_ring_size; in mvneta_rxq_sw_init()
3446 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_rxq_sw_init()
3457 static void mvneta_rxq_hw_init(struct mvneta_port *pp, in mvneta_rxq_hw_init() argument
3461 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); in mvneta_rxq_hw_init()
3462 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); in mvneta_rxq_hw_init()
3465 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_rxq_hw_init()
3466 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_rxq_hw_init()
3468 if (!pp->bm_priv) { in mvneta_rxq_hw_init()
3470 mvneta_rxq_offset_set(pp, rxq, 0); in mvneta_rxq_hw_init()
3471 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? in mvneta_rxq_hw_init()
3473 MVNETA_RX_BUF_SIZE(pp->pkt_size)); in mvneta_rxq_hw_init()
3474 mvneta_rxq_bm_disable(pp, rxq); in mvneta_rxq_hw_init()
3475 mvneta_rxq_fill(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3478 mvneta_rxq_offset_set(pp, rxq, in mvneta_rxq_hw_init()
3479 NET_SKB_PAD - pp->rx_offset_correction); in mvneta_rxq_hw_init()
3481 mvneta_rxq_bm_enable(pp, rxq); in mvneta_rxq_hw_init()
3483 mvneta_rxq_long_pool_set(pp, rxq); in mvneta_rxq_hw_init()
3484 mvneta_rxq_short_pool_set(pp, rxq); in mvneta_rxq_hw_init()
3485 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3490 static int mvneta_rxq_init(struct mvneta_port *pp, in mvneta_rxq_init() argument
3496 ret = mvneta_rxq_sw_init(pp, rxq); in mvneta_rxq_init()
3500 mvneta_rxq_hw_init(pp, rxq); in mvneta_rxq_init()
3506 static void mvneta_rxq_deinit(struct mvneta_port *pp, in mvneta_rxq_deinit() argument
3509 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_rxq_deinit()
3512 dma_free_coherent(pp->dev->dev.parent, in mvneta_rxq_deinit()
3525 static int mvneta_txq_sw_init(struct mvneta_port *pp, in mvneta_txq_sw_init() argument
3530 txq->size = pp->tx_ring_size; in mvneta_txq_sw_init()
3540 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_sw_init()
3553 err = mvneta_alloc_tso_hdrs(pp, txq); in mvneta_txq_sw_init()
3558 if (pp->neta_armada3700) in mvneta_txq_sw_init()
3563 cpu = pp->rxq_def % num_present_cpus(); in mvneta_txq_sw_init()
3565 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); in mvneta_txq_sw_init()
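mvneta_txq_sw_init() also pins each TX queue to a CPU for XPS. Armada 3700 gets CPU 0 since it has a single shared interrupt; otherwise queues are spread round-robin, falling back to rxq_def's CPU in single-queue setups. A sketch of that affinity step:

if (pp->neta_armada3700)
        cpu = 0;                /* single shared interrupt */
else if (txq_number > 1)
        cpu = txq->id % num_present_cpus();
else
        cpu = pp->rxq_def % num_present_cpus();
cpumask_set_cpu(cpu, &txq->affinity_mask);
netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);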
3570 static void mvneta_txq_hw_init(struct mvneta_port *pp, in mvneta_txq_hw_init() argument
3574 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_hw_init()
3575 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_hw_init()
3578 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_hw_init()
3579 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_hw_init()
3581 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_hw_init()
3585 static int mvneta_txq_init(struct mvneta_port *pp, in mvneta_txq_init() argument
3590 ret = mvneta_txq_sw_init(pp, txq); in mvneta_txq_init()
3594 mvneta_txq_hw_init(pp, txq); in mvneta_txq_init()
3600 static void mvneta_txq_sw_deinit(struct mvneta_port *pp, in mvneta_txq_sw_deinit() argument
3603 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_sw_deinit()
3607 mvneta_free_tso_hdrs(pp, txq); in mvneta_txq_sw_deinit()
3609 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_sw_deinit()
3622 static void mvneta_txq_hw_deinit(struct mvneta_port *pp, in mvneta_txq_hw_deinit() argument
3626 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3627 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3630 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3631 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3634 static void mvneta_txq_deinit(struct mvneta_port *pp, in mvneta_txq_deinit() argument
3637 mvneta_txq_sw_deinit(pp, txq); in mvneta_txq_deinit()
3638 mvneta_txq_hw_deinit(pp, txq); in mvneta_txq_deinit()
3642 static void mvneta_cleanup_txqs(struct mvneta_port *pp) in mvneta_cleanup_txqs() argument
3647 mvneta_txq_deinit(pp, &pp->txqs[queue]); in mvneta_cleanup_txqs()
3651 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) in mvneta_cleanup_rxqs() argument
3656 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); in mvneta_cleanup_rxqs()
3661 static int mvneta_setup_rxqs(struct mvneta_port *pp) in mvneta_setup_rxqs() argument
3666 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); in mvneta_setup_rxqs()
3669 netdev_err(pp->dev, "%s: can't create rxq=%d\n", in mvneta_setup_rxqs()
3671 mvneta_cleanup_rxqs(pp); in mvneta_setup_rxqs()
3680 static int mvneta_setup_txqs(struct mvneta_port *pp) in mvneta_setup_txqs() argument
3685 int err = mvneta_txq_init(pp, &pp->txqs[queue]); in mvneta_setup_txqs()
3687 netdev_err(pp->dev, "%s: can't create txq=%d\n", in mvneta_setup_txqs()
3689 mvneta_cleanup_txqs(pp); in mvneta_setup_txqs()
3697 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) in mvneta_comphy_init() argument
3701 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); in mvneta_comphy_init()
3705 return phy_power_on(pp->comphy); in mvneta_comphy_init()
3708 static int mvneta_config_interface(struct mvneta_port *pp, in mvneta_config_interface() argument
3713 if (pp->comphy) { in mvneta_config_interface()
3717 ret = mvneta_comphy_init(pp, interface); in mvneta_config_interface()
3722 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3728 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3733 mvreg_write(pp, MVNETA_SERDES_CFG, in mvneta_config_interface()
3741 pp->phy_interface = interface; in mvneta_config_interface()
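On ports without a generic comphy, mvneta_config_interface() programs the SerDes itself; the three MVNETA_SERDES_CFG writes above correspond to the three protocol settings. A sketch of the dispatch, assuming the driver's *_SERDES_PROTO defines:

switch (interface) {
case PHY_INTERFACE_MODE_QSGMII:
        mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
        break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
        mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
        break;
case PHY_INTERFACE_MODE_2500BASEX:
        mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_HSGMII_SERDES_PROTO);
        break;
default:
        break;
}

pp->phy_interface = interface;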
3746 static void mvneta_start_dev(struct mvneta_port *pp) in mvneta_start_dev() argument
3750 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); in mvneta_start_dev()
3752 mvneta_max_rx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3753 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3756 mvneta_port_enable(pp); in mvneta_start_dev()
3758 if (!pp->neta_armada3700) { in mvneta_start_dev()
3762 per_cpu_ptr(pp->ports, cpu); in mvneta_start_dev()
3767 napi_enable(&pp->napi); in mvneta_start_dev()
3771 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_start_dev()
3773 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_start_dev()
3777 phylink_start(pp->phylink); in mvneta_start_dev()
3780 phylink_speed_up(pp->phylink); in mvneta_start_dev()
3782 netif_tx_start_all_queues(pp->dev); in mvneta_start_dev()
3784 clear_bit(__MVNETA_DOWN, &pp->state); in mvneta_start_dev()
3787 static void mvneta_stop_dev(struct mvneta_port *pp) in mvneta_stop_dev() argument
3791 set_bit(__MVNETA_DOWN, &pp->state); in mvneta_stop_dev()
3793 if (device_may_wakeup(&pp->dev->dev)) in mvneta_stop_dev()
3794 phylink_speed_down(pp->phylink, false); in mvneta_stop_dev()
3796 phylink_stop(pp->phylink); in mvneta_stop_dev()
3798 if (!pp->neta_armada3700) { in mvneta_stop_dev()
3801 per_cpu_ptr(pp->ports, cpu); in mvneta_stop_dev()
3806 napi_disable(&pp->napi); in mvneta_stop_dev()
3809 netif_carrier_off(pp->dev); in mvneta_stop_dev()
3811 mvneta_port_down(pp); in mvneta_stop_dev()
3812 netif_tx_stop_all_queues(pp->dev); in mvneta_stop_dev()
3815 mvneta_port_disable(pp); in mvneta_stop_dev()
3818 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); in mvneta_stop_dev()
3821 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_stop_dev()
3823 mvneta_tx_reset(pp); in mvneta_stop_dev()
3824 mvneta_rx_reset(pp); in mvneta_stop_dev()
3826 WARN_ON(phy_power_off(pp->comphy)); in mvneta_stop_dev()
3831 struct mvneta_port *pp = arg; in mvneta_percpu_enable() local
3833 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); in mvneta_percpu_enable()
3838 struct mvneta_port *pp = arg; in mvneta_percpu_disable() local
3840 disable_percpu_irq(pp->dev->irq); in mvneta_percpu_disable()
3846 struct mvneta_port *pp = netdev_priv(dev); in mvneta_change_mtu() local
3847 struct bpf_prog *prog = pp->xdp_prog; in mvneta_change_mtu()
3867 if (pp->bm_priv) in mvneta_change_mtu()
3868 mvneta_bm_update_mtu(pp, mtu); in mvneta_change_mtu()
3877 mvneta_stop_dev(pp); in mvneta_change_mtu()
3878 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_change_mtu()
3880 mvneta_cleanup_txqs(pp); in mvneta_change_mtu()
3881 mvneta_cleanup_rxqs(pp); in mvneta_change_mtu()
3883 if (pp->bm_priv) in mvneta_change_mtu()
3884 mvneta_bm_update_mtu(pp, mtu); in mvneta_change_mtu()
3886 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); in mvneta_change_mtu()
3888 ret = mvneta_setup_rxqs(pp); in mvneta_change_mtu()
3894 ret = mvneta_setup_txqs(pp); in mvneta_change_mtu()
3900 on_each_cpu(mvneta_percpu_enable, pp, true); in mvneta_change_mtu()
3901 mvneta_start_dev(pp); in mvneta_change_mtu()
3911 struct mvneta_port *pp = netdev_priv(dev); in mvneta_fix_features() local
3913 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { in mvneta_fix_features()
3917 pp->tx_csum_limit); in mvneta_fix_features()
3924 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) in mvneta_get_mac_addr() argument
3928 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); in mvneta_get_mac_addr()
3929 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); in mvneta_get_mac_addr()
3941 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_mac_addr() local
3949 mvneta_mac_addr_set(pp, dev->dev_addr, -1); in mvneta_set_mac_addr()
3952 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); in mvneta_set_mac_addr()
3982 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); in mvneta_pcs_get_state() local
3985 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); in mvneta_pcs_get_state()
4011 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); in mvneta_pcs_config() local
4052 old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_pcs_config()
4056 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an); in mvneta_pcs_config()
4064 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); in mvneta_pcs_an_restart() local
4065 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_pcs_an_restart()
4067 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_pcs_an_restart()
4069 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_pcs_an_restart()
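Restarting in-band autonegotiation is a pulse: the restart bit is written set, then immediately written clear, which is why MVNETA_GMAC_AUTONEG_CONFIG is written twice back to back above. A sketch:

u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

/* Pulse the restart bit: set it, then clear it. */
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
            gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
            gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);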
4084 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_select_pcs() local
4086 return &pp->phylink_pcs; in mvneta_mac_select_pcs()
4093 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_prepare() local
4096 if (pp->phy_interface != interface || in mvneta_mac_prepare()
4103 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_prepare()
4106 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_prepare()
4109 if (pp->phy_interface != interface) in mvneta_mac_prepare()
4110 WARN_ON(phy_power_off(pp->comphy)); in mvneta_mac_prepare()
4114 unsigned long rate = clk_get_rate(pp->clk); in mvneta_mac_prepare()
4116 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, in mvneta_mac_prepare()
4127 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_config() local
4128 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_mac_config()
4129 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); in mvneta_mac_config()
4130 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); in mvneta_mac_config()
4166 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); in mvneta_mac_config()
4168 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); in mvneta_mac_config()
4170 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); in mvneta_mac_config()
4173 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & in mvneta_mac_config()
4183 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_finish() local
4188 clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); in mvneta_mac_finish()
4190 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk); in mvneta_mac_finish()
4193 if (pp->phy_interface != interface) in mvneta_mac_finish()
4195 WARN_ON(mvneta_config_interface(pp, interface)); in mvneta_mac_finish()
4201 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_finish()
4203 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_finish()
4209 static void mvneta_set_eee(struct mvneta_port *pp, bool enable) in mvneta_set_eee() argument
4213 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); in mvneta_set_eee()
4218 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); in mvneta_set_eee()
4225 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_link_down() local
4228 mvneta_port_down(pp); in mvneta_mac_link_down()
4231 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_down()
4234 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_down()
4237 pp->eee_active = false; in mvneta_mac_link_down()
4238 mvneta_set_eee(pp, false); in mvneta_mac_link_down()
4248 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_mac_link_up() local
4252 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_up()
4271 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_up()
4277 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_mac_link_up()
4283 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_mac_link_up()
4286 mvneta_port_up(pp); in mvneta_mac_link_up()
4288 if (phy && pp->eee_enabled) { in mvneta_mac_link_up()
4289 pp->eee_active = phy_init_eee(phy, false) >= 0; in mvneta_mac_link_up()
4290 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); in mvneta_mac_link_up()
4303 static int mvneta_mdio_probe(struct mvneta_port *pp) in mvneta_mdio_probe() argument
4306 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); in mvneta_mdio_probe()
4309 netdev_err(pp->dev, "could not attach PHY: %d\n", err); in mvneta_mdio_probe()
4311 phylink_ethtool_get_wol(pp->phylink, &wol); in mvneta_mdio_probe()
4312 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); in mvneta_mdio_probe()
4316 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); in mvneta_mdio_probe()
4321 static void mvneta_mdio_remove(struct mvneta_port *pp) in mvneta_mdio_remove() argument
4323 phylink_disconnect_phy(pp->phylink); in mvneta_mdio_remove()
4330 static void mvneta_percpu_elect(struct mvneta_port *pp) in mvneta_percpu_elect() argument
4337 if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) in mvneta_percpu_elect()
4338 elected_cpu = pp->rxq_def; in mvneta_percpu_elect()
4352 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); in mvneta_percpu_elect()
4362 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & in mvneta_percpu_elect()
4365 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); in mvneta_percpu_elect()
4371 pp, true); in mvneta_percpu_elect()
4378 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_online() local
4380 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_online()
4385 if (pp->neta_armada3700) in mvneta_cpu_online()
4388 spin_lock(&pp->lock); in mvneta_cpu_online()
4393 if (pp->is_stopped) { in mvneta_cpu_online()
4394 spin_unlock(&pp->lock); in mvneta_cpu_online()
4397 netif_tx_stop_all_queues(pp->dev); in mvneta_cpu_online()
4406 per_cpu_ptr(pp->ports, other_cpu); in mvneta_cpu_online()
4413 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_cpu_online()
4420 mvneta_percpu_enable(pp); in mvneta_cpu_online()
4426 mvneta_percpu_elect(pp); in mvneta_cpu_online()
4429 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_cpu_online()
4430 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_cpu_online()
4433 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_online()
4434 spin_unlock(&pp->lock); in mvneta_cpu_online()
4440 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_down_prepare() local
4442 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_down_prepare()
4448 spin_lock(&pp->lock); in mvneta_cpu_down_prepare()
4450 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_cpu_down_prepare()
4451 spin_unlock(&pp->lock); in mvneta_cpu_down_prepare()
4456 mvneta_percpu_disable(pp); in mvneta_cpu_down_prepare()
4462 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, in mvneta_cpu_dead() local
4466 spin_lock(&pp->lock); in mvneta_cpu_dead()
4467 mvneta_percpu_elect(pp); in mvneta_cpu_dead()
4468 spin_unlock(&pp->lock); in mvneta_cpu_dead()
4470 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); in mvneta_cpu_dead()
4471 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_cpu_dead()
4474 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_dead()
4480 struct mvneta_port *pp = netdev_priv(dev); in mvneta_open() local
4483 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); in mvneta_open()
4485 ret = mvneta_setup_rxqs(pp); in mvneta_open()
4489 ret = mvneta_setup_txqs(pp); in mvneta_open()
4494 if (pp->neta_armada3700) in mvneta_open()
4495 ret = request_irq(pp->dev->irq, mvneta_isr, 0, in mvneta_open()
4496 dev->name, pp); in mvneta_open()
4498 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, in mvneta_open()
4499 dev->name, pp->ports); in mvneta_open()
4501 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); in mvneta_open()
4505 if (!pp->neta_armada3700) { in mvneta_open()
4509 on_each_cpu(mvneta_percpu_enable, pp, true); in mvneta_open()
4511 pp->is_stopped = false; in mvneta_open()
4516 &pp->node_online); in mvneta_open()
4521 &pp->node_dead); in mvneta_open()
4526 ret = mvneta_mdio_probe(pp); in mvneta_open()
4532 mvneta_start_dev(pp); in mvneta_open()
4537 if (!pp->neta_armada3700) in mvneta_open()
4539 &pp->node_dead); in mvneta_open()
4541 if (!pp->neta_armada3700) in mvneta_open()
4543 &pp->node_online); in mvneta_open()
4545 if (pp->neta_armada3700) { in mvneta_open()
4546 free_irq(pp->dev->irq, pp); in mvneta_open()
4548 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_open()
4549 free_percpu_irq(pp->dev->irq, pp->ports); in mvneta_open()
4552 mvneta_cleanup_txqs(pp); in mvneta_open()
4554 mvneta_cleanup_rxqs(pp); in mvneta_open()
4561 struct mvneta_port *pp = netdev_priv(dev); in mvneta_stop() local
4563 if (!pp->neta_armada3700) { in mvneta_stop()
4569 spin_lock(&pp->lock); in mvneta_stop()
4570 pp->is_stopped = true; in mvneta_stop()
4571 spin_unlock(&pp->lock); in mvneta_stop()
4573 mvneta_stop_dev(pp); in mvneta_stop()
4574 mvneta_mdio_remove(pp); in mvneta_stop()
4577 &pp->node_online); in mvneta_stop()
4579 &pp->node_dead); in mvneta_stop()
4580 on_each_cpu(mvneta_percpu_disable, pp, true); in mvneta_stop()
4581 free_percpu_irq(dev->irq, pp->ports); in mvneta_stop()
4583 mvneta_stop_dev(pp); in mvneta_stop()
4584 mvneta_mdio_remove(pp); in mvneta_stop()
4585 free_irq(dev->irq, pp); in mvneta_stop()
4588 mvneta_cleanup_rxqs(pp); in mvneta_stop()
4589 mvneta_cleanup_txqs(pp); in mvneta_stop()
4596 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ioctl() local
4598 return phylink_mii_ioctl(pp->phylink, ifr, cmd); in mvneta_ioctl()
4605 struct mvneta_port *pp = netdev_priv(dev); in mvneta_xdp_setup() local
4614 if (pp->bm_priv) { in mvneta_xdp_setup()
4620 need_update = !!pp->xdp_prog != !!prog; in mvneta_xdp_setup()
4624 old_prog = xchg(&pp->xdp_prog, prog); in mvneta_xdp_setup()
4651 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_ethtool_set_link_ksettings() local
4653 return phylink_ethtool_ksettings_set(pp->phylink, cmd); in mvneta_ethtool_set_link_ksettings()
4661 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_ethtool_get_link_ksettings() local
4663 return phylink_ethtool_ksettings_get(pp->phylink, cmd); in mvneta_ethtool_get_link_ksettings()
4668 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_nway_reset() local
4670 return phylink_ethtool_nway_reset(pp->phylink); in mvneta_ethtool_nway_reset()
4680 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_coalesce() local
4684 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce()
4687 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_ethtool_set_coalesce()
4688 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_ethtool_set_coalesce()
4692 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce()
4694 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
4707 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_coalesce() local
4709 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; in mvneta_ethtool_get_coalesce()
4710 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; in mvneta_ethtool_get_coalesce()
4712 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; in mvneta_ethtool_get_coalesce()
4735 struct mvneta_port *pp = netdev_priv(netdev); in mvneta_ethtool_get_ringparam() local
4739 ring->rx_pending = pp->rx_ring_size; in mvneta_ethtool_get_ringparam()
4740 ring->tx_pending = pp->tx_ring_size; in mvneta_ethtool_get_ringparam()
4749 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_ringparam() local
4753 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? in mvneta_ethtool_set_ringparam()
4756 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, in mvneta_ethtool_set_ringparam()
4758 if (pp->tx_ring_size != ring->tx_pending) in mvneta_ethtool_set_ringparam()
4760 pp->tx_ring_size, ring->tx_pending); in mvneta_ethtool_set_ringparam()
4777 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_pauseparam() local
4779 phylink_ethtool_get_pauseparam(pp->phylink, pause); in mvneta_ethtool_get_pauseparam()
4785 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_pauseparam() local
4787 return phylink_ethtool_set_pauseparam(pp->phylink, pause); in mvneta_ethtool_set_pauseparam()
4794 struct mvneta_port *pp = netdev_priv(netdev); in mvneta_ethtool_get_strings() local
4800 if (!pp->bm_priv) { in mvneta_ethtool_get_strings()
4807 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, in mvneta_ethtool_update_pcpu_stats() argument
4825 stats = per_cpu_ptr(pp->stats, cpu); in mvneta_ethtool_update_pcpu_stats()
4851 static void mvneta_ethtool_update_stats(struct mvneta_port *pp) in mvneta_ethtool_update_stats() argument
4855 void __iomem *base = pp->base; in mvneta_ethtool_update_stats()
4860 mvneta_ethtool_update_pcpu_stats(pp, &stats); in mvneta_ethtool_update_stats()
4867 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4874 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4879 val = phylink_get_eee_err(pp->phylink); in mvneta_ethtool_update_stats()
4880 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4883 pp->ethtool_stats[i] = stats.skb_alloc_error; in mvneta_ethtool_update_stats()
4886 pp->ethtool_stats[i] = stats.refill_error; in mvneta_ethtool_update_stats()
4889 pp->ethtool_stats[i] = stats.ps.xdp_redirect; in mvneta_ethtool_update_stats()
4892 pp->ethtool_stats[i] = stats.ps.xdp_pass; in mvneta_ethtool_update_stats()
4895 pp->ethtool_stats[i] = stats.ps.xdp_drop; in mvneta_ethtool_update_stats()
4898 pp->ethtool_stats[i] = stats.ps.xdp_tx; in mvneta_ethtool_update_stats()
4901 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; in mvneta_ethtool_update_stats()
4904 pp->ethtool_stats[i] = stats.ps.xdp_xmit; in mvneta_ethtool_update_stats()
4907 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; in mvneta_ethtool_update_stats()
4915 static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) in mvneta_ethtool_pp_stats() argument
4921 if (pp->rxqs[i].page_pool) in mvneta_ethtool_pp_stats()
4922 page_pool_get_stats(pp->rxqs[i].page_pool, &stats); in mvneta_ethtool_pp_stats()
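mvneta_ethtool_pp_stats() folds page_pool statistics from every RX queue into one struct before converting them to the ethtool layout; page_pool_get_stats() accumulates into the struct it is handed. A sketch, assuming CONFIG_PAGE_POOL_STATS:

struct page_pool_stats stats = {};
int i;

/* page_pool_get_stats() adds into 'stats', so one struct can
 * accumulate across all RX queues. */
for (i = 0; i < rxq_number; i++)
        if (pp->rxqs[i].page_pool)
                page_pool_get_stats(pp->rxqs[i].page_pool, &stats);

page_pool_ethtool_stats_get(data, &stats);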
4931 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_stats() local
4934 mvneta_ethtool_update_stats(pp); in mvneta_ethtool_get_stats()
4937 *data++ = pp->ethtool_stats[i]; in mvneta_ethtool_get_stats()
4939 if (!pp->bm_priv) in mvneta_ethtool_get_stats()
4940 mvneta_ethtool_pp_stats(pp, data); in mvneta_ethtool_get_stats()
4947 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_sset_count() local
4949 if (!pp->bm_priv) in mvneta_ethtool_get_sset_count()
4978 static int mvneta_config_rss(struct mvneta_port *pp) in mvneta_config_rss() argument
4983 netif_tx_stop_all_queues(pp->dev); in mvneta_config_rss()
4985 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); in mvneta_config_rss()
4987 if (!pp->neta_armada3700) { in mvneta_config_rss()
4991 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
4997 napi_synchronize(&pp->napi); in mvneta_config_rss()
4998 napi_disable(&pp->napi); in mvneta_config_rss()
5001 pp->rxq_def = pp->indir[0]; in mvneta_config_rss()
5004 mvneta_set_rx_mode(pp->dev); in mvneta_config_rss()
5007 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_config_rss()
5008 mvreg_write(pp, MVNETA_PORT_CONFIG, val); in mvneta_config_rss()
5011 spin_lock(&pp->lock); in mvneta_config_rss()
5012 mvneta_percpu_elect(pp); in mvneta_config_rss()
5013 spin_unlock(&pp->lock); in mvneta_config_rss()
5015 if (!pp->neta_armada3700) { in mvneta_config_rss()
5019 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
5024 napi_enable(&pp->napi); in mvneta_config_rss()
5027 netif_tx_start_all_queues(pp->dev); in mvneta_config_rss()
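mvneta_config_rss() is a full datapath quiesce-and-retarget: stop TX, mask per-CPU interrupts, disable every NAPI context, take the new default RX queue from indir[0], refresh the unicast mapping and port config, re-elect the owning CPU under pp->lock, then bring everything back up. A condensed reconstruction from the fragments above; the re-enable mirror is abbreviated:

static int mvneta_config_rss(struct mvneta_port *pp)
{
        int cpu;
        u32 val;

        netif_tx_stop_all_queues(pp->dev);
        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

        if (!pp->neta_armada3700) {
                /* quiesce each per-CPU NAPI context */
                for_each_online_cpu(cpu) {
                        struct mvneta_pcpu_port *pcpu_port =
                                per_cpu_ptr(pp->ports, cpu);

                        napi_synchronize(&pcpu_port->napi);
                        napi_disable(&pcpu_port->napi);
                }
        } else {
                napi_synchronize(&pp->napi);
                napi_disable(&pp->napi);
        }

        pp->rxq_def = pp->indir[0];

        /* update unicast mapping for the new default queue */
        mvneta_set_rx_mode(pp->dev);

        val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);

        /* re-elect the CPU that owns rxq_def */
        spin_lock(&pp->lock);
        mvneta_percpu_elect(pp);
        spin_unlock(&pp->lock);

        /* re-enable the NAPI contexts (mirror of the disable above),
         * then restart TX */
        netif_tx_start_all_queues(pp->dev);

        return 0;
}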
5036 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_rxfh() local
5039 if (pp->neta_armada3700) in mvneta_ethtool_set_rxfh()
5053 memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_set_rxfh()
5055 return mvneta_config_rss(pp); in mvneta_ethtool_set_rxfh()
5061 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_rxfh() local
5064 if (pp->neta_armada3700) in mvneta_ethtool_get_rxfh()
5072 memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_get_rxfh()
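Both RXFH handlers bail out on Armada 3700, which runs a single shared NAPI context and cannot steer per CPU; otherwise they only copy the indirection table, with the set side additionally rebuilding the RSS state. A minimal sketch of set_rxfh, omitting the key and hash-function validation the real callback performs (the extack parameter is an assumption from the current ethtool_rxfh_param API):

static int mvneta_ethtool_set_rxfh(struct net_device *dev,
                                   struct ethtool_rxfh_param *rxfh,
                                   struct netlink_ext_ack *extack)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if (pp->neta_armada3700)
                return -EOPNOTSUPP;

        memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE);

        return mvneta_config_rss(pp);
}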
5080 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_wol() local
5082 phylink_ethtool_get_wol(pp->phylink, wol); in mvneta_ethtool_get_wol()
5088 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_wol() local
5091 ret = phylink_ethtool_set_wol(pp->phylink, wol); in mvneta_ethtool_set_wol()
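set_wol keeps the phylink return value because success should also be mirrored into the device's wakeup state. A sketch of the likely tail of the callback; the device_set_wakeup_enable() follow-up is an assumption based on common driver practice:

        ret = phylink_ethtool_set_wol(pp->phylink, wol);
        if (!ret)
                device_set_wakeup_enable(&dev->dev, !!wol->wolopts);

        return ret;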
5101 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_eee() local
5104 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); in mvneta_ethtool_get_eee()
5106 eee->eee_enabled = pp->eee_enabled; in mvneta_ethtool_get_eee()
5107 eee->eee_active = pp->eee_active; in mvneta_ethtool_get_eee()
5108 eee->tx_lpi_enabled = pp->tx_lpi_enabled; in mvneta_ethtool_get_eee()
5111 return phylink_ethtool_get_eee(pp->phylink, eee); in mvneta_ethtool_get_eee()
5117 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_eee() local
5126 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); in mvneta_ethtool_set_eee()
5129 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); in mvneta_ethtool_set_eee()
5131 pp->eee_enabled = eee->eee_enabled; in mvneta_ethtool_set_eee()
5132 pp->tx_lpi_enabled = eee->tx_lpi_enabled; in mvneta_ethtool_set_eee()
5134 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); in mvneta_ethtool_set_eee()
5136 return phylink_ethtool_set_eee(pp->phylink, eee); in mvneta_ethtool_set_eee()
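The set_eee path caches eee_enabled and tx_lpi_enabled in the port struct, programs the TX LPI timer into MVNETA_LPI_CTRL_0 by read-modify-write, applies the combined enable via mvneta_set_eee(), and finally defers negotiation to phylink. A sketch of the register update; the 8-bit timer field at bit 8 is an assumption about the register layout:

        lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
        lpi_ctl0 &= ~(0xff << 8);               /* assumed timer field */
        lpi_ctl0 |= eee->tx_lpi_timer << 8;
        mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

        pp->eee_enabled = eee->eee_enabled;
        pp->tx_lpi_enabled = eee->tx_lpi_enabled;

        mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

        return phylink_ethtool_set_eee(pp->phylink, eee);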
5139 static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) in mvneta_clear_rx_prio_map() argument
5141 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); in mvneta_clear_rx_prio_map()
5144 static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq) in mvneta_map_vlan_prio_to_rxq() argument
5146 u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ); in mvneta_map_vlan_prio_to_rxq()
5151 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); in mvneta_map_vlan_prio_to_rxq()
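MVNETA_VLAN_PRIO_TO_RXQ packs one small RX-queue field per VLAN priority into a single register, so clearing the whole register disables the mapping and installing one entry is a read-modify-write of one field. A sketch assuming a 3-bit field per priority (the exact field width is an assumption):

static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
{
        u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);

        val &= ~(0x7 << (pri * 3));     /* clear this priority's field */
        val |= rxq << (pri * 3);        /* steer it to the target queue */
        mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
}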
5154 static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp) in mvneta_enable_per_queue_rate_limit() argument
5160 core_clk_rate = clk_get_rate(pp->clk); in mvneta_enable_per_queue_rate_limit()
5171 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); in mvneta_enable_per_queue_rate_limit()
5173 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); in mvneta_enable_per_queue_rate_limit()
5176 mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles); in mvneta_enable_per_queue_rate_limit()
5181 static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp) in mvneta_disable_per_queue_rate_limit() argument
5183 u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); in mvneta_disable_per_queue_rate_limit()
5186 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); in mvneta_disable_per_queue_rate_limit()
5189 static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue, in mvneta_setup_queue_rates() argument
5212 mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val); in mvneta_setup_queue_rates()
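The per-queue shaper is a token bucket clocked from the core clock: enabling it converts a fixed base refill period in nanoseconds into core-clock cycles and selects the newer bandwidth-limit scheme in TXQ command register 1; disabling it (per the fragment above) restores the legacy selection bit. A reconstruction of the enable path, with the MVNETA_TXQ_CMD1_BW_LIM_* bit names hedged as assumptions:

static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
{
        unsigned long core_clk_rate;
        u32 refill_cycles, val;

        core_clk_rate = clk_get_rate(pp->clk);
        if (!core_clk_rate)
                return -EINVAL;

        /* base refill period (ns) expressed in core-clock cycles */
        refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
                        (NSEC_PER_SEC / core_clk_rate);
        if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
                return -EINVAL;

        val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
        val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
        mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);

        mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);

        return 0;
}

mvneta_setup_queue_rates() then scales each queue's maximum rate down to a refill value at the shaper's resolution and writes it, together with the refill period, into MVNETA_TXQ_BUCKET_REFILL_REG(queue), as the last fragment shows.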
5220 struct mvneta_port *pp = netdev_priv(dev); in mvneta_setup_mqprio() local
5232 mvneta_clear_rx_prio_map(pp); in mvneta_setup_mqprio()
5235 mvneta_disable_per_queue_rate_limit(pp); in mvneta_setup_mqprio()
5252 mvneta_map_vlan_prio_to_rxq(pp, tc, rxq); in mvneta_setup_mqprio()
5257 mvneta_disable_per_queue_rate_limit(pp); in mvneta_setup_mqprio()
5264 ret = mvneta_enable_per_queue_rate_limit(pp); in mvneta_setup_mqprio()
5275 ret = mvneta_setup_queue_rates(pp, txq, in mvneta_setup_mqprio()
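mvneta_setup_mqprio() ties the two mechanisms above together: zero TCs clears the VLAN-priority map and the shaper; otherwise every VLAN priority of a TC is steered to that TC's RX queues, and a BW_RATE shaper request turns the per-queue rate limit on and programs each TX queue in the TC. A condensed control-flow sketch (validation, netdev_set_tc_queue() bookkeeping, and unwind paths elided; the field names follow the standard tc_mqprio_qopt_offload):

        mvneta_clear_rx_prio_map(pp);

        if (!mqprio->qopt.num_tc) {
                mvneta_disable_per_queue_rate_limit(pp);
                netdev_reset_tc(dev);
                return 0;
        }

        for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
                for (rxq = mqprio->qopt.offset[tc];
                     rxq < mqprio->qopt.offset[tc] + mqprio->qopt.count[tc];
                     rxq++)
                        mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);

        if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
                /* priorities steered, but no rate limiting requested */
                mvneta_disable_per_queue_rate_limit(pp);
                return 0;
        }

        ret = mvneta_enable_per_queue_rate_limit(pp);
        if (ret)
                return ret;

        for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
                for (txq = mqprio->qopt.offset[tc];
                     txq < mqprio->qopt.offset[tc] + mqprio->qopt.count[tc];
                     txq++) {
                        ret = mvneta_setup_queue_rates(pp, txq,
                                                       mqprio->min_rate[tc],
                                                       mqprio->max_rate[tc]);
                        if (ret)
                                return ret;
                }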
5340 static int mvneta_init(struct device *dev, struct mvneta_port *pp) in mvneta_init() argument
5345 mvneta_port_disable(pp); in mvneta_init()
5348 mvneta_defaults_set(pp); in mvneta_init()
5350 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); in mvneta_init()
5351 if (!pp->txqs) in mvneta_init()
5356 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init()
5358 txq->size = pp->tx_ring_size; in mvneta_init()
5362 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); in mvneta_init()
5363 if (!pp->rxqs) in mvneta_init()
5368 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_init()
5370 rxq->size = pp->rx_ring_size; in mvneta_init()
5374 = devm_kmalloc_array(pp->dev->dev.parent, in mvneta_init()
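mvneta_init() allocates both queue arrays (and, per the last fragment, each RX queue's buffer shadow array) with device-managed allocators, so neither the probe error paths nor remove() free them explicitly; each queue then inherits the port-wide ring sizes captured earlier in probe. A sketch of the TX side of the pattern:

        pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs),
                                GFP_KERNEL);
        if (!pp->txqs)
                return -ENOMEM;

        /* each queue starts from the port-wide default ring size */
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];

                txq->id = queue;
                txq->size = pp->tx_ring_size;
        }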
5386 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, in mvneta_conf_mbus_windows() argument
5394 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); in mvneta_conf_mbus_windows()
5395 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); in mvneta_conf_mbus_windows()
5398 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); in mvneta_conf_mbus_windows()
5408 mvreg_write(pp, MVNETA_WIN_BASE(i), in mvneta_conf_mbus_windows()
5413 mvreg_write(pp, MVNETA_WIN_SIZE(i), in mvneta_conf_mbus_windows()
5420 if (pp->neta_ac5) in mvneta_conf_mbus_windows()
5421 mvreg_write(pp, MVNETA_WIN_BASE(0), in mvneta_conf_mbus_windows()
5428 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); in mvneta_conf_mbus_windows()
5433 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); in mvneta_conf_mbus_windows()
5434 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); in mvneta_conf_mbus_windows()
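Window configuration is defensive: all six windows are zeroed first (with the four remap registers cleared), then one base/size pair is programmed per DRAM chunk while enable and protection bitmasks are accumulated and written last; SoCs without mv_mbus_dram_info() fall through to the single catch-all window in the AC5/Armada 3700 branch above. A condensed sketch of the per-chunk loop; the field packing in the base register is an assumption:

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                mvreg_write(pp, MVNETA_WIN_BASE(i),
                            (cs->base & 0xffff0000) |
                            (cs->mbus_attr << 8) |
                            dram->mbus_dram_target_id);
                mvreg_write(pp, MVNETA_WIN_SIZE(i),
                            (cs->size - 1) & 0xffff0000);

                win_enable &= ~(1 << i);        /* 0 = window enabled */
                win_protect |= 3 << (2 * i);    /* full read/write access */
        }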
5438 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) in mvneta_port_power_up() argument
5441 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); in mvneta_port_power_up()
5457 struct mvneta_port *pp; in mvneta_probe() local
5478 pp = netdev_priv(dev); in mvneta_probe()
5479 spin_lock_init(&pp->lock); in mvneta_probe()
5480 pp->dn = dn; in mvneta_probe()
5482 pp->rxq_def = rxq_def; in mvneta_probe()
5483 pp->indir[0] = rxq_def; in mvneta_probe()
5491 pp->phy_interface = phy_mode; in mvneta_probe()
5500 pp->comphy = comphy; in mvneta_probe()
5502 pp->base = devm_platform_ioremap_resource(pdev, 0); in mvneta_probe()
5503 if (IS_ERR(pp->base)) in mvneta_probe()
5504 return PTR_ERR(pp->base); in mvneta_probe()
5508 pp->neta_armada3700 = true; in mvneta_probe()
5510 pp->neta_armada3700 = true; in mvneta_probe()
5511 pp->neta_ac5 = true; in mvneta_probe()
5518 pp->clk = devm_clk_get(&pdev->dev, "core"); in mvneta_probe()
5519 if (IS_ERR(pp->clk)) in mvneta_probe()
5520 pp->clk = devm_clk_get(&pdev->dev, NULL); in mvneta_probe()
5521 if (IS_ERR(pp->clk)) { in mvneta_probe()
5522 err = PTR_ERR(pp->clk); in mvneta_probe()
5526 clk_prepare_enable(pp->clk); in mvneta_probe()
5528 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); in mvneta_probe()
5529 if (!IS_ERR(pp->clk_bus)) in mvneta_probe()
5530 clk_prepare_enable(pp->clk_bus); in mvneta_probe()
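Clock acquisition uses a named lookup with an anonymous fallback, so both newer DTs with a "core" clock name and older ones with a single unnamed clock keep working; the "bus" clock is optional and only enabled when present. The pattern, isolated from the fragments (the unwind on failure is elided):

        pp->clk = devm_clk_get(&pdev->dev, "core");
        if (IS_ERR(pp->clk))
                pp->clk = devm_clk_get(&pdev->dev, NULL);  /* legacy DTs */
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
                /* unwind and return err (label elided) */
        }
        clk_prepare_enable(pp->clk);

        pp->clk_bus = devm_clk_get(&pdev->dev, "bus");     /* optional */
        if (!IS_ERR(pp->clk_bus))
                clk_prepare_enable(pp->clk_bus);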
5532 pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; in mvneta_probe()
5533 pp->phylink_pcs.neg_mode = true; in mvneta_probe()
5535 pp->phylink_config.dev = &dev->dev; in mvneta_probe()
5536 pp->phylink_config.type = PHYLINK_NETDEV; in mvneta_probe()
5537 pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | in mvneta_probe()
5540 phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); in mvneta_probe()
5542 pp->phylink_config.supported_interfaces); in mvneta_probe()
5548 pp->phylink_config.supported_interfaces); in mvneta_probe()
5550 pp->phylink_config.supported_interfaces); in mvneta_probe()
5552 pp->phylink_config.supported_interfaces); in mvneta_probe()
5556 pp->phylink_config.supported_interfaces); in mvneta_probe()
5561 pp->phylink_config.supported_interfaces); in mvneta_probe()
5563 pp->phylink_config.supported_interfaces); in mvneta_probe()
5566 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, in mvneta_probe()
5573 pp->phylink = phylink; in mvneta_probe()
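Probe fills phylink_config before creating the instance: the PCS ops, the netdev binding, the MAC capability mask, and the supported_interfaces bitmap (the RGMII variants plus the QSGMII/SGMII and 1000BASE-X/2500BASE-X bits guarded above). The creation call that consumes it, reconstructed with an assumed unwind label:

        phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
                                 phy_mode, &mvneta_phylink_ops);
        if (IS_ERR(phylink)) {
                err = PTR_ERR(phylink);
                goto err_clk;   /* label name is an assumption */
        }
        pp->phylink = phylink;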
5576 pp->ports = alloc_percpu(struct mvneta_pcpu_port); in mvneta_probe()
5577 if (!pp->ports) { in mvneta_probe()
5583 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); in mvneta_probe()
5584 if (!pp->stats) { in mvneta_probe()
5593 mvneta_get_mac_addr(pp, hw_mac_addr); in mvneta_probe()
5617 pp->tx_csum_limit = tx_csum_limit; in mvneta_probe()
5619 pp->dram_target_info = mv_mbus_dram_info(); in mvneta_probe()
5624 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_probe()
5625 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_probe()
5627 pp->tx_ring_size = MVNETA_MAX_TXD; in mvneta_probe()
5628 pp->rx_ring_size = MVNETA_MAX_RXD; in mvneta_probe()
5630 pp->dev = dev; in mvneta_probe()
5633 pp->id = global_port_id++; in mvneta_probe()
5638 pp->bm_priv = mvneta_bm_get(bm_node); in mvneta_probe()
5639 if (pp->bm_priv) { in mvneta_probe()
5640 err = mvneta_bm_port_init(pdev, pp); in mvneta_probe()
5644 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5645 pp->bm_priv = NULL; in mvneta_probe()
5652 pp->rx_offset_correction = max(0, in mvneta_probe()
5659 if (!pp->bm_priv) in mvneta_probe()
5660 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_probe()
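Buffer management is decided here: if the DT's buffer-manager node yields a BM context, the hardware pools are initialized, and any failure falls back to software buffers by dropping the context; the RX offset correction then depends on which path won. A reconstruction following the fragments (the dev_info() message text is an assumption):

        pp->bm_priv = mvneta_bm_get(bm_node);
        if (pp->bm_priv) {
                err = mvneta_bm_port_init(pdev, pp);
                if (err < 0) {
                        dev_info(&pdev->dev,
                                 "use SW buffer management\n");
                        mvneta_bm_put(pp->bm_priv);
                        pp->bm_priv = NULL;
                }
        }

        /* software path: reserve headroom in front of each RX buffer */
        if (!pp->bm_priv)
                pp->rx_offset_correction = MVNETA_SKB_HEADROOM;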
5662 err = mvneta_init(&pdev->dev, pp); in mvneta_probe()
5666 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_probe()
5675 if (pp->neta_armada3700) { in mvneta_probe()
5676 netif_napi_add(dev, &pp->napi, mvneta_poll); in mvneta_probe()
5680 per_cpu_ptr(pp->ports, cpu); in mvneta_probe()
5683 port->pp = pp; in mvneta_probe()
5691 if (!pp->bm_priv) in mvneta_probe()
5714 platform_set_drvdata(pdev, pp->dev); in mvneta_probe()
5719 if (pp->bm_priv) { in mvneta_probe()
5720 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_probe()
5721 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_probe()
5722 1 << pp->id); in mvneta_probe()
5723 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5725 free_percpu(pp->stats); in mvneta_probe()
5727 free_percpu(pp->ports); in mvneta_probe()
5729 if (pp->phylink) in mvneta_probe()
5730 phylink_destroy(pp->phylink); in mvneta_probe()
5732 clk_disable_unprepare(pp->clk_bus); in mvneta_probe()
5733 clk_disable_unprepare(pp->clk); in mvneta_probe()
5743 struct mvneta_port *pp = netdev_priv(dev); in mvneta_remove() local
5746 clk_disable_unprepare(pp->clk_bus); in mvneta_remove()
5747 clk_disable_unprepare(pp->clk); in mvneta_remove()
5748 free_percpu(pp->ports); in mvneta_remove()
5749 free_percpu(pp->stats); in mvneta_remove()
5751 phylink_destroy(pp->phylink); in mvneta_remove()
5753 if (pp->bm_priv) { in mvneta_remove()
5754 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_remove()
5755 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_remove()
5756 1 << pp->id); in mvneta_remove()
5757 mvneta_bm_put(pp->bm_priv); in mvneta_remove()
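remove() unwinds in the reverse order of probe's success path: unregister first, then clocks, the per-CPU state, phylink, and finally the BM pools probe may have created; the devm-managed allocations from mvneta_init() need no explicit free. A reconstruction from the fragments (the unregister_netdev() call is assumed, as it contains no "pp" and so cannot match above):

static void mvneta_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct mvneta_port *pp = netdev_priv(dev);

        unregister_netdev(dev);
        clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);
        free_percpu(pp->ports);
        free_percpu(pp->stats);
        phylink_destroy(pp->phylink);

        if (pp->bm_priv) {
                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long,
                                       1 << pp->id);
                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
                                       1 << pp->id);
                mvneta_bm_put(pp->bm_priv);
        }
}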
5766 struct mvneta_port *pp = netdev_priv(dev); in mvneta_suspend() local
5771 if (!pp->neta_armada3700) { in mvneta_suspend()
5772 spin_lock(&pp->lock); in mvneta_suspend()
5773 pp->is_stopped = true; in mvneta_suspend()
5774 spin_unlock(&pp->lock); in mvneta_suspend()
5777 &pp->node_online); in mvneta_suspend()
5779 &pp->node_dead); in mvneta_suspend()
5783 mvneta_stop_dev(pp); in mvneta_suspend()
5787 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_suspend()
5789 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_suspend()
5793 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_suspend()
5795 mvneta_txq_hw_deinit(pp, txq); in mvneta_suspend()
5800 clk_disable_unprepare(pp->clk_bus); in mvneta_suspend()
5801 clk_disable_unprepare(pp->clk); in mvneta_suspend()
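Suspend is a runtime teardown of a running interface: mark the port stopped under pp->lock, detach the CPU-hotplug notifier instances (non-3700 only), stop the datapath, drop all pending RX packets, and deinit each TX queue in hardware before gating the clocks. A condensed sketch of the running branch; the cpuhp call names are assumptions consistent with the node_online/node_dead fragments:

        if (!pp->neta_armada3700) {
                spin_lock(&pp->lock);
                pp->is_stopped = true;
                spin_unlock(&pp->lock);

                cpuhp_state_remove_instance_nocalls(online_hpstate,
                                                    &pp->node_online);
                cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
                                                    &pp->node_dead);
        }

        mvneta_stop_dev(pp);

        for (queue = 0; queue < rxq_number; queue++)
                mvneta_rxq_drop_pkts(pp, &pp->rxqs[queue]);

        for (queue = 0; queue < txq_number; queue++)
                mvneta_txq_hw_deinit(pp, &pp->txqs[queue]);

        clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);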
5810 struct mvneta_port *pp = netdev_priv(dev); in mvneta_resume() local
5813 clk_prepare_enable(pp->clk); in mvneta_resume()
5814 if (!IS_ERR(pp->clk_bus)) in mvneta_resume()
5815 clk_prepare_enable(pp->clk_bus); in mvneta_resume()
5816 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_resume()
5817 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_resume()
5818 if (pp->bm_priv) { in mvneta_resume()
5819 err = mvneta_bm_port_init(pdev, pp); in mvneta_resume()
5822 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_resume()
5823 pp->bm_priv = NULL; in mvneta_resume()
5826 mvneta_defaults_set(pp); in mvneta_resume()
5827 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_resume()
5839 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_resume()
5842 mvneta_rxq_hw_init(pp, rxq); in mvneta_resume()
5846 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_resume()
5849 mvneta_txq_hw_init(pp, txq); in mvneta_resume()
5852 if (!pp->neta_armada3700) { in mvneta_resume()
5853 spin_lock(&pp->lock); in mvneta_resume()
5854 pp->is_stopped = false; in mvneta_resume()
5855 spin_unlock(&pp->lock); in mvneta_resume()
5857 &pp->node_online); in mvneta_resume()
5859 &pp->node_dead); in mvneta_resume()
5863 mvneta_start_dev(pp); in mvneta_resume()
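Resume replays probe's bring-up in order: clocks, MBUS windows, BM re-init (again falling back to software buffers on failure), port defaults and power-up, then per-queue hardware re-init before the hotplug notifiers are restored and the device restarted. The queue step also resets the software descriptor cursors, which no longer match the re-initialized hardware rings; a sketch consistent with the fragments (the next_desc_to_proc resets are assumptions):

        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

                rxq->next_desc_to_proc = 0;     /* assumed cursor reset */
                mvneta_rxq_hw_init(pp, rxq);
        }

        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];

                txq->next_desc_to_proc = 0;     /* assumed cursor reset */
                mvneta_txq_hw_init(pp, txq);
        }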