1*d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21edb9ca6SSiva Reddy /* 10G controller driver for Samsung SoCs
31edb9ca6SSiva Reddy *
41edb9ca6SSiva Reddy * Copyright (C) 2013 Samsung Electronics Co., Ltd.
51edb9ca6SSiva Reddy * http://www.samsung.com
61edb9ca6SSiva Reddy *
71edb9ca6SSiva Reddy * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
81edb9ca6SSiva Reddy */
91edb9ca6SSiva Reddy #include <linux/delay.h>
101edb9ca6SSiva Reddy #include <linux/export.h>
111edb9ca6SSiva Reddy #include <linux/io.h>
121edb9ca6SSiva Reddy #include <linux/netdevice.h>
131edb9ca6SSiva Reddy #include <linux/phy.h>
141edb9ca6SSiva Reddy
151edb9ca6SSiva Reddy #include "sxgbe_common.h"
161edb9ca6SSiva Reddy #include "sxgbe_dma.h"
171edb9ca6SSiva Reddy #include "sxgbe_reg.h"
181edb9ca6SSiva Reddy #include "sxgbe_desc.h"
191edb9ca6SSiva Reddy
201edb9ca6SSiva Reddy /* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	u32 mode;

	mode = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* UNDEF selects undefined (unrestricted) AXI burst lengths and is
	 * raised only when fixed bursts were not requested.  The BLEN
	 * bitmap for lengths 4/8/16/32/64/128/256 is programmed in either
	 * case.
	 */
	if (!fix_burst)
		mode |= SXGBE_DMA_AXI_UNDEF_BURST;

	mode |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(mode, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}
421edb9ca6SSiva Reddy
/* Per-channel DMA initialization: burst mode, descriptor ring base
 * addresses, tail pointers, ring lengths and interrupt enables.
 */
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
				   int fix_burst, int pbl, dma_addr_t dma_tx,
				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
	u32 reg_val;
	dma_addr_t dma_addr;

	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
	/* fixed bursts: enable PBLx8 mode and program the TX/RX PBL */
	if (fix_burst) {
		reg_val |= SXGBE_DMA_PBL_X8MODE;
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
		/* program the TX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		/* program the RX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
	}

	/* program desc registers */
	writel(upper_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

	writel(upper_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program tail pointers */
	/* assumption: upper 32 bits are constant and
	 * same as TX/RX desc list
	 */
	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));

	/* Fix: the RX tail pointer was previously written to the RX
	 * descriptor-list low-address register (RXDESC_LADD), which is
	 * programmed above with the ring base - a copy-paste slip from
	 * the LADD writes.  Write it to the RX tail-pointer register,
	 * mirroring the TX path.
	 */
	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num));

	/* program the ring sizes */
	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}
951edb9ca6SSiva Reddy
/* Kick the TX DMA engine for one channel (start/poll demand). */
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
	void __iomem *txctl = ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num);

	writel(readl(txctl) | SXGBE_TX_START_DMA, txctl);
}
1041edb9ca6SSiva Reddy
/* Unmask TX/RX interrupts on the given DMA channel. */
static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	void __iomem *int_ena = ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum);

	writel(SXGBE_DMA_ENA_INT, int_ena);
}
1111edb9ca6SSiva Reddy
/* Mask all interrupts on the given DMA channel. */
static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	void __iomem *int_ena = ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum);

	writel(0, int_ena);
}
1171edb9ca6SSiva Reddy
/* Set the TX enable bit on every transmit DMA channel. */
static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
	int chan;

	for (chan = 0; chan < tchannels; chan++) {
		void __iomem *txctl = ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan);

		writel(readl(txctl) | SXGBE_TX_ENABLE, txctl);
	}
}
1301edb9ca6SSiva Reddy
/* Enable the TX DMA engine for a single channel. */
static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	void __iomem *txctl = ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum);

	writel(readl(txctl) | SXGBE_TX_ENABLE, txctl);
}
1391edb9ca6SSiva Reddy
/* Disable the TX DMA engine for a single channel. */
static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	void __iomem *txctl = ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum);

	writel(readl(txctl) & ~SXGBE_TX_ENABLE, txctl);
}
1481edb9ca6SSiva Reddy
/* Clear the TX enable bit on every transmit DMA channel. */
static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
	int chan;

	for (chan = 0; chan < tchannels; chan++) {
		void __iomem *txctl = ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan);

		writel(readl(txctl) & ~SXGBE_TX_ENABLE, txctl);
	}
}
1601edb9ca6SSiva Reddy
/* Set the RX enable bit on every receive DMA channel. */
static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
	int chan;

	for (chan = 0; chan < rchannels; chan++) {
		void __iomem *rxctl = ioaddr + SXGBE_DMA_CHA_RXCTL_REG(chan);

		writel(readl(rxctl) | SXGBE_RX_ENABLE, rxctl);
	}
}
1731edb9ca6SSiva Reddy
/* Clear the RX enable bit on every receive DMA channel. */
static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
	int chan;

	for (chan = 0; chan < rchannels; chan++) {
		void __iomem *rxctl = ioaddr + SXGBE_DMA_CHA_RXCTL_REG(chan);

		writel(readl(rxctl) & ~SXGBE_RX_ENABLE, rxctl);
	}
}
1851edb9ca6SSiva Reddy
/* Decode and acknowledge the TX-side DMA interrupt causes for one channel.
 * Updates the driver's extra-stats counters, writes back only the bits it
 * serviced, and returns a bitmask of handle_tx / tx_bump_tc / tx_hard_error
 * actions for the caller.
 */
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;	/* bits acknowledged back to the status reg */
	u32 ret_val = 0;	/* action flags returned to the caller */

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		/* transmit complete */
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		/* transmit buffer unavailable -> ask caller to bump
		 * the threshold control
		 */
		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and cleared when
			 * the respective error bits cleared
			 */

			/* check for actual cause */
			/* NOTE(review): each TEBx bit is decoded as a
			 * binary choice, so the "write"/"buffer" counters
			 * below are bumped whenever the corresponding bit
			 * is simply clear - confirm against the databook
			 * that the bits are defined that way.
			 */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
2571edb9ca6SSiva Reddy
/* Decode and acknowledge the RX-side DMA interrupt causes for one channel.
 * Updates the driver's extra-stats counters, writes back only the bits it
 * serviced, and returns a bitmask of handle_rx / rx_bump_tc / rx_hard_error
 * actions for the caller.
 */
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;	/* bits acknowledged back to the status reg */
	u32 ret_val = 0;	/* action flags returned to the caller */

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		/* receive complete */
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		/* receive buffer unavailable -> ask caller to bump
		 * the threshold control
		 */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and cleared when
			 * the respective error bits cleared
			 */

			/* check for actual cause */
			/* NOTE(review): each REBx bit is decoded as a
			 * binary choice, so the "write"/"buffer" counters
			 * below are bumped whenever the corresponding bit
			 * is simply clear - confirm against the databook
			 * that the bits are defined that way.
			 */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
3231edb9ca6SSiva Reddy
3241edb9ca6SSiva Reddy /* Program the HW RX Watchdog */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 qnum;

	/* Program the same watchdog interval into every RX queue. */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, qnum) {
		writel(riwt, ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(qnum));
	}
}
3341edb9ca6SSiva Reddy
/* Turn on TCP segmentation offload for one TX DMA channel. */
static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
{
	void __iomem *txctl = ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num);

	writel(readl(txctl) | SXGBE_DMA_CHA_TXCTL_TSE_ENABLE, txctl);
}
3431051125dSVipul Pandya
3441edb9ca6SSiva Reddy static const struct sxgbe_dma_ops sxgbe_dma_ops = {
3451edb9ca6SSiva Reddy .init = sxgbe_dma_init,
3461edb9ca6SSiva Reddy .cha_init = sxgbe_dma_channel_init,
3471edb9ca6SSiva Reddy .enable_dma_transmission = sxgbe_enable_dma_transmission,
3481edb9ca6SSiva Reddy .enable_dma_irq = sxgbe_enable_dma_irq,
3491edb9ca6SSiva Reddy .disable_dma_irq = sxgbe_disable_dma_irq,
3501edb9ca6SSiva Reddy .start_tx = sxgbe_dma_start_tx,
3511edb9ca6SSiva Reddy .start_tx_queue = sxgbe_dma_start_tx_queue,
3521edb9ca6SSiva Reddy .stop_tx = sxgbe_dma_stop_tx,
3531edb9ca6SSiva Reddy .stop_tx_queue = sxgbe_dma_stop_tx_queue,
3541edb9ca6SSiva Reddy .start_rx = sxgbe_dma_start_rx,
3551edb9ca6SSiva Reddy .stop_rx = sxgbe_dma_stop_rx,
3561edb9ca6SSiva Reddy .tx_dma_int_status = sxgbe_tx_dma_int_status,
3571edb9ca6SSiva Reddy .rx_dma_int_status = sxgbe_rx_dma_int_status,
3581edb9ca6SSiva Reddy .rx_watchdog = sxgbe_dma_rx_watchdog,
3591051125dSVipul Pandya .enable_tso = sxgbe_enable_tso,
3601edb9ca6SSiva Reddy };
3611edb9ca6SSiva Reddy
/* Return the (static, read-only) DMA operations table. */
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}
366