// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023, Intel Corporation
 * stmmac EST (IEEE 802.1Qbv) handling
 */
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/types.h>
#include "stmmac.h"
#include "stmmac_est.h"

/* Write @val to the EST register or GCL entry selected by @reg and wait
 * for the hardware to complete the access.
 */
static int est_write(void __iomem *est_addr, u32 reg, u32 val, bool gcl)
{
	u32 ctrl;

	writel(val, est_addr + EST_GCL_DATA);

	ctrl = (reg << EST_ADDR_SHIFT);
	/* GCRR addresses the EST parameter registers (BTR/CTR/TER/LLR),
	 * otherwise @reg selects a GCL entry.
	 */
	ctrl |= gcl ? 0 : EST_GCRR;
	writel(ctrl, est_addr + EST_GCL_CONTROL);

	/* Start the access; hardware clears SRWO when it completes. */
	ctrl |= EST_SRWO;
	writel(ctrl, est_addr + EST_GCL_CONTROL);

	return readl_poll_timeout(est_addr + EST_GCL_CONTROL, ctrl,
				  !(ctrl & EST_SRWO), 100, 5000);
}

static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
			 unsigned int ptp_rate)
{
	void __iomem *est_addr = priv->estaddr;
	int i, ret = 0;
	u32 ctrl;

	if (!ptp_rate) {
		netdev_warn(priv->dev, "Invalid PTP rate");
		return -EINVAL;
	}

	ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
	ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
	ret |= est_write(est_addr, EST_TER, cfg->ter, false);
	ret |= est_write(est_addr, EST_LLR, cfg->gcl_size, false);
	ret |= est_write(est_addr, EST_CTR_LOW, cfg->ctr[0], false);
	ret |= est_write(est_addr, EST_CTR_HIGH, cfg->ctr[1], false);
	if (ret)
		return ret;

	for (i = 0; i < cfg->gcl_size; i++) {
		ret = est_write(est_addr, i, cfg->gcl[i], true);
		if (ret)
			return ret;
	}

	ctrl = readl(est_addr + EST_CONTROL);
	/* Program the PTP time offset: one PTP clock period in ns scaled
	 * by the implementation-specific multiplier.
	 */
	if (priv->plat->has_xgmac) {
		ctrl &= ~EST_XGMAC_PTOV;
		ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) <<
			EST_XGMAC_PTOV_SHIFT;
	} else {
		ctrl &= ~EST_GMAC5_PTOV;
		ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_GMAC5_PTOV_MUL) <<
			EST_GMAC5_PTOV_SHIFT;
	}
	if (cfg->enable)
		ctrl |= EST_EEST | EST_SSWL | EST_DFBS;
	else
		ctrl &= ~EST_EEST;

	writel(ctrl, est_addr + EST_CONTROL);

	/* Configure EST interrupt */
	if (cfg->enable)
		ctrl = EST_IECGCE | EST_IEHS | EST_IEHF | EST_IEBE | EST_IECC;
	else
		ctrl = 0;

	writel(ctrl, est_addr + EST_INT_EN);

	return 0;
}

static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
			   struct stmmac_extra_stats *x, u32 txqcnt)
{
	u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
	void __iomem *est_addr = priv->estaddr;
	u32 txqcnt_mask = BIT(txqcnt) - 1;
	int i;

	status = readl(est_addr + EST_STATUS);

	value = EST_CGCE | EST_HLBS | EST_HLBF | EST_BTRE | EST_SWLC;

	/* Return if there is no error */
	if (!(status & value))
		return;

	if (status & EST_CGCE) {
		/* Clear Interrupt */
		writel(EST_CGCE, est_addr + EST_STATUS);

		x->mtl_est_cgce++;
	}

	if (status & EST_HLBS) {
		value = readl(est_addr + EST_SCH_ERR);
		value &= txqcnt_mask;

		x->mtl_est_hlbs++;

		for (i = 0; i < txqcnt; i++)
			if (value & BIT(i))
				x->mtl_est_txq_hlbs[i]++;

		/* Clear Interrupt */
		writel(value, est_addr + EST_SCH_ERR);

		/* Collect info to show all the queues that have the HLBS
		 * issue. The only way to clear this is to clear the
		 * statistic.
		 */
		if (net_ratelimit())
			netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
	}

	if (status & EST_HLBF) {
		value = readl(est_addr + EST_FRM_SZ_ERR);
		feqn = value & txqcnt_mask;

		value = readl(est_addr + EST_FRM_SZ_CAP);
		hbfq = (value & EST_SZ_CAP_HBFQ_MASK(txqcnt)) >>
			EST_SZ_CAP_HBFQ_SHIFT;
		hbfs = value & EST_SZ_CAP_HBFS_MASK;

		x->mtl_est_hlbf++;

		for (i = 0; i < txqcnt; i++)
			if (feqn & BIT(i))
				x->mtl_est_txq_hlbf[i]++;

		/* Clear Interrupt */
		writel(feqn, est_addr + EST_FRM_SZ_ERR);

		if (net_ratelimit())
			netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
				   hbfq, hbfs);
	}

	if (status & EST_BTRE) {
		if (priv->plat->has_xgmac) {
			btrl = FIELD_GET(EST_XGMAC_BTRL, status);
			btrl_max = FIELD_MAX(EST_XGMAC_BTRL);
		} else {
			btrl = FIELD_GET(EST_GMAC5_BTRL, status);
			btrl_max = FIELD_MAX(EST_GMAC5_BTRL);
		}
		if (btrl == btrl_max)
			x->mtl_est_btrlm++;
		else
			x->mtl_est_btre++;

		if (net_ratelimit())
			netdev_info(dev, "EST: BTR Error Loop Count %u\n",
				    btrl);

		writel(EST_BTRE, est_addr + EST_STATUS);
	}

	if (status & EST_SWLC) {
		writel(EST_SWLC, est_addr + EST_STATUS);
		netdev_info(dev, "EST: SWOL has been switched\n");
	}
}

const struct stmmac_est_ops dwmac510_est_ops = {
	.configure = est_configure,
	.irq_status = est_irq_status,
};
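
/*
 * Illustrative sketch only, kept as a comment so this file still compiles:
 * the ops table above is reached through the stmmac hwif indirection layer
 * rather than being called directly.  The wrapper names and call shape
 * below follow the hwif.h conventions but are assumptions and may differ
 * between kernel versions.
 *
 *	ret = stmmac_est_configure(priv, priv, priv->est,
 *				   priv->plat->clk_ptp_rate);
 *	if (ret)
 *		netdev_err(priv->dev, "EST configuration failed\n");
 *
 * The IRQ path is analogous: the core driver's interrupt handler calls the
 * irq_status() hook (e.g. via a stmmac_est_irq_status() wrapper) so that
 * the status decoding and statistics updates above run in that context.
 */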