// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 *
 * DMA-layer callbacks for the Synopsys DesignWare XGMAC (10G MAC) core,
 * exported at the bottom of this file as dwxgmac210_dma_ops.  Every helper
 * is a thin MMIO read-modify-write sequence against the XGMAC register
 * block at @ioaddr; register offsets and field masks come from dwxgmac2.h.
 */

#include <linux/iopoll.h>
#include "stmmac.h"
#include "dwxgmac2.h"

/*
 * Issue a DMA software reset and wait for the hardware to self-clear the
 * SWR bit.  Returns 0 on success or -ETIMEDOUT after 100ms of polling.
 */
static int dwxgmac2_dma_reset(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_DMA_MODE);

	/* DMA SW reset */
	writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);

	/* Busy-poll (0us sleep) with a 100000us timeout until SWR clears. */
	return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
				  !(value & XGMAC_SWR), 0, 100000);
}

/*
 * Global DMA init: program the system-bus mode register with the
 * address-aligned-beats (AAL) and enhanced-addressing (EAME) options
 * from platform DMA configuration.
 *
 * @atds is unused here; it is part of the common stmmac_dma_ops
 * prototype and only meaningful on other MAC cores.
 */
static void dwxgmac2_dma_init(void __iomem *ioaddr,
			      struct stmmac_dma_cfg *dma_cfg, int atds)
{
	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);

	if (dma_cfg->aal)
		value |= XGMAC_AAL;

	if (dma_cfg->eame)
		value |= XGMAC_EAME;

	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
}

/*
 * Per-channel init common to RX and TX: optionally enable the 8x PBL
 * multiplier and unmask the default set of per-channel DMA interrupts.
 */
static void dwxgmac2_dma_init_chan(struct stmmac_priv *priv,
				   void __iomem *ioaddr,
				   struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));

	if (dma_cfg->pblx8)
		value |= XGMAC_PBLx8;

	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
	writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}

/*
 * RX channel init: program the RX programmable burst length and the
 * 64-bit base address (@phy) of the RX descriptor ring.
 */
static void dwxgmac2_dma_init_rx_chan(struct stmmac_priv *priv,
				      void __iomem *ioaddr,
				      struct stmmac_dma_cfg *dma_cfg,
				      dma_addr_t phy, u32 chan)
{
	/* Per-direction PBL overrides the common one when set. */
	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
	value &= ~XGMAC_RxPBL;
	value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));

	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
}

/*
 * TX channel init: program the TX programmable burst length, enable
 * Operate-on-Second-Packet (OSP), and set the 64-bit base address
 * (@phy) of the TX descriptor ring.
 */
static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
				      void __iomem *ioaddr,
				      struct stmmac_dma_cfg *dma_cfg,
				      dma_addr_t phy, u32 chan)
{
	/* Per-direction PBL overrides the common one when set. */
	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
	value &= ~XGMAC_TxPBL;
	value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
	value |= XGMAC_OSP;
	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
}

/*
 * Program the AXI system-bus mode from platform data: LPI options,
 * read/write outstanding-request limits, fixed/undefined burst mode and
 * the set of allowed burst lengths.  Also sets the TX/RX eDMA
 * descriptor-prefetch controls to their defaults.
 */
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
	int i;

	if (axi->axi_lpi_en)
		value |= XGMAC_EN_LPI;
	if (axi->axi_xit_frm)
		value |= XGMAC_LPI_XIT_PKT;

	value &= ~XGMAC_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
		 XGMAC_WR_OSR_LMT;

	value &= ~XGMAC_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
		 XGMAC_RD_OSR_LMT;

	/* Without fixed-burst, advertise undefined burst lengths. */
	if (!axi->axi_fb)
		value |= XGMAC_UNDEF;

	/* Rebuild the allowed-burst-length mask from the platform list;
	 * unrecognized entries are silently ignored.
	 */
	value &= ~XGMAC_BLEN;
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= XGMAC_BLEN256;
			break;
		case 128:
			value |= XGMAC_BLEN128;
			break;
		case 64:
			value |= XGMAC_BLEN64;
			break;
		case 32:
			value |= XGMAC_BLEN32;
			break;
		case 16:
			value |= XGMAC_BLEN16;
			break;
		case 8:
			value |= XGMAC_BLEN8;
			break;
		case 4:
			value |= XGMAC_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
	writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
	writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}

/*
 * Snapshot the register file into @reg_space (one u32 per register,
 * indexed by offset/4) for ethtool register dumps, starting at the DMA
 * mode register.
 */
static void dwxgmac2_dma_dump_regs(struct stmmac_priv *priv,
				   void __iomem *ioaddr, u32 *reg_space)
{
	int i;

	for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

/*
 * Configure the MTL RX queue operating mode for @channel:
 * store-and-forward vs. threshold mode (@mode is SF_DMA_MODE or a
 * threshold in bytes), the RX queue size derived from @fifosz, and —
 * for large-enough non-AVB queues — hardware flow-control thresholds.
 * Finally unmasks the MTL RX-overflow interrupt for the queue.
 */
static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
				 int mode, u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
	/* RQS field encodes the queue size in 256-byte blocks, minus 1. */
	unsigned int rqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_RSF;
	} else {
		value &= ~XGMAC_RSF;
		value &= ~XGMAC_RTC;

		/* Map the byte threshold onto the hardware RTC encoding. */
		if (mode <= 64)
			value |= 0x0 << XGMAC_RTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_RTC_SHIFT;
		else
			value |= 0x3 << XGMAC_RTC_SHIFT;
	}

	value &= ~XGMAC_RQS;
	value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;

	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
		unsigned int rfd, rfa;

		value |= XGMAC_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		default:
			rfd = 0x07; /* Full-4.5K */
			rfa = 0x04; /* Full-3K */
			break;
		}

		flow &= ~XGMAC_RFD;
		flow |= rfd << XGMAC_RFD_SHIFT;

		flow &= ~XGMAC_RFA;
		flow |= rfa << XGMAC_RFA_SHIFT;

		writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
	}

	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));

	/* Enable MTL RX overflow */
	value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
	writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}

/*
 * Configure the MTL TX queue operating mode for @channel:
 * store-and-forward vs. threshold mode, a static 1:1 traffic-class to
 * queue mapping, queue-enable mode (DCB vs. AVB) and the TX queue size
 * derived from @fifosz.
 */
static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
				 int mode, u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
	/* TQS field encodes the queue size in 256-byte blocks, minus 1. */
	unsigned int tqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_TSF;
	} else {
		value &= ~XGMAC_TSF;
		value &= ~XGMAC_TTC;

		/* Map the byte threshold onto the hardware TTC encoding. */
		if (mode <= 64)
			value |= 0x0 << XGMAC_TTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_TTC_SHIFT;
		else if (mode <= 128)
			value |= 0x3 << XGMAC_TTC_SHIFT;
		else if (mode <= 192)
			value |= 0x4 << XGMAC_TTC_SHIFT;
		else if (mode <= 256)
			value |= 0x5 << XGMAC_TTC_SHIFT;
		else if (mode <= 384)
			value |= 0x6 << XGMAC_TTC_SHIFT;
		else
			value |= 0x7 << XGMAC_TTC_SHIFT;
	}

	/* Use static TC to Queue mapping */
	value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;

	/* TXQEN: 0x2 = enabled (DCB/generic), 0x1 = enabled for AVB. */
	value &= ~XGMAC_TXQEN;
	if (qmode != MTL_QUEUE_AVB)
		value |= 0x2 << XGMAC_TXQEN_SHIFT;
	else
		value |= 0x1 << XGMAC_TXQEN_SHIFT;

	value &= ~XGMAC_TQS;
	value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;

	writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}

/* Unmask the default RX and/or TX DMA interrupts for @chan. */
static void dwxgmac2_enable_dma_irq(struct stmmac_priv *priv,
				    void __iomem *ioaddr, u32 chan,
				    bool rx, bool tx)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));

	if (rx)
		value |= XGMAC_DMA_INT_DEFAULT_RX;
	if (tx)
		value |= XGMAC_DMA_INT_DEFAULT_TX;

	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}

/* Mask the default RX and/or TX DMA interrupts for @chan. */
static void dwxgmac2_disable_dma_irq(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 chan,
				     bool rx, bool tx)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));

	if (rx)
		value &= ~XGMAC_DMA_INT_DEFAULT_RX;
	if (tx)
		value &= ~XGMAC_DMA_INT_DEFAULT_TX;

	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}

/* Start TX DMA on @chan and enable the MAC transmitter. */
static void dwxgmac2_dma_start_tx(struct stmmac_priv *priv,
				  void __iomem *ioaddr, u32 chan)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
	value |= XGMAC_TXST;
	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	value = readl(ioaddr + XGMAC_TX_CONFIG);
	value |= XGMAC_CONFIG_TE;
	writel(value, ioaddr + XGMAC_TX_CONFIG);
}

/* Stop TX DMA on @chan and disable the MAC transmitter. */
static void dwxgmac2_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
				 u32 chan)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
	value &= ~XGMAC_TXST;
	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	value = readl(ioaddr + XGMAC_TX_CONFIG);
	value &= ~XGMAC_CONFIG_TE;
	writel(value, ioaddr + XGMAC_TX_CONFIG);
}

/* Start RX DMA on @chan and enable the MAC receiver. */
static void dwxgmac2_dma_start_rx(struct stmmac_priv *priv,
				  void __iomem *ioaddr, u32 chan)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
	value |= XGMAC_RXST;
	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	value |= XGMAC_CONFIG_RE;
	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

/*
 * Stop RX DMA on @chan.  Unlike stop_tx, the MAC receiver (RE) is left
 * enabled here — presumably because other channels may still be
 * receiving; NOTE(review): confirm against the callers in stmmac_main.
 */
static void dwxgmac2_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
				 u32 chan)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
	value &= ~XGMAC_RXST;
	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}

/*
 * Per-channel DMA interrupt handler.  Reads and decodes the channel
 * status, optionally filtered by direction (@dir), bumps the matching
 * counters in @x, acknowledges only the bits that are both asserted and
 * enabled, and returns a bitmask of handle_rx/handle_tx/tx_hard_error
 * actions for the caller.
 */
static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
				  void __iomem *ioaddr,
				  struct stmmac_extra_stats *x, u32 chan,
				  u32 dir)
{
	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
	int ret = 0;

	/* When servicing a single direction, ignore the other's bits. */
	if (dir == DMA_DIR_RX)
		intr_status &= XGMAC_DMA_STATUS_MSK_RX;
	else if (dir == DMA_DIR_TX)
		intr_status &= XGMAC_DMA_STATUS_MSK_TX;

	/* ABNORMAL interrupts */
	if (unlikely(intr_status & XGMAC_AIS)) {
		if (unlikely(intr_status & XGMAC_RBU)) {
			x->rx_buf_unav_irq++;
			ret |= handle_rx;
		}
		if (unlikely(intr_status & XGMAC_TPS)) {
			x->tx_process_stopped_irq++;
			ret |= tx_hard_error;
		}
		if (unlikely(intr_status & XGMAC_FBE)) {
			x->fatal_bus_error_irq++;
			ret |= tx_hard_error;
		}
	}

	/* TX/RX NORMAL interrupts */
	if (likely(intr_status & XGMAC_NIS)) {
		x->normal_irq_n++;

		if (likely(intr_status & XGMAC_RI)) {
			x->rx_normal_irq_n++;
			x->rxq_stats[chan].rx_normal_irq_n++;
			ret |= handle_rx;
		}
		if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
			x->tx_normal_irq_n++;
			x->txq_stats[chan].tx_normal_irq_n++;
			ret |= handle_tx;
		}
	}

	/* Clear interrupts */
	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));

	return ret;
}

/*
 * Decode the four HW_FEATURE registers into @dma_cap.  Each field is a
 * mask-and-shift of the corresponding capability bit(s); the shift
 * amounts match the bit positions defined in dwxgmac2.h.  Always
 * returns 0.
 */
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
				   struct dma_features *dma_cap)
{
	u32 hw_cap;

	/* MAC HW feature 0 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
	dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
	dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
	dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
	dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
	dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
	dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
	/* AV support is only claimed when RAV (RX-side AV) is absent. */
	dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
	dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
	dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
	dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
	dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
	dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;

	/* MAC HW feature 1 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
	dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
	dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
	dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;

	/* Translate the encoded address-width field into a bit count;
	 * unknown encodings fall back to 32 bits.
	 */
	dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	/* FIFO sizes are encoded as a power-of-two multiple of 128 bytes. */
	dma_cap->tx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

	/* MAC HW feature 2 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
	dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
	/* Channel/queue counts are stored as count-minus-one in hardware. */
	dma_cap->number_tx_channel =
		((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
	dma_cap->number_rx_channel =
		((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
	dma_cap->number_rx_queues =
		((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;

	/* MAC HW feature 3 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
	dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
	dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
	dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
	dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
	dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
	dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
	dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
	dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
	dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
	dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;

	return 0;
}

/* Program the RX interrupt watchdog timer for @queue. */
static void dwxgmac2_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
				 u32 riwt, u32 queue)
{
	writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue));
}

/* Program the RX descriptor ring length for @chan. */
static void dwxgmac2_set_rx_ring_len(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 len, u32 chan)
{
	writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
}

/* Program the TX descriptor ring length for @chan. */
static void dwxgmac2_set_tx_ring_len(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 len, u32 chan)
{
	writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
}

/* Update the RX descriptor tail pointer, kicking the RX DMA for @chan. */
static void dwxgmac2_set_rx_tail_ptr(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 ptr, u32 chan)
{
	writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
}

/* Update the TX descriptor tail pointer, kicking the TX DMA for @chan. */
static void dwxgmac2_set_tx_tail_ptr(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 ptr, u32 chan)
{
	writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
}

/* Enable/disable TCP Segmentation Offload (TSE bit) on @chan. */
static void dwxgmac2_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
				bool en, u32 chan)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	if (en)
		value |= XGMAC_TSE;
	else
		value &= ~XGMAC_TSE;

	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
}

/*
 * Switch a TX queue between DCB/generic mode and AVB mode.  Leaving AVB
 * clears the per-TC ETS control; entering AVB disables RX flow control
 * (RFE) at the MAC level.
 */
static void dwxgmac2_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
			   u32 channel, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
	u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);

	value &= ~XGMAC_TXQEN;
	if (qmode != MTL_QUEUE_AVB) {
		value |= 0x2 << XGMAC_TXQEN_SHIFT;
		writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
	} else {
		value |= 0x1 << XGMAC_TXQEN_SHIFT;
		writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
	}

	writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}

/* Program the RX buffer size (RBSZ field) for @chan. */
static void dwxgmac2_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
				int bfsize, u32 chan)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
	value &= ~XGMAC_RBSZ;
	value |= bfsize << XGMAC_RBSZ_SHIFT;
	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}

/*
 * Enable/disable Split Header (SPH) on @chan.  The header-split size is
 * (re)programmed to 256 bytes unconditionally, even when disabling.
 */
static void dwxgmac2_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
				bool en, u32 chan)
{
	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);

	value &= ~XGMAC_CONFIG_HDSMS;
	value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
	writel(value, ioaddr + XGMAC_RX_CONFIG);

	value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
	if (en)
		value |= XGMAC_SPH;
	else
		value &= ~XGMAC_SPH;
	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
}

/*
 * Enable/disable TBS (enhanced descriptors, EDSE bit) on @chan.  The
 * write is read back to verify the core actually supports EDSE; -EIO is
 * returned if enabling did not stick.  The four launch-time offset
 * registers are programmed to the default FTOS value either way.
 */
static int dwxgmac2_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
			       bool en, u32 chan)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	if (en)
		value |= XGMAC_EDSE;
	else
		value &= ~XGMAC_EDSE;

	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

	/* Read back: if EDSE refused to set, the feature is unsupported. */
	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
	if (en && !value)
		return -EIO;

	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
	return 0;
}

/* DMA callback table wired into the stmmac core for XGMAC 2.10. */
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
	.reset = dwxgmac2_dma_reset,
	.init = dwxgmac2_dma_init,
	.init_chan = dwxgmac2_dma_init_chan,
	.init_rx_chan = dwxgmac2_dma_init_rx_chan,
	.init_tx_chan = dwxgmac2_dma_init_tx_chan,
	.axi = dwxgmac2_dma_axi,
	.dump_regs = dwxgmac2_dma_dump_regs,
	.dma_rx_mode = dwxgmac2_dma_rx_mode,
	.dma_tx_mode = dwxgmac2_dma_tx_mode,
	.enable_dma_irq = dwxgmac2_enable_dma_irq,
	.disable_dma_irq = dwxgmac2_disable_dma_irq,
	.start_tx = dwxgmac2_dma_start_tx,
	.stop_tx = dwxgmac2_dma_stop_tx,
	.start_rx = dwxgmac2_dma_start_rx,
	.stop_rx = dwxgmac2_dma_stop_rx,
	.dma_interrupt = dwxgmac2_dma_interrupt,
	.get_hw_feature = dwxgmac2_get_hw_feature,
	.rx_watchdog = dwxgmac2_rx_watchdog,
	.set_rx_ring_len = dwxgmac2_set_rx_ring_len,
	.set_tx_ring_len = dwxgmac2_set_tx_ring_len,
	.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
	.enable_tso = dwxgmac2_enable_tso,
	.qmode = dwxgmac2_qmode,
	.set_bfsize = dwxgmac2_set_bfsize,
	.enable_sph = dwxgmac2_enable_sph,
	.enable_tbs = dwxgmac2_enable_tbs,
};