1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ 3 4 #include <linux/kernel.h> 5 #include <linux/platform_device.h> 6 #include <linux/slab.h> 7 #include <linux/module.h> 8 #include <linux/bitfield.h> 9 #include <linux/dma-mapping.h> 10 #include <linux/skbuff.h> 11 #include <linux/of_platform.h> 12 #include <linux/of_address.h> 13 #include <linux/of_reserved_mem.h> 14 #include <linux/mfd/syscon.h> 15 #include <linux/debugfs.h> 16 #include <linux/soc/mediatek/mtk_wed.h> 17 #include <net/flow_offload.h> 18 #include <net/pkt_cls.h> 19 #include "mtk_eth_soc.h" 20 #include "mtk_wed.h" 21 #include "mtk_ppe.h" 22 #include "mtk_wed_wo.h" 23 24 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) 25 26 #define MTK_WED_PKT_SIZE 1920 27 #define MTK_WED_BUF_SIZE 2048 28 #define MTK_WED_PAGE_BUF_SIZE 128 29 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) 30 #define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE) 31 #define MTK_WED_RX_RING_SIZE 1536 32 #define MTK_WED_RX_PG_BM_CNT 8192 33 #define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4) 34 #define MTK_WED_AMSDU_NPAGES 32 35 36 #define MTK_WED_TX_RING_SIZE 2048 37 #define MTK_WED_WDMA_RING_SIZE 1024 38 #define MTK_WED_MAX_GROUP_SIZE 0x100 39 #define MTK_WED_VLD_GROUP_SIZE 0x40 40 #define MTK_WED_PER_GROUP_PKT 128 41 42 #define MTK_WED_FBUF_SIZE 128 43 #define MTK_WED_MIOD_CNT 16 44 #define MTK_WED_FB_CMD_CNT 1024 45 #define MTK_WED_RRO_QUE_CNT 8192 46 #define MTK_WED_MIOD_ENTRY_CNT 128 47 48 #define MTK_WED_TX_BM_DMA_SIZE 65536 49 #define MTK_WED_TX_BM_PKT_CNT 32768 50 51 static struct mtk_wed_hw *hw_list[3]; 52 static DEFINE_MUTEX(hw_lock); 53 54 struct mtk_wed_flow_block_priv { 55 struct mtk_wed_hw *hw; 56 struct net_device *dev; 57 }; 58 59 static const struct mtk_wed_soc_data mt7622_data = { 60 .regmap = { 61 .tx_bm_tkid = 0x088, 62 .wpdma_rx_ring = { 63 0x770, 64 }, 65 .reset_idx_tx_mask = GENMASK(3, 0), 66 .reset_idx_rx_mask = GENMASK(17, 16), 67 }, 68 .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), 69 .wdma_desc_size = sizeof(struct mtk_wdma_desc), 70 }; 71 72 static const struct mtk_wed_soc_data mt7986_data = { 73 .regmap = { 74 .tx_bm_tkid = 0x0c8, 75 .wpdma_rx_ring = { 76 0x770, 77 }, 78 .reset_idx_tx_mask = GENMASK(1, 0), 79 .reset_idx_rx_mask = GENMASK(7, 6), 80 }, 81 .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), 82 .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), 83 }; 84 85 static const struct mtk_wed_soc_data mt7988_data = { 86 .regmap = { 87 .tx_bm_tkid = 0x0c8, 88 .wpdma_rx_ring = { 89 0x7d0, 90 0x7d8, 91 }, 92 .reset_idx_tx_mask = GENMASK(1, 0), 93 .reset_idx_rx_mask = GENMASK(7, 6), 94 }, 95 .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc), 96 .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), 97 }; 98 99 static void 100 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 101 { 102 regmap_update_bits(dev->hw->regs, reg, mask | val, val); 103 } 104 105 static void 106 wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 107 { 108 return wed_m32(dev, reg, 0, mask); 109 } 110 111 static void 112 wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 113 { 114 return wed_m32(dev, reg, mask, 0); 115 } 116 117 static void 118 wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 119 { 120 wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); 121 } 122 123 static void 124 wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 125 { 126 wdma_m32(dev, reg, 0, mask); 127 } 128 129 static void 130 wdma_clr(struct mtk_wed_device *dev, u32 reg, 
u32 mask) 131 { 132 wdma_m32(dev, reg, mask, 0); 133 } 134 135 static u32 136 wifi_r32(struct mtk_wed_device *dev, u32 reg) 137 { 138 return readl(dev->wlan.base + reg); 139 } 140 141 static void 142 wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) 143 { 144 writel(val, dev->wlan.base + reg); 145 } 146 147 static u32 148 mtk_wed_read_reset(struct mtk_wed_device *dev) 149 { 150 return wed_r32(dev, MTK_WED_RESET); 151 } 152 153 static u32 154 mtk_wdma_read_reset(struct mtk_wed_device *dev) 155 { 156 return wdma_r32(dev, MTK_WDMA_GLO_CFG); 157 } 158 159 static void 160 mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) 161 { 162 u32 status; 163 164 if (!mtk_wed_is_v3_or_greater(dev->hw)) 165 return; 166 167 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); 168 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); 169 170 if (read_poll_timeout(wdma_r32, status, 171 !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 172 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) 173 dev_err(dev->hw->dev, "rx reset failed\n"); 174 175 if (read_poll_timeout(wdma_r32, status, 176 !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 177 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) 178 dev_err(dev->hw->dev, "rx reset failed\n"); 179 180 wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); 181 wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); 182 183 if (read_poll_timeout(wdma_r32, status, 184 !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 185 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) 186 dev_err(dev->hw->dev, "rx reset failed\n"); 187 188 if (read_poll_timeout(wdma_r32, status, 189 !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 190 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) 191 dev_err(dev->hw->dev, "rx reset failed\n"); 192 193 /* prefetch FIFO */ 194 wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, 195 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | 196 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); 197 wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, 198 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | 199 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); 200 201 /* core FIFO */ 202 wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, 203 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | 204 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | 205 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | 206 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | 207 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | 208 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | 209 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); 210 wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, 211 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | 212 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | 213 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | 214 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | 215 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | 216 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | 217 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); 218 219 /* writeback FIFO */ 220 wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), 221 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 222 wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), 223 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 224 225 wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), 226 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 227 wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), 228 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 229 230 /* prefetch ring status */ 231 wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, 232 MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); 233 wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, 234 MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); 235 236 /* writeback ring status */ 237 wdma_w32(dev, 
MTK_WDMA_WRBK_SIDX_CFG, 238 MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); 239 wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, 240 MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); 241 } 242 243 static int 244 mtk_wdma_rx_reset(struct mtk_wed_device *dev) 245 { 246 u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; 247 int i, ret; 248 249 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); 250 ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status, 251 !(status & mask), 0, 10000); 252 if (ret) 253 dev_err(dev->hw->dev, "rx reset failed\n"); 254 255 mtk_wdma_v3_rx_reset(dev); 256 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); 257 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 258 259 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { 260 if (dev->rx_wdma[i].desc) 261 continue; 262 263 wdma_w32(dev, 264 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 265 } 266 267 return ret; 268 } 269 270 static u32 271 mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) 272 { 273 return !!(wed_r32(dev, reg) & mask); 274 } 275 276 static int 277 mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) 278 { 279 int sleep = 15000; 280 int timeout = 100 * sleep; 281 u32 val; 282 283 return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, 284 timeout, false, dev, reg, mask); 285 } 286 287 static void 288 mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) 289 { 290 u32 status; 291 292 if (!mtk_wed_is_v3_or_greater(dev->hw)) 293 return; 294 295 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); 296 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); 297 298 if (read_poll_timeout(wdma_r32, status, 299 !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 300 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) 301 dev_err(dev->hw->dev, "tx reset failed\n"); 302 303 if (read_poll_timeout(wdma_r32, status, 304 !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 305 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) 306 dev_err(dev->hw->dev, "tx reset failed\n"); 307 308 wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); 309 wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); 310 311 if (read_poll_timeout(wdma_r32, status, 312 !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 313 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) 314 dev_err(dev->hw->dev, "tx reset failed\n"); 315 316 if (read_poll_timeout(wdma_r32, status, 317 !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 318 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) 319 dev_err(dev->hw->dev, "tx reset failed\n"); 320 321 /* prefetch FIFO */ 322 wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, 323 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | 324 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); 325 wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, 326 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | 327 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); 328 329 /* core FIFO */ 330 wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, 331 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | 332 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | 333 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | 334 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); 335 wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, 336 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | 337 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | 338 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | 339 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); 340 341 /* writeback FIFO */ 342 wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), 343 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 344 wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), 345 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 346 347 
wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), 348 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 349 wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), 350 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 351 352 /* prefetch ring status */ 353 wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, 354 MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); 355 wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, 356 MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); 357 358 /* writeback ring status */ 359 wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, 360 MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); 361 wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, 362 MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); 363 } 364 365 static void 366 mtk_wdma_tx_reset(struct mtk_wed_device *dev) 367 { 368 u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; 369 int i; 370 371 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); 372 if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, 373 !(status & mask), 0, 10000)) 374 dev_err(dev->hw->dev, "tx reset failed\n"); 375 376 mtk_wdma_v3_tx_reset(dev); 377 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); 378 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 379 380 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) 381 wdma_w32(dev, 382 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 383 } 384 385 static void 386 mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) 387 { 388 u32 status; 389 390 wed_w32(dev, MTK_WED_RESET, mask); 391 if (readx_poll_timeout(mtk_wed_read_reset, dev, status, 392 !(status & mask), 0, 1000)) 393 WARN_ON_ONCE(1); 394 } 395 396 static u32 397 mtk_wed_wo_read_status(struct mtk_wed_device *dev) 398 { 399 return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS); 400 } 401 402 static void 403 mtk_wed_wo_reset(struct mtk_wed_device *dev) 404 { 405 struct mtk_wed_wo *wo = dev->hw->wed_wo; 406 u8 state = MTK_WED_WO_STATE_DISABLE; 407 void __iomem *reg; 408 u32 val; 409 410 mtk_wdma_tx_reset(dev); 411 mtk_wed_reset(dev, MTK_WED_RESET_WED); 412 413 if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 414 MTK_WED_WO_CMD_CHANGE_STATE, &state, 415 sizeof(state), false)) 416 return; 417 418 if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, 419 val == MTK_WED_WOIF_DISABLE_DONE, 420 100, MTK_WOCPU_TIMEOUT)) 421 dev_err(dev->hw->dev, "failed to disable wed-wo\n"); 422 423 reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4); 424 425 val = readl(reg); 426 switch (dev->hw->index) { 427 case 0: 428 val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 429 writel(val, reg); 430 val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 431 writel(val, reg); 432 break; 433 case 1: 434 val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 435 writel(val, reg); 436 val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 437 writel(val, reg); 438 break; 439 default: 440 break; 441 } 442 iounmap(reg); 443 } 444 445 void mtk_wed_fe_reset(void) 446 { 447 int i; 448 449 mutex_lock(&hw_lock); 450 451 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 452 struct mtk_wed_hw *hw = hw_list[i]; 453 struct mtk_wed_device *dev; 454 int err; 455 456 if (!hw) 457 break; 458 459 dev = hw->wed_dev; 460 if (!dev || !dev->wlan.reset) 461 continue; 462 463 /* reset callback blocks until WLAN reset is completed */ 464 err = dev->wlan.reset(dev); 465 if (err) 466 dev_err(dev->dev, "wlan reset failed: %d\n", err); 467 } 468 469 mutex_unlock(&hw_lock); 470 } 471 472 void mtk_wed_fe_reset_complete(void) 473 { 474 int i; 475 476 mutex_lock(&hw_lock); 477 478 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 479 struct mtk_wed_hw *hw = hw_list[i]; 480 struct mtk_wed_device *dev; 481 482 if (!hw) 483 break; 484 485 dev = hw->wed_dev; 486 if 
(!dev || !dev->wlan.reset_complete) 487 continue; 488 489 dev->wlan.reset_complete(dev); 490 } 491 492 mutex_unlock(&hw_lock); 493 } 494 495 static struct mtk_wed_hw * 496 mtk_wed_assign(struct mtk_wed_device *dev) 497 { 498 struct mtk_wed_hw *hw; 499 int i; 500 501 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 502 hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; 503 if (!hw) 504 return NULL; 505 506 if (!hw->wed_dev) 507 goto out; 508 509 if (mtk_wed_is_v1(hw)) 510 return NULL; 511 512 /* MT7986 WED devices do not have any pcie slot restrictions */ 513 } 514 /* MT7986 PCIE or AXI */ 515 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 516 hw = hw_list[i]; 517 if (hw && !hw->wed_dev) 518 goto out; 519 } 520 521 return NULL; 522 523 out: 524 hw->wed_dev = dev; 525 return hw; 526 } 527 528 static int 529 mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev) 530 { 531 struct mtk_wed_hw *hw = dev->hw; 532 struct mtk_wed_amsdu *wed_amsdu; 533 int i; 534 535 if (!mtk_wed_is_v3_or_greater(hw)) 536 return 0; 537 538 wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES, 539 sizeof(*wed_amsdu), GFP_KERNEL); 540 if (!wed_amsdu) 541 return -ENOMEM; 542 543 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { 544 void *ptr; 545 546 /* each segment is 64K */ 547 ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | 548 __GFP_ZERO | __GFP_COMP | 549 GFP_DMA32, 550 get_order(MTK_WED_AMSDU_BUF_SIZE)); 551 if (!ptr) 552 goto error; 553 554 wed_amsdu[i].txd = ptr; 555 wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr, 556 MTK_WED_AMSDU_BUF_SIZE, 557 DMA_TO_DEVICE); 558 if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy)) 559 goto error; 560 } 561 dev->hw->wed_amsdu = wed_amsdu; 562 563 return 0; 564 565 error: 566 for (i--; i >= 0; i--) 567 dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy, 568 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); 569 return -ENOMEM; 570 } 571 572 static void 573 mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev) 574 { 575 struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; 576 int i; 577 578 if (!wed_amsdu) 579 return; 580 581 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { 582 dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy, 583 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); 584 free_pages((unsigned long)wed_amsdu[i].txd, 585 get_order(MTK_WED_AMSDU_BUF_SIZE)); 586 } 587 } 588 589 static int 590 mtk_wed_amsdu_init(struct mtk_wed_device *dev) 591 { 592 struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; 593 int i, ret; 594 595 if (!wed_amsdu) 596 return 0; 597 598 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) 599 wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i), 600 wed_amsdu[i].txd_phy); 601 602 /* init all sta parameter */ 603 wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL | 604 MTK_WED_AMSDU_STA_WTBL_HDRT_MODE | 605 FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN, 606 dev->wlan.amsdu_max_len >> 8) | 607 FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM, 608 dev->wlan.amsdu_max_subframes)); 609 610 wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT); 611 612 ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO, 613 MTK_WED_AMSDU_STA_INFO_DO_INIT); 614 if (ret) { 615 dev_err(dev->hw->dev, "amsdu initialization failed\n"); 616 return ret; 617 } 618 619 /* init partial amsdu offload txd src */ 620 wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG, 621 FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index)); 622 623 /* init qmem */ 624 wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET); 625 ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29)); 626 if (ret) { 627 
pr_info("%s: amsdu qmem initialization failed\n", __func__); 628 return ret; 629 } 630 631 /* Kite and Eagle E1 PCIE1 tx ring 22 flow control issue */ 632 if (dev->wlan.id == 0x7991 || dev->wlan.id == 0x7992) 633 wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING); 634 635 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 636 637 return 0; 638 } 639 640 static int 641 mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) 642 { 643 u32 desc_size = dev->hw->soc->tx_ring_desc_size; 644 int i, page_idx = 0, n_pages, ring_size; 645 int token = dev->wlan.token_start; 646 struct mtk_wed_buf *page_list; 647 dma_addr_t desc_phys; 648 void *desc_ptr; 649 650 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 651 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); 652 dev->tx_buf_ring.size = ring_size; 653 } else { 654 dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE; 655 ring_size = MTK_WED_TX_BM_PKT_CNT; 656 } 657 n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE; 658 659 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); 660 if (!page_list) 661 return -ENOMEM; 662 663 dev->tx_buf_ring.pages = page_list; 664 665 desc_ptr = dma_alloc_coherent(dev->hw->dev, 666 dev->tx_buf_ring.size * desc_size, 667 &desc_phys, GFP_KERNEL); 668 if (!desc_ptr) 669 return -ENOMEM; 670 671 dev->tx_buf_ring.desc = desc_ptr; 672 dev->tx_buf_ring.desc_phys = desc_phys; 673 674 for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { 675 dma_addr_t page_phys, buf_phys; 676 struct page *page; 677 void *buf; 678 int s; 679 680 page = __dev_alloc_page(GFP_KERNEL); 681 if (!page) 682 return -ENOMEM; 683 684 page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, 685 DMA_BIDIRECTIONAL); 686 if (dma_mapping_error(dev->hw->dev, page_phys)) { 687 __free_page(page); 688 return -ENOMEM; 689 } 690 691 page_list[page_idx].p = page; 692 page_list[page_idx++].phy_addr = page_phys; 693 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, 694 DMA_BIDIRECTIONAL); 695 696 buf = page_to_virt(page); 697 buf_phys = page_phys; 698 699 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { 700 struct mtk_wdma_desc *desc = desc_ptr; 701 u32 ctrl; 702 703 desc->buf0 = cpu_to_le32(buf_phys); 704 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 705 u32 txd_size; 706 707 txd_size = dev->wlan.init_buf(buf, buf_phys, 708 token++); 709 desc->buf1 = cpu_to_le32(buf_phys + txd_size); 710 ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size); 711 if (mtk_wed_is_v1(dev->hw)) 712 ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 | 713 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, 714 MTK_WED_BUF_SIZE - txd_size); 715 else 716 ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 | 717 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, 718 MTK_WED_BUF_SIZE - txd_size); 719 desc->info = 0; 720 } else { 721 ctrl = token << 16 | TX_DMA_PREP_ADDR64(buf_phys); 722 } 723 desc->ctrl = cpu_to_le32(ctrl); 724 725 desc_ptr += desc_size; 726 buf += MTK_WED_BUF_SIZE; 727 buf_phys += MTK_WED_BUF_SIZE; 728 } 729 730 dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, 731 DMA_BIDIRECTIONAL); 732 } 733 734 return 0; 735 } 736 737 static void 738 mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) 739 { 740 struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages; 741 struct mtk_wed_hw *hw = dev->hw; 742 int i, page_idx = 0; 743 744 if (!page_list) 745 return; 746 747 if (!dev->tx_buf_ring.desc) 748 goto free_pagelist; 749 750 for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { 751 dma_addr_t page_phy = page_list[page_idx].phy_addr; 752 void *page = page_list[page_idx++].p; 753 754 if 
(!page) 755 break; 756 757 dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE, 758 DMA_BIDIRECTIONAL); 759 __free_page(page); 760 } 761 762 dma_free_coherent(dev->hw->dev, 763 dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size, 764 dev->tx_buf_ring.desc, 765 dev->tx_buf_ring.desc_phys); 766 767 free_pagelist: 768 kfree(page_list); 769 } 770 771 static int 772 mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev) 773 { 774 int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE; 775 struct mtk_wed_buf *page_list; 776 struct mtk_wed_bm_desc *desc; 777 dma_addr_t desc_phys; 778 int i, page_idx = 0; 779 780 if (!dev->wlan.hw_rro) 781 return 0; 782 783 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); 784 if (!page_list) 785 return -ENOMEM; 786 787 dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); 788 dev->hw_rro.pages = page_list; 789 desc = dma_alloc_coherent(dev->hw->dev, 790 dev->wlan.rx_nbuf * sizeof(*desc), 791 &desc_phys, GFP_KERNEL); 792 if (!desc) 793 return -ENOMEM; 794 795 dev->hw_rro.desc = desc; 796 dev->hw_rro.desc_phys = desc_phys; 797 798 for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { 799 dma_addr_t page_phys, buf_phys; 800 struct page *page; 801 int s; 802 803 page = __dev_alloc_page(GFP_KERNEL); 804 if (!page) 805 return -ENOMEM; 806 807 page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, 808 DMA_BIDIRECTIONAL); 809 if (dma_mapping_error(dev->hw->dev, page_phys)) { 810 __free_page(page); 811 return -ENOMEM; 812 } 813 814 page_list[page_idx].p = page; 815 page_list[page_idx++].phy_addr = page_phys; 816 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, 817 DMA_BIDIRECTIONAL); 818 819 buf_phys = page_phys; 820 for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) { 821 desc->buf0 = cpu_to_le32(buf_phys); 822 desc->token = cpu_to_le32(RX_DMA_PREP_ADDR64(buf_phys)); 823 buf_phys += MTK_WED_PAGE_BUF_SIZE; 824 desc++; 825 } 826 827 dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, 828 DMA_BIDIRECTIONAL); 829 } 830 831 return 0; 832 } 833 834 static int 835 mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) 836 { 837 struct mtk_wed_bm_desc *desc; 838 dma_addr_t desc_phys; 839 840 dev->rx_buf_ring.size = dev->wlan.rx_nbuf; 841 desc = dma_alloc_coherent(dev->hw->dev, 842 dev->wlan.rx_nbuf * sizeof(*desc), 843 &desc_phys, GFP_KERNEL); 844 if (!desc) 845 return -ENOMEM; 846 847 dev->rx_buf_ring.desc = desc; 848 dev->rx_buf_ring.desc_phys = desc_phys; 849 dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); 850 851 return mtk_wed_hwrro_buffer_alloc(dev); 852 } 853 854 static void 855 mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev) 856 { 857 struct mtk_wed_buf *page_list = dev->hw_rro.pages; 858 struct mtk_wed_bm_desc *desc = dev->hw_rro.desc; 859 int i, page_idx = 0; 860 861 if (!dev->wlan.hw_rro) 862 return; 863 864 if (!page_list) 865 return; 866 867 if (!desc) 868 goto free_pagelist; 869 870 for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { 871 dma_addr_t buf_addr = page_list[page_idx].phy_addr; 872 void *page = page_list[page_idx++].p; 873 874 if (!page) 875 break; 876 877 dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, 878 DMA_BIDIRECTIONAL); 879 __free_page(page); 880 } 881 882 dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc), 883 desc, dev->hw_rro.desc_phys); 884 885 free_pagelist: 886 kfree(page_list); 887 } 888 889 static void 890 mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) 891 { 892 struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc; 893 894 if (!desc) 
895 return; 896 897 dev->wlan.release_rx_buf(dev); 898 dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), 899 desc, dev->rx_buf_ring.desc_phys); 900 901 mtk_wed_hwrro_free_buffer(dev); 902 } 903 904 static void 905 mtk_wed_hwrro_init(struct mtk_wed_device *dev) 906 { 907 if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) 908 return; 909 910 wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM, 911 FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128)); 912 913 wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys); 914 915 wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR, 916 MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX | 917 FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX, 918 MTK_WED_RX_PG_BM_CNT)); 919 920 /* enable rx_page_bm to fetch dmad */ 921 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); 922 } 923 924 static void 925 mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) 926 { 927 wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, 928 FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); 929 wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); 930 wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | 931 FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); 932 wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, 933 FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); 934 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 935 936 mtk_wed_hwrro_init(dev); 937 } 938 939 static void 940 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) 941 { 942 if (!ring->desc) 943 return; 944 945 dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size, 946 ring->desc, ring->desc_phys); 947 } 948 949 static void 950 mtk_wed_free_rx_rings(struct mtk_wed_device *dev) 951 { 952 mtk_wed_free_rx_buffer(dev); 953 mtk_wed_free_ring(dev, &dev->rro.ring); 954 } 955 956 static void 957 mtk_wed_free_tx_rings(struct mtk_wed_device *dev) 958 { 959 int i; 960 961 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) 962 mtk_wed_free_ring(dev, &dev->tx_ring[i]); 963 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 964 mtk_wed_free_ring(dev, &dev->rx_wdma[i]); 965 } 966 967 static void 968 mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) 969 { 970 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 971 972 switch (dev->hw->version) { 973 case 1: 974 mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; 975 break; 976 case 2: 977 mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | 978 MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | 979 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 980 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; 981 break; 982 case 3: 983 mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 984 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 985 break; 986 default: 987 break; 988 } 989 990 if (!dev->hw->num_flows) 991 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 992 993 wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? 
mask : 0); 994 wed_r32(dev, MTK_WED_EXT_INT_MASK); 995 } 996 997 static void 998 mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) 999 { 1000 if (!mtk_wed_is_v2(dev->hw)) 1001 return; 1002 1003 if (enable) { 1004 wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 1005 wed_w32(dev, MTK_WED_TXP_DW1, 1006 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103)); 1007 } else { 1008 wed_w32(dev, MTK_WED_TXP_DW1, 1009 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100)); 1010 wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 1011 } 1012 } 1013 1014 static int 1015 mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, 1016 struct mtk_wed_ring *ring) 1017 { 1018 int i; 1019 1020 for (i = 0; i < 3; i++) { 1021 u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX); 1022 1023 if (cur_idx == MTK_WED_RX_RING_SIZE - 1) 1024 break; 1025 1026 usleep_range(100000, 200000); 1027 } 1028 1029 if (i == 3) { 1030 dev_err(dev->hw->dev, "rx dma enable failed\n"); 1031 return -ETIMEDOUT; 1032 } 1033 1034 return 0; 1035 } 1036 1037 static void 1038 mtk_wed_dma_disable(struct mtk_wed_device *dev) 1039 { 1040 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1041 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 1042 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 1043 1044 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 1045 1046 wed_clr(dev, MTK_WED_GLO_CFG, 1047 MTK_WED_GLO_CFG_TX_DMA_EN | 1048 MTK_WED_GLO_CFG_RX_DMA_EN); 1049 1050 wdma_clr(dev, MTK_WDMA_GLO_CFG, 1051 MTK_WDMA_GLO_CFG_TX_DMA_EN | 1052 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 1053 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); 1054 1055 if (mtk_wed_is_v1(dev->hw)) { 1056 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); 1057 wdma_clr(dev, MTK_WDMA_GLO_CFG, 1058 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 1059 } else { 1060 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1061 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 1062 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 1063 1064 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1065 MTK_WED_WPDMA_RX_D_RX_DRV_EN); 1066 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1067 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 1068 1069 if (mtk_wed_is_v3_or_greater(dev->hw) && 1070 mtk_wed_get_rx_capa(dev)) { 1071 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, 1072 MTK_WDMA_PREF_TX_CFG_PREF_EN); 1073 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, 1074 MTK_WDMA_PREF_RX_CFG_PREF_EN); 1075 } 1076 } 1077 1078 mtk_wed_set_512_support(dev, false); 1079 } 1080 1081 static void 1082 mtk_wed_stop(struct mtk_wed_device *dev) 1083 { 1084 mtk_wed_dma_disable(dev); 1085 mtk_wed_set_ext_int(dev, false); 1086 1087 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); 1088 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); 1089 wdma_w32(dev, MTK_WDMA_INT_MASK, 0); 1090 wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); 1091 1092 if (!mtk_wed_get_rx_capa(dev)) 1093 return; 1094 1095 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); 1096 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); 1097 } 1098 1099 static void 1100 mtk_wed_deinit(struct mtk_wed_device *dev) 1101 { 1102 mtk_wed_stop(dev); 1103 1104 wed_clr(dev, MTK_WED_CTRL, 1105 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 1106 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 1107 MTK_WED_CTRL_WED_TX_BM_EN | 1108 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1109 1110 if (mtk_wed_is_v1(dev->hw)) 1111 return; 1112 1113 wed_clr(dev, MTK_WED_CTRL, 1114 MTK_WED_CTRL_RX_ROUTE_QM_EN | 1115 MTK_WED_CTRL_WED_RX_BM_EN | 1116 MTK_WED_CTRL_RX_RRO_QM_EN); 1117 1118 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1119 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 1120 wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU); 1121 wed_clr(dev, MTK_WED_PCIE_INT_CTRL, 1122 
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | 1123 MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER); 1124 } 1125 } 1126 1127 static void 1128 __mtk_wed_detach(struct mtk_wed_device *dev) 1129 { 1130 struct mtk_wed_hw *hw = dev->hw; 1131 1132 mtk_wed_deinit(dev); 1133 1134 mtk_wdma_rx_reset(dev); 1135 mtk_wed_reset(dev, MTK_WED_RESET_WED); 1136 mtk_wed_amsdu_free_buffer(dev); 1137 mtk_wed_free_tx_buffer(dev); 1138 mtk_wed_free_tx_rings(dev); 1139 1140 if (mtk_wed_get_rx_capa(dev)) { 1141 if (hw->wed_wo) 1142 mtk_wed_wo_reset(dev); 1143 mtk_wed_free_rx_rings(dev); 1144 if (hw->wed_wo) 1145 mtk_wed_wo_deinit(hw); 1146 } 1147 1148 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 1149 struct device_node *wlan_node; 1150 1151 wlan_node = dev->wlan.pci_dev->dev.of_node; 1152 if (of_dma_is_coherent(wlan_node) && hw->hifsys) 1153 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 1154 BIT(hw->index), BIT(hw->index)); 1155 } 1156 1157 if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) && 1158 hw->eth->dma_dev != hw->eth->dev) 1159 mtk_eth_set_dma_device(hw->eth, hw->eth->dev); 1160 1161 memset(dev, 0, sizeof(*dev)); 1162 module_put(THIS_MODULE); 1163 1164 hw->wed_dev = NULL; 1165 } 1166 1167 static void 1168 mtk_wed_detach(struct mtk_wed_device *dev) 1169 { 1170 mutex_lock(&hw_lock); 1171 __mtk_wed_detach(dev); 1172 mutex_unlock(&hw_lock); 1173 } 1174 1175 static void 1176 mtk_wed_bus_init(struct mtk_wed_device *dev) 1177 { 1178 switch (dev->wlan.bus_type) { 1179 case MTK_WED_BUS_PCIE: { 1180 struct device_node *np = dev->hw->eth->dev->of_node; 1181 1182 if (mtk_wed_is_v2(dev->hw)) { 1183 struct regmap *regs; 1184 1185 regs = syscon_regmap_lookup_by_phandle(np, 1186 "mediatek,wed-pcie"); 1187 if (IS_ERR(regs)) 1188 break; 1189 1190 regmap_update_bits(regs, 0, BIT(0), BIT(0)); 1191 } 1192 1193 if (dev->wlan.msi) { 1194 wed_w32(dev, MTK_WED_PCIE_CFG_INTM, 1195 dev->hw->pcie_base | 0xc08); 1196 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1197 dev->hw->pcie_base | 0xc04); 1198 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8)); 1199 } else { 1200 wed_w32(dev, MTK_WED_PCIE_CFG_INTM, 1201 dev->hw->pcie_base | 0x180); 1202 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1203 dev->hw->pcie_base | 0x184); 1204 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); 1205 } 1206 1207 wed_w32(dev, MTK_WED_PCIE_INT_CTRL, 1208 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); 1209 1210 /* pcie interrupt control: pola/source selection */ 1211 wed_set(dev, MTK_WED_PCIE_INT_CTRL, 1212 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | 1213 MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER | 1214 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1215 dev->hw->index)); 1216 break; 1217 } 1218 case MTK_WED_BUS_AXI: 1219 wed_set(dev, MTK_WED_WPDMA_INT_CTRL, 1220 MTK_WED_WPDMA_INT_CTRL_SIG_SRC | 1221 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0)); 1222 break; 1223 default: 1224 break; 1225 } 1226 } 1227 1228 static void 1229 mtk_wed_set_wpdma(struct mtk_wed_device *dev) 1230 { 1231 int i; 1232 1233 if (mtk_wed_is_v1(dev->hw)) { 1234 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); 1235 return; 1236 } 1237 1238 mtk_wed_bus_init(dev); 1239 1240 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); 1241 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); 1242 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); 1243 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); 1244 1245 if (!mtk_wed_get_rx_capa(dev)) 1246 return; 1247 1248 wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); 1249 wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[0], 1250 
dev->wlan.wpdma_rx[0]); 1251 if (mtk_wed_is_v3_or_greater(dev->hw)) 1252 wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[1], 1253 dev->wlan.wpdma_rx[1]); 1254 1255 if (!dev->wlan.hw_rro) 1256 return; 1257 1258 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]); 1259 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]); 1260 for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) 1261 wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i), 1262 dev->wlan.wpdma_rx_pg + i * 0x10); 1263 } 1264 1265 static void 1266 mtk_wed_hw_init_early(struct mtk_wed_device *dev) 1267 { 1268 u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2); 1269 u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE; 1270 1271 mtk_wed_deinit(dev); 1272 mtk_wed_reset(dev, MTK_WED_RESET_WED); 1273 mtk_wed_set_wpdma(dev); 1274 1275 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 1276 mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | 1277 MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; 1278 set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | 1279 MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; 1280 } 1281 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); 1282 1283 if (mtk_wed_is_v1(dev->hw)) { 1284 u32 offset = dev->hw->index ? 0x04000400 : 0; 1285 1286 wdma_set(dev, MTK_WDMA_GLO_CFG, 1287 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 1288 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | 1289 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 1290 1291 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); 1292 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); 1293 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1294 MTK_PCIE_BASE(dev->hw->index)); 1295 } else { 1296 wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy); 1297 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT); 1298 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 1299 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS, 1300 MTK_WDMA_INT_STATUS) | 1301 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG, 1302 MTK_WDMA_GLO_CFG)); 1303 1304 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 1305 FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL, 1306 MTK_WDMA_RING_TX(0)) | 1307 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL, 1308 MTK_WDMA_RING_RX(0))); 1309 } 1310 } 1311 1312 static int 1313 mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 1314 int size) 1315 { 1316 ring->desc = dma_alloc_coherent(dev->hw->dev, 1317 size * sizeof(*ring->desc), 1318 &ring->desc_phys, GFP_KERNEL); 1319 if (!ring->desc) 1320 return -ENOMEM; 1321 1322 ring->desc_size = sizeof(*ring->desc); 1323 ring->size = size; 1324 1325 return 0; 1326 } 1327 1328 #define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) 1329 static int 1330 mtk_wed_rro_alloc(struct mtk_wed_device *dev) 1331 { 1332 struct resource res; 1333 int ret; 1334 1335 ret = of_reserved_mem_region_to_resource_byname(dev->hw->node, "wo-dlm", &res); 1336 if (ret) 1337 return ret; 1338 1339 dev->rro.miod_phys = res.start; 1340 dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; 1341 1342 return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring, 1343 MTK_WED_RRO_QUE_CNT); 1344 } 1345 1346 static int 1347 mtk_wed_rro_cfg(struct mtk_wed_device *dev) 1348 { 1349 struct mtk_wed_wo *wo = dev->hw->wed_wo; 1350 struct { 1351 struct { 1352 __le32 base; 1353 __le32 cnt; 1354 __le32 unit; 1355 } ring[2]; 1356 __le32 wed; 1357 u8 version; 1358 } req = { 1359 .ring[0] = { 1360 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE), 1361 .cnt = cpu_to_le32(MTK_WED_MIOD_CNT), 1362 .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT), 1363 }, 1364 .ring[1] = { 1365 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE + 1366 MTK_WED_MIOD_COUNT), 1367 .cnt = 
cpu_to_le32(MTK_WED_FB_CMD_CNT), 1368 .unit = cpu_to_le32(4), 1369 }, 1370 }; 1371 1372 return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1373 MTK_WED_WO_CMD_WED_CFG, 1374 &req, sizeof(req), true); 1375 } 1376 1377 static void 1378 mtk_wed_rro_hw_init(struct mtk_wed_device *dev) 1379 { 1380 wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, 1381 FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | 1382 FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | 1383 FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, 1384 MTK_WED_MIOD_ENTRY_CNT >> 2)); 1385 1386 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys); 1387 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, 1388 FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); 1389 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys); 1390 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, 1391 FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); 1392 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); 1393 wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys); 1394 1395 wed_set(dev, MTK_WED_RROQM_RST_IDX, 1396 MTK_WED_RROQM_RST_IDX_MIOD | 1397 MTK_WED_RROQM_RST_IDX_FDBK); 1398 1399 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 1400 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); 1401 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 1402 } 1403 1404 static void 1405 mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) 1406 { 1407 wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); 1408 1409 for (;;) { 1410 usleep_range(100, 200); 1411 if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) 1412 break; 1413 } 1414 1415 /* configure RX_ROUTE_QM */ 1416 if (mtk_wed_is_v2(dev->hw)) { 1417 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1418 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); 1419 wed_set(dev, MTK_WED_RTQM_GLO_CFG, 1420 FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 1421 0x3 + dev->hw->index)); 1422 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1423 } else { 1424 wed_set(dev, MTK_WED_RTQM_ENQ_CFG0, 1425 FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 1426 0x3 + dev->hw->index)); 1427 } 1428 /* enable RX_ROUTE_QM */ 1429 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 1430 } 1431 1432 static void 1433 mtk_wed_hw_init(struct mtk_wed_device *dev) 1434 { 1435 if (dev->init_done) 1436 return; 1437 1438 dev->init_done = true; 1439 mtk_wed_set_ext_int(dev, false); 1440 1441 wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); 1442 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); 1443 1444 if (mtk_wed_is_v1(dev->hw)) { 1445 wed_w32(dev, MTK_WED_TX_BM_CTRL, 1446 MTK_WED_TX_BM_CTRL_PAUSE | 1447 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, 1448 dev->tx_buf_ring.size / 128) | 1449 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, 1450 MTK_WED_TX_RING_SIZE / 256)); 1451 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 1452 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | 1453 MTK_WED_TX_BM_DYN_THR_HI); 1454 } else if (mtk_wed_is_v2(dev->hw)) { 1455 wed_w32(dev, MTK_WED_TX_BM_CTRL, 1456 MTK_WED_TX_BM_CTRL_PAUSE | 1457 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, 1458 dev->tx_buf_ring.size / 128) | 1459 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, 1460 MTK_WED_TX_RING_SIZE / 256)); 1461 wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, 1462 FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | 1463 MTK_WED_TX_TKID_DYN_THR_HI); 1464 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 1465 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | 1466 MTK_WED_TX_BM_DYN_THR_HI_V2); 1467 wed_w32(dev, MTK_WED_TX_TKID_CTRL, 1468 MTK_WED_TX_TKID_CTRL_PAUSE | 1469 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, 
1470 dev->tx_buf_ring.size / 128) | 1471 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, 1472 dev->tx_buf_ring.size / 128)); 1473 } 1474 1475 wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid, 1476 FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) | 1477 FIELD_PREP(MTK_WED_TX_BM_TKID_END, 1478 dev->wlan.token_start + dev->wlan.nbuf - 1)); 1479 1480 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 1481 1482 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1483 /* switch to new bm architecture */ 1484 wed_clr(dev, MTK_WED_TX_BM_CTRL, 1485 MTK_WED_TX_BM_CTRL_LEGACY_EN); 1486 1487 wed_w32(dev, MTK_WED_TX_TKID_CTRL, 1488 MTK_WED_TX_TKID_CTRL_PAUSE | 1489 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3, 1490 dev->wlan.nbuf / 128) | 1491 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3, 1492 dev->wlan.nbuf / 128)); 1493 /* return SKBID + SDP back to bm */ 1494 wed_set(dev, MTK_WED_TX_TKID_CTRL, 1495 MTK_WED_TX_TKID_CTRL_FREE_FORMAT); 1496 1497 wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, 1498 MTK_WED_TX_BM_PKT_CNT | 1499 MTK_WED_TX_BM_INIT_SW_TAIL_IDX); 1500 } 1501 1502 if (mtk_wed_is_v1(dev->hw)) { 1503 wed_set(dev, MTK_WED_CTRL, 1504 MTK_WED_CTRL_WED_TX_BM_EN | 1505 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1506 } else if (mtk_wed_get_rx_capa(dev)) { 1507 /* rx hw init */ 1508 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1509 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 1510 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 1511 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 1512 1513 /* reset prefetch index of ring */ 1514 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, 1515 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1516 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, 1517 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1518 1519 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, 1520 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1521 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, 1522 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1523 1524 /* reset prefetch FIFO of ring */ 1525 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 1526 MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR | 1527 MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR); 1528 wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0); 1529 1530 mtk_wed_rx_buffer_hw_init(dev); 1531 mtk_wed_rro_hw_init(dev); 1532 mtk_wed_route_qm_hw_init(dev); 1533 } 1534 1535 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); 1536 if (!mtk_wed_is_v1(dev->hw)) 1537 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); 1538 } 1539 1540 static void 1541 mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) 1542 { 1543 void *head = (void *)ring->desc; 1544 int i; 1545 1546 for (i = 0; i < size; i++) { 1547 struct mtk_wdma_desc *desc; 1548 1549 desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); 1550 desc->buf0 = 0; 1551 if (tx) 1552 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1553 else 1554 desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); 1555 desc->buf1 = 0; 1556 desc->info = 0; 1557 } 1558 } 1559 1560 static int 1561 mtk_wed_rx_reset(struct mtk_wed_device *dev) 1562 { 1563 struct mtk_wed_wo *wo = dev->hw->wed_wo; 1564 u8 val = MTK_WED_WO_STATE_SER_RESET; 1565 int i, ret; 1566 1567 ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1568 MTK_WED_WO_CMD_CHANGE_STATE, &val, 1569 sizeof(val), true); 1570 if (ret) 1571 return ret; 1572 1573 if (dev->wlan.hw_rro) { 1574 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); 1575 mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS, 1576 MTK_WED_RX_IND_CMD_BUSY); 1577 mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG); 1578 } 1579 1580 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 
MTK_WED_WPDMA_RX_D_RX_DRV_EN); 1581 ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1582 MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); 1583 if (!ret && mtk_wed_is_v3_or_greater(dev->hw)) 1584 ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1585 MTK_WED_WPDMA_RX_D_PREF_BUSY); 1586 if (ret) { 1587 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1588 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); 1589 } else { 1590 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1591 /* 1.a. disable prefetch HW */ 1592 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1593 MTK_WED_WPDMA_RX_D_PREF_EN); 1594 mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1595 MTK_WED_WPDMA_RX_D_PREF_BUSY); 1596 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1597 MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL); 1598 } 1599 1600 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1601 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 1602 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 1603 1604 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1605 MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | 1606 MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); 1607 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1608 MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | 1609 MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); 1610 1611 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 1612 } 1613 1614 /* reset rro qm */ 1615 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 1616 ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1617 MTK_WED_CTRL_RX_RRO_QM_BUSY); 1618 if (ret) { 1619 mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM); 1620 } else { 1621 wed_set(dev, MTK_WED_RROQM_RST_IDX, 1622 MTK_WED_RROQM_RST_IDX_MIOD | 1623 MTK_WED_RROQM_RST_IDX_FDBK); 1624 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 1625 } 1626 1627 if (dev->wlan.hw_rro) { 1628 /* disable rro msdu page drv */ 1629 wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1630 MTK_WED_RRO_MSDU_PG_DRV_EN); 1631 1632 /* disable rro data drv */ 1633 wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); 1634 1635 /* rro msdu page drv reset */ 1636 wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1637 MTK_WED_RRO_MSDU_PG_DRV_CLR); 1638 mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1639 MTK_WED_RRO_MSDU_PG_DRV_CLR); 1640 1641 /* rro data drv reset */ 1642 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2), 1643 MTK_WED_RRO_RX_D_DRV_CLR); 1644 mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2), 1645 MTK_WED_RRO_RX_D_DRV_CLR); 1646 } 1647 1648 /* reset route qm */ 1649 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 1650 ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1651 MTK_WED_CTRL_RX_ROUTE_QM_BUSY); 1652 if (ret) { 1653 mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); 1654 } else if (mtk_wed_is_v3_or_greater(dev->hw)) { 1655 wed_set(dev, MTK_WED_RTQM_RST, BIT(0)); 1656 wed_clr(dev, MTK_WED_RTQM_RST, BIT(0)); 1657 mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); 1658 } else { 1659 wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1660 } 1661 1662 /* reset tx wdma */ 1663 mtk_wdma_tx_reset(dev); 1664 1665 /* reset tx wdma drv */ 1666 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); 1667 if (mtk_wed_is_v3_or_greater(dev->hw)) 1668 mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS, 1669 MTK_WED_WPDMA_STATUS_TX_DRV); 1670 else 1671 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1672 MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); 1673 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); 1674 1675 /* reset wed rx dma */ 1676 ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, 1677 MTK_WED_GLO_CFG_RX_DMA_BUSY); 1678 wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN); 1679 if (ret) { 1680 mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); 1681 } else { 1682 
wed_set(dev, MTK_WED_RESET_IDX, 1683 dev->hw->soc->regmap.reset_idx_rx_mask); 1684 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1685 } 1686 1687 /* reset rx bm */ 1688 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 1689 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1690 MTK_WED_CTRL_WED_RX_BM_BUSY); 1691 mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); 1692 1693 if (dev->wlan.hw_rro) { 1694 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); 1695 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1696 MTK_WED_CTRL_WED_RX_PG_BM_BUSY); 1697 wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); 1698 wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); 1699 } 1700 1701 /* wo change to enable state */ 1702 val = MTK_WED_WO_STATE_ENABLE; 1703 ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1704 MTK_WED_WO_CMD_CHANGE_STATE, &val, 1705 sizeof(val), true); 1706 if (ret) 1707 return ret; 1708 1709 /* wed_rx_ring_reset */ 1710 for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) { 1711 if (!dev->rx_ring[i].desc) 1712 continue; 1713 1714 mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE, 1715 false); 1716 } 1717 mtk_wed_free_rx_buffer(dev); 1718 mtk_wed_hwrro_free_buffer(dev); 1719 1720 return 0; 1721 } 1722 1723 static void 1724 mtk_wed_reset_dma(struct mtk_wed_device *dev) 1725 { 1726 bool busy = false; 1727 u32 val; 1728 int i; 1729 1730 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { 1731 if (!dev->tx_ring[i].desc) 1732 continue; 1733 1734 mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE, 1735 true); 1736 } 1737 1738 /* 1. reset WED tx DMA */ 1739 wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN); 1740 busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, 1741 MTK_WED_GLO_CFG_TX_DMA_BUSY); 1742 if (busy) { 1743 mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); 1744 } else { 1745 wed_w32(dev, MTK_WED_RESET_IDX, 1746 dev->hw->soc->regmap.reset_idx_tx_mask); 1747 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1748 } 1749 1750 /* 2. reset WDMA rx DMA */ 1751 busy = !!mtk_wdma_rx_reset(dev); 1752 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1753 val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE | 1754 wed_r32(dev, MTK_WED_WDMA_GLO_CFG); 1755 val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN; 1756 wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val); 1757 } else { 1758 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1759 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 1760 } 1761 1762 if (!busy) 1763 busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, 1764 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); 1765 if (!busy && mtk_wed_is_v3_or_greater(dev->hw)) 1766 busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, 1767 MTK_WED_WDMA_RX_PREF_BUSY); 1768 1769 if (busy) { 1770 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); 1771 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); 1772 } else { 1773 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1774 /* 1.a. disable prefetch HW */ 1775 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 1776 MTK_WED_WDMA_RX_PREF_EN); 1777 mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, 1778 MTK_WED_WDMA_RX_PREF_BUSY); 1779 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 1780 MTK_WED_WDMA_RX_PREF_DDONE2_EN); 1781 1782 /* 2. Reset dma index */ 1783 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 1784 MTK_WED_WDMA_RESET_IDX_RX_ALL); 1785 } 1786 1787 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 1788 MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); 1789 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); 1790 1791 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 1792 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1793 1794 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1795 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1796 } 1797 1798 /* 3. 
reset WED WPDMA tx */ 1799 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1800 1801 for (i = 0; i < 100; i++) { 1802 if (mtk_wed_is_v1(dev->hw)) 1803 val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, 1804 wed_r32(dev, MTK_WED_TX_BM_INTF)); 1805 else 1806 val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, 1807 wed_r32(dev, MTK_WED_TX_TKID_INTF)); 1808 if (val == 0x40) 1809 break; 1810 } 1811 1812 mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); 1813 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN); 1814 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 1815 1816 /* 4. reset WED WPDMA tx */ 1817 busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, 1818 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY); 1819 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1820 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 1821 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 1822 if (!busy) 1823 busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, 1824 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY); 1825 1826 if (busy) { 1827 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1828 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); 1829 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); 1830 if (mtk_wed_is_v3_or_greater(dev->hw)) 1831 wed_w32(dev, MTK_WED_RX1_CTRL2, 0); 1832 } else { 1833 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 1834 MTK_WED_WPDMA_RESET_IDX_TX | 1835 MTK_WED_WPDMA_RESET_IDX_RX); 1836 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); 1837 } 1838 1839 dev->init_done = false; 1840 if (mtk_wed_is_v1(dev->hw)) 1841 return; 1842 1843 if (!busy) { 1844 wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX); 1845 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1846 } 1847 1848 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1849 /* reset amsdu engine */ 1850 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 1851 mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU); 1852 } 1853 1854 if (mtk_wed_get_rx_capa(dev)) 1855 mtk_wed_rx_reset(dev); 1856 } 1857 1858 static int 1859 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 1860 int size, u32 desc_size, bool tx) 1861 { 1862 ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, 1863 &ring->desc_phys, GFP_KERNEL); 1864 if (!ring->desc) 1865 return -ENOMEM; 1866 1867 ring->desc_size = desc_size; 1868 ring->size = size; 1869 mtk_wed_ring_reset(ring, size, tx); 1870 1871 return 0; 1872 } 1873 1874 static int 1875 mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, 1876 bool reset) 1877 { 1878 struct mtk_wed_ring *wdma; 1879 1880 if (idx >= ARRAY_SIZE(dev->rx_wdma)) 1881 return -EINVAL; 1882 1883 wdma = &dev->rx_wdma[idx]; 1884 if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, 1885 dev->hw->soc->wdma_desc_size, true)) 1886 return -ENOMEM; 1887 1888 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1889 wdma->desc_phys); 1890 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1891 size); 1892 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1893 1894 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1895 wdma->desc_phys); 1896 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1897 size); 1898 1899 return 0; 1900 } 1901 1902 static int 1903 mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, 1904 bool reset) 1905 { 1906 struct mtk_wed_ring *wdma; 1907 1908 if (idx >= ARRAY_SIZE(dev->tx_wdma)) 1909 return -EINVAL; 1910 1911 wdma = &dev->tx_wdma[idx]; 1912 if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, 1913 dev->hw->soc->wdma_desc_size, true)) 1914 return -ENOMEM; 1915 1916 
if (mtk_wed_is_v3_or_greater(dev->hw)) { 1917 struct mtk_wdma_desc *desc = wdma->desc; 1918 int i; 1919 1920 for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) { 1921 desc->buf0 = 0; 1922 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1923 desc->buf1 = 0; 1924 desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE); 1925 desc++; 1926 desc->buf0 = 0; 1927 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1928 desc->buf1 = 0; 1929 desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE); 1930 desc++; 1931 } 1932 } 1933 1934 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 1935 wdma->desc_phys); 1936 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 1937 size); 1938 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1939 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); 1940 1941 if (reset) 1942 mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true); 1943 1944 if (!idx) { 1945 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, 1946 wdma->desc_phys); 1947 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, 1948 size); 1949 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, 1950 0); 1951 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, 1952 0); 1953 } 1954 1955 return 0; 1956 } 1957 1958 static void 1959 mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, 1960 u32 reason, u32 hash) 1961 { 1962 struct mtk_eth *eth = dev->hw->eth; 1963 struct ethhdr *eh; 1964 1965 if (!skb) 1966 return; 1967 1968 if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) 1969 return; 1970 1971 skb_set_mac_header(skb, 0); 1972 eh = eth_hdr(skb); 1973 skb->protocol = eh->h_proto; 1974 mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); 1975 } 1976 1977 static void 1978 mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) 1979 { 1980 u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); 1981 1982 /* wed control cr set */ 1983 wed_set(dev, MTK_WED_CTRL, 1984 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 1985 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 1986 MTK_WED_CTRL_WED_TX_BM_EN | 1987 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1988 1989 if (mtk_wed_is_v1(dev->hw)) { 1990 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, 1991 MTK_WED_PCIE_INT_TRIGGER_STATUS); 1992 1993 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 1994 MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | 1995 MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); 1996 1997 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); 1998 } else { 1999 if (mtk_wed_is_v3_or_greater(dev->hw)) 2000 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); 2001 2002 /* initial tx interrupt trigger */ 2003 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, 2004 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | 2005 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | 2006 MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN | 2007 MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR | 2008 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG, 2009 dev->wlan.tx_tbit[0]) | 2010 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, 2011 dev->wlan.tx_tbit[1])); 2012 2013 /* initial txfree interrupt trigger */ 2014 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, 2015 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | 2016 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | 2017 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, 2018 dev->wlan.txfree_tbit)); 2019 2020 if (mtk_wed_get_rx_capa(dev)) { 2021 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, 2022 MTK_WED_WPDMA_INT_CTRL_RX0_EN | 2023 MTK_WED_WPDMA_INT_CTRL_RX0_CLR | 2024 MTK_WED_WPDMA_INT_CTRL_RX1_EN | 2025 MTK_WED_WPDMA_INT_CTRL_RX1_CLR | 2026 
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, 2027 dev->wlan.rx_tbit[0]) | 2028 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, 2029 dev->wlan.rx_tbit[1])); 2030 2031 wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, 2032 GENMASK(1, 0)); 2033 } 2034 2035 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); 2036 wed_set(dev, MTK_WED_WDMA_INT_CTRL, 2037 FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, 2038 dev->wdma_idx)); 2039 } 2040 2041 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); 2042 2043 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); 2044 wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); 2045 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); 2046 wed_w32(dev, MTK_WED_INT_MASK, irq_mask); 2047 } 2048 2049 #define MTK_WFMDA_RX_DMA_EN BIT(2) 2050 static void 2051 mtk_wed_dma_enable(struct mtk_wed_device *dev) 2052 { 2053 int i; 2054 2055 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 2056 wed_set(dev, MTK_WED_WPDMA_INT_CTRL, 2057 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); 2058 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 2059 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 2060 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 2061 wdma_set(dev, MTK_WDMA_GLO_CFG, 2062 MTK_WDMA_GLO_CFG_TX_DMA_EN | 2063 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 2064 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); 2065 wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED); 2066 } else { 2067 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 2068 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 2069 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN | 2070 MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR); 2071 wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); 2072 } 2073 2074 wed_set(dev, MTK_WED_GLO_CFG, 2075 MTK_WED_GLO_CFG_TX_DMA_EN | 2076 MTK_WED_GLO_CFG_RX_DMA_EN); 2077 2078 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 2079 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 2080 2081 if (mtk_wed_is_v1(dev->hw)) { 2082 wdma_set(dev, MTK_WDMA_GLO_CFG, 2083 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 2084 return; 2085 } 2086 2087 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 2088 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 2089 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 2090 2091 if (mtk_wed_is_v3_or_greater(dev->hw)) { 2092 wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, 2093 FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) | 2094 FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8)); 2095 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 2096 MTK_WED_WDMA_RX_PREF_DDONE2_EN); 2097 wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN); 2098 2099 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 2100 MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST); 2101 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 2102 MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK | 2103 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK | 2104 MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4); 2105 2106 wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); 2107 wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); 2108 } 2109 2110 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 2111 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | 2112 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); 2113 2114 if (!mtk_wed_get_rx_capa(dev)) 2115 return; 2116 2117 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 2118 MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | 2119 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 2120 2121 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN); 2122 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 2123 MTK_WED_WPDMA_RX_D_RX_DRV_EN | 2124 FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | 2125 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2)); 2126 2127 if (mtk_wed_is_v3_or_greater(dev->hw)) { 2128 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 2129 MTK_WED_WPDMA_RX_D_PREF_EN | 
2130 FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) | 2131 FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8)); 2132 2133 wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); 2134 wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); 2135 wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); 2136 } 2137 2138 for (i = 0; i < MTK_WED_RX_QUEUES; i++) { 2139 struct mtk_wed_ring *ring = &dev->rx_ring[i]; 2140 u32 val; 2141 2142 if (!(ring->flags & MTK_WED_RING_CONFIGURED)) 2143 continue; /* queue is not configured by mt76 */ 2144 2145 if (mtk_wed_check_wfdma_rx_fill(dev, ring)) { 2146 dev_err(dev->hw->dev, 2147 "rx_ring(%d) dma enable failed\n", i); 2148 continue; 2149 } 2150 2151 val = wifi_r32(dev, 2152 dev->wlan.wpdma_rx_glo - 2153 dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN; 2154 wifi_w32(dev, 2155 dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, 2156 val); 2157 } 2158 } 2159 2160 static void 2161 mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset) 2162 { 2163 int i; 2164 2165 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); 2166 wed_w32(dev, MTK_WED_INT_MASK, irq_mask); 2167 2168 if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) 2169 return; 2170 2171 if (reset) { 2172 wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 2173 MTK_WED_RRO_MSDU_PG_DRV_EN); 2174 return; 2175 } 2176 2177 wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR); 2178 wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 2179 MTK_WED_RRO_MSDU_PG_DRV_CLR); 2180 2181 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX, 2182 MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN | 2183 MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR | 2184 MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN | 2185 MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR | 2186 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG, 2187 dev->wlan.rro_rx_tbit[0]) | 2188 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG, 2189 dev->wlan.rro_rx_tbit[1])); 2190 2191 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG, 2192 MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN | 2193 MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR | 2194 MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN | 2195 MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR | 2196 MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN | 2197 MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR | 2198 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG, 2199 dev->wlan.rx_pg_tbit[0]) | 2200 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG, 2201 dev->wlan.rx_pg_tbit[1]) | 2202 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG, 2203 dev->wlan.rx_pg_tbit[2])); 2204 2205 /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after 2206 * WM FWDL has completed; otherwise the RRO_MSDU_PG ring may be broken 2207 */ 2208 wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 2209 MTK_WED_RRO_MSDU_PG_DRV_EN); 2210 2211 for (i = 0; i < MTK_WED_RX_QUEUES; i++) { 2212 struct mtk_wed_ring *ring = &dev->rx_rro_ring[i]; 2213 2214 if (!(ring->flags & MTK_WED_RING_CONFIGURED)) 2215 continue; 2216 2217 if (mtk_wed_check_wfdma_rx_fill(dev, ring)) 2218 dev_err(dev->hw->dev, 2219 "rx_rro_ring(%d) initialization failed\n", i); 2220 } 2221 2222 for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) { 2223 struct mtk_wed_ring *ring = &dev->rx_page_ring[i]; 2224 2225 if (!(ring->flags & MTK_WED_RING_CONFIGURED)) 2226 continue; 2227 2228 if (mtk_wed_check_wfdma_rx_fill(dev, ring)) 2229 dev_err(dev->hw->dev, 2230 "rx_page_ring(%d) initialization failed\n", i); 2231 } 2232 } 2233 2234 static void 2235 mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, 2236 void __iomem *regs) 2237 { 2238 struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx]; 2239 2240
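	/* mirror the WLAN-provided RRO RX ring base and size into the WED
	 * register view and mark the ring as configured
	 */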
ring->wpdma = regs; 2241 wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE, 2242 readl(regs)); 2243 wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT, 2244 readl(regs + MTK_WED_RING_OFS_COUNT)); 2245 ring->flags |= MTK_WED_RING_CONFIGURED; 2246 } 2247 2248 static void 2249 mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) 2250 { 2251 struct mtk_wed_ring *ring = &dev->rx_page_ring[idx]; 2252 2253 ring->wpdma = regs; 2254 wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE, 2255 readl(regs)); 2256 wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT, 2257 readl(regs + MTK_WED_RING_OFS_COUNT)); 2258 ring->flags |= MTK_WED_RING_CONFIGURED; 2259 } 2260 2261 static int 2262 mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) 2263 { 2264 struct mtk_wed_ring *ring = &dev->ind_cmd_ring; 2265 u32 val = readl(regs + MTK_WED_RING_OFS_COUNT); 2266 int i, count = 0; 2267 2268 ring->wpdma = regs; 2269 wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE, 2270 readl(regs) & 0xfffffff0); 2271 2272 wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT, 2273 readl(regs + MTK_WED_RING_OFS_COUNT)); 2274 2275 /* ack sn cr */ 2276 wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base + 2277 dev->wlan.ind_cmd.ack_sn_addr); 2278 wed_w32(dev, MTK_WED_RRO_CFG1, 2279 FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ, 2280 dev->wlan.ind_cmd.win_size) | 2281 FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID, 2282 dev->wlan.ind_cmd.particular_sid)); 2283 2284 /* particular session addr element */ 2285 wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, 2286 dev->wlan.ind_cmd.particular_se_phys); 2287 2288 for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) { 2289 wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA, 2290 dev->wlan.ind_cmd.addr_elem_phys[i] >> 4); 2291 wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG, 2292 MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f)); 2293 2294 val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); 2295 while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100) 2296 val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); 2297 if (count >= 100) 2298 dev_err(dev->hw->dev, 2299 "write ba session base failed\n"); 2300 } 2301 2302 /* pn check init */ 2303 for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) { 2304 wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M, 2305 MTK_WED_PN_CHECK_IS_FIRST); 2306 2307 wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR | 2308 FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i)); 2309 2310 count = 0; 2311 val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); 2312 while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100) 2313 val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); 2314 if (count >= 100) 2315 dev_err(dev->hw->dev, 2316 "session(%d) initialization failed\n", i); 2317 } 2318 2319 wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN); 2320 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); 2321 2322 return 0; 2323 } 2324 2325 static void 2326 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) 2327 { 2328 int i; 2329 2330 if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev)) 2331 return; 2332 2333 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 2334 if (!dev->rx_wdma[i].desc) 2335 mtk_wed_wdma_rx_ring_setup(dev, i, 16, false); 2336 2337 if (dev->wlan.hw_rro) { 2338 for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) { 2339 u32 addr = MTK_WED_RRO_MSDU_PG_CTRL0(i) + 2340 MTK_WED_RING_OFS_COUNT; 2341 2342 if (!wed_r32(dev, addr)) 2343 wed_w32(dev, addr, 1); 2344 } 2345 } 2346 2347 mtk_wed_hw_init(dev); 2348 
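	/* route WDMA/WPDMA completions through the WED interrupt agents and
	 * apply the caller-provided interrupt mask
	 */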
mtk_wed_configure_irq(dev, irq_mask); 2349 2350 mtk_wed_set_ext_int(dev, true); 2351 2352 if (mtk_wed_is_v1(dev->hw)) { 2353 u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | 2354 FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, 2355 dev->hw->index); 2356 2357 val |= BIT(0) | (BIT(1) * !!dev->hw->index); 2358 regmap_write(dev->hw->mirror, dev->hw->index * 4, val); 2359 } else if (mtk_wed_get_rx_capa(dev)) { 2360 /* the driver sets the WPDMA mid-ready interrupt masks only once */ 2361 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 2362 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 2363 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 2364 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 2365 2366 wed_r32(dev, MTK_WED_EXT_INT_MASK1); 2367 wed_r32(dev, MTK_WED_EXT_INT_MASK2); 2368 2369 if (mtk_wed_is_v3_or_greater(dev->hw)) { 2370 wed_w32(dev, MTK_WED_EXT_INT_MASK3, 2371 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 2372 wed_r32(dev, MTK_WED_EXT_INT_MASK3); 2373 } 2374 2375 if (mtk_wed_rro_cfg(dev)) 2376 return; 2377 } 2378 2379 mtk_wed_set_512_support(dev, dev->wlan.wcid_512); 2380 mtk_wed_amsdu_init(dev); 2381 2382 mtk_wed_dma_enable(dev); 2383 dev->running = true; 2384 } 2385 2386 static int 2387 mtk_wed_attach(struct mtk_wed_device *dev) 2388 __releases(RCU) 2389 { 2390 struct mtk_wed_hw *hw; 2391 struct device *device; 2392 int ret = 0; 2393 2394 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), 2395 "mtk_wed_attach without holding the RCU read lock"); 2396 2397 if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE && 2398 pci_domain_nr(dev->wlan.pci_dev->bus) > 1) || 2399 !try_module_get(THIS_MODULE)) 2400 ret = -ENODEV; 2401 2402 rcu_read_unlock(); 2403 2404 if (ret) 2405 return ret; 2406 2407 mutex_lock(&hw_lock); 2408 2409 hw = mtk_wed_assign(dev); 2410 if (!hw) { 2411 module_put(THIS_MODULE); 2412 ret = -ENODEV; 2413 goto unlock; 2414 } 2415 2416 device = dev->wlan.bus_type == MTK_WED_BUS_PCIE 2417 ? &dev->wlan.pci_dev->dev 2418 : &dev->wlan.platform_dev->dev; 2419 dev_info(device, "attaching wed device %d version %d\n", 2420 hw->index, hw->version); 2421 2422 dev->hw = hw; 2423 dev->dev = hw->dev; 2424 dev->irq = hw->irq; 2425 dev->wdma_idx = hw->index; 2426 dev->version = hw->version; 2427 dev->hw->pcie_base = mtk_wed_get_pcie_base(dev); 2428 2429 if (hw->eth->dma_dev == hw->eth->dev && 2430 of_dma_is_coherent(hw->eth->dev->of_node)) 2431 mtk_eth_set_dma_device(hw->eth, hw->dev); 2432 2433 ret = mtk_wed_tx_buffer_alloc(dev); 2434 if (ret) 2435 goto out; 2436 2437 ret = mtk_wed_amsdu_buffer_alloc(dev); 2438 if (ret) 2439 goto out; 2440 2441 if (mtk_wed_get_rx_capa(dev)) { 2442 ret = mtk_wed_rro_alloc(dev); 2443 if (ret) 2444 goto out; 2445 } 2446 2447 mtk_wed_hw_init_early(dev); 2448 if (mtk_wed_is_v1(hw)) 2449 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 2450 BIT(hw->index), 0); 2451 else 2452 dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); 2453 2454 if (mtk_wed_get_rx_capa(dev)) 2455 ret = mtk_wed_wo_init(hw); 2456 out: 2457 if (ret) { 2458 dev_err(dev->hw->dev, "failed to attach wed device\n"); 2459 __mtk_wed_detach(dev); 2460 } 2461 unlock: 2462 mutex_unlock(&hw_lock); 2463 2464 return ret; 2465 } 2466 2467 static int 2468 mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, 2469 bool reset) 2470 { 2471 struct mtk_wed_ring *ring = &dev->tx_ring[idx]; 2472 2473 /* 2474 * Tx ring redirection: 2475 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN 2476 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) 2477 * registers.
2478 * 2479 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and 2480 * configures it into the MTK_WED_WPDMA_RING_TX(n) registers. 2481 * It gets filled with packets picked up from the WED TX ring and from 2482 * WDMA RX. 2483 */ 2484 2485 if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) 2486 return -EINVAL; 2487 2488 if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 2489 sizeof(*ring->desc), true)) 2490 return -ENOMEM; 2491 2492 if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, 2493 reset)) 2494 return -ENOMEM; 2495 2496 ring->reg_base = MTK_WED_RING_TX(idx); 2497 ring->wpdma = regs; 2498 2499 if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) { 2500 /* reset prefetch index */ 2501 wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, 2502 MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | 2503 MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); 2504 2505 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 2506 MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | 2507 MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); 2508 2509 /* reset prefetch FIFO */ 2510 wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 2511 MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR | 2512 MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR); 2513 wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0); 2514 } 2515 2516 /* WED -> WPDMA */ 2517 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); 2518 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); 2519 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); 2520 2521 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 2522 ring->desc_phys); 2523 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 2524 MTK_WED_TX_RING_SIZE); 2525 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 2526 2527 return 0; 2528 } 2529 2530 static int 2531 mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) 2532 { 2533 struct mtk_wed_ring *ring = &dev->txfree_ring; 2534 int i, index = mtk_wed_is_v1(dev->hw); 2535 2536 /* 2537 * For txfree event handling, the same DMA ring is shared between WED 2538 * and WLAN. 
The WLAN driver accesses the ring index registers through 2539 * WED 2540 */ 2541 ring->reg_base = MTK_WED_RING_RX(index); 2542 ring->wpdma = regs; 2543 2544 for (i = 0; i < 12; i += 4) { 2545 u32 val = readl(regs + i); 2546 2547 wed_w32(dev, MTK_WED_RING_RX(index) + i, val); 2548 wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val); 2549 } 2550 2551 return 0; 2552 } 2553 2554 static int 2555 mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, 2556 bool reset) 2557 { 2558 struct mtk_wed_ring *ring = &dev->rx_ring[idx]; 2559 2560 if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) 2561 return -EINVAL; 2562 2563 if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 2564 sizeof(*ring->desc), false)) 2565 return -ENOMEM; 2566 2567 if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, 2568 reset)) 2569 return -ENOMEM; 2570 2571 ring->reg_base = MTK_WED_RING_RX_DATA(idx); 2572 ring->wpdma = regs; 2573 ring->flags |= MTK_WED_RING_CONFIGURED; 2574 2575 /* WPDMA -> WED */ 2576 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); 2577 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); 2578 2579 wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, 2580 ring->desc_phys); 2581 wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, 2582 MTK_WED_RX_RING_SIZE); 2583 2584 return 0; 2585 } 2586 2587 static u32 2588 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) 2589 { 2590 u32 val, ext_mask; 2591 2592 if (mtk_wed_is_v3_or_greater(dev->hw)) 2593 ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 2594 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 2595 else 2596 ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 2597 2598 val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); 2599 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); 2600 val &= ext_mask; 2601 if (!dev->hw->num_flows) 2602 val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 2603 if (val && net_ratelimit()) 2604 pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); 2605 2606 val = wed_r32(dev, MTK_WED_INT_STATUS); 2607 val &= mask; 2608 wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ 2609 2610 return val; 2611 } 2612 2613 static void 2614 mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) 2615 { 2616 mtk_wed_set_ext_int(dev, !!mask); 2617 wed_w32(dev, MTK_WED_INT_MASK, mask); 2618 } 2619 2620 int mtk_wed_flow_add(int index) 2621 { 2622 struct mtk_wed_hw *hw = hw_list[index]; 2623 int ret = 0; 2624 2625 mutex_lock(&hw_lock); 2626 2627 if (!hw || !hw->wed_dev) { 2628 ret = -ENODEV; 2629 goto out; 2630 } 2631 2632 if (!hw->wed_dev->wlan.offload_enable) 2633 goto out; 2634 2635 if (hw->num_flows) { 2636 hw->num_flows++; 2637 goto out; 2638 } 2639 2640 ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); 2641 if (!ret) 2642 hw->num_flows++; 2643 mtk_wed_set_ext_int(hw->wed_dev, true); 2644 2645 out: 2646 mutex_unlock(&hw_lock); 2647 2648 return ret; 2649 } 2650 2651 void mtk_wed_flow_remove(int index) 2652 { 2653 struct mtk_wed_hw *hw = hw_list[index]; 2654 2655 mutex_lock(&hw_lock); 2656 2657 if (!hw || !hw->wed_dev) 2658 goto out; 2659 2660 if (!hw->wed_dev->wlan.offload_disable) 2661 goto out; 2662 2663 if (--hw->num_flows) 2664 goto out; 2665 2666 hw->wed_dev->wlan.offload_disable(hw->wed_dev); 2667 mtk_wed_set_ext_int(hw->wed_dev, true); 2668 2669 out: 2670 mutex_unlock(&hw_lock); 2671 } 2672 2673 static int 2674 mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) 2675 { 2676 struct mtk_wed_flow_block_priv *priv = cb_priv; 
2677 struct flow_cls_offload *cls = type_data; 2678 struct mtk_wed_hw *hw = NULL; 2679 2680 if (!priv || !tc_can_offload(priv->dev)) 2681 return -EOPNOTSUPP; 2682 2683 if (type != TC_SETUP_CLSFLOWER) 2684 return -EOPNOTSUPP; 2685 2686 hw = priv->hw; 2687 return mtk_flow_offload_cmd(hw->eth, cls, hw->index); 2688 } 2689 2690 static int 2691 mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev, 2692 struct flow_block_offload *f) 2693 { 2694 struct mtk_wed_flow_block_priv *priv; 2695 static LIST_HEAD(block_cb_list); 2696 struct flow_block_cb *block_cb; 2697 struct mtk_eth *eth = hw->eth; 2698 flow_setup_cb_t *cb; 2699 2700 if (!eth->soc->offload_version) 2701 return -EOPNOTSUPP; 2702 2703 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 2704 return -EOPNOTSUPP; 2705 2706 cb = mtk_wed_setup_tc_block_cb; 2707 f->driver_block_list = &block_cb_list; 2708 2709 switch (f->command) { 2710 case FLOW_BLOCK_BIND: 2711 block_cb = flow_block_cb_lookup(f->block, cb, dev); 2712 if (block_cb) { 2713 flow_block_cb_incref(block_cb); 2714 return 0; 2715 } 2716 2717 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2718 if (!priv) 2719 return -ENOMEM; 2720 2721 priv->hw = hw; 2722 priv->dev = dev; 2723 block_cb = flow_block_cb_alloc(cb, dev, priv, NULL); 2724 if (IS_ERR(block_cb)) { 2725 kfree(priv); 2726 return PTR_ERR(block_cb); 2727 } 2728 2729 flow_block_cb_incref(block_cb); 2730 flow_block_cb_add(block_cb, f); 2731 list_add_tail(&block_cb->driver_list, &block_cb_list); 2732 return 0; 2733 case FLOW_BLOCK_UNBIND: 2734 block_cb = flow_block_cb_lookup(f->block, cb, dev); 2735 if (!block_cb) 2736 return -ENOENT; 2737 2738 if (!flow_block_cb_decref(block_cb)) { 2739 flow_block_cb_remove(block_cb, f); 2740 list_del(&block_cb->driver_list); 2741 kfree(block_cb->cb_priv); 2742 block_cb->cb_priv = NULL; 2743 } 2744 return 0; 2745 default: 2746 return -EOPNOTSUPP; 2747 } 2748 } 2749 2750 static int 2751 mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev, 2752 enum tc_setup_type type, void *type_data) 2753 { 2754 struct mtk_wed_hw *hw = wed->hw; 2755 2756 if (mtk_wed_is_v1(hw)) 2757 return -EOPNOTSUPP; 2758 2759 switch (type) { 2760 case TC_SETUP_BLOCK: 2761 case TC_SETUP_FT: 2762 return mtk_wed_setup_tc_block(hw, dev, type_data); 2763 default: 2764 return -EOPNOTSUPP; 2765 } 2766 } 2767 2768 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, 2769 void __iomem *wdma, phys_addr_t wdma_phy, 2770 int index) 2771 { 2772 static const struct mtk_wed_ops wed_ops = { 2773 .attach = mtk_wed_attach, 2774 .tx_ring_setup = mtk_wed_tx_ring_setup, 2775 .rx_ring_setup = mtk_wed_rx_ring_setup, 2776 .txfree_ring_setup = mtk_wed_txfree_ring_setup, 2777 .msg_update = mtk_wed_mcu_msg_update, 2778 .start = mtk_wed_start, 2779 .stop = mtk_wed_stop, 2780 .reset_dma = mtk_wed_reset_dma, 2781 .reg_read = wed_r32, 2782 .reg_write = wed_w32, 2783 .irq_get = mtk_wed_irq_get, 2784 .irq_set_mask = mtk_wed_irq_set_mask, 2785 .detach = mtk_wed_detach, 2786 .ppe_check = mtk_wed_ppe_check, 2787 .setup_tc = mtk_wed_setup_tc, 2788 .start_hw_rro = mtk_wed_start_hw_rro, 2789 .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup, 2790 .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup, 2791 .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup, 2792 }; 2793 struct device_node *eth_np = eth->dev->of_node; 2794 struct platform_device *pdev; 2795 struct mtk_wed_hw *hw; 2796 struct regmap *regs; 2797 int irq; 2798 2799 if (!np) 2800 return; 2801 2802 pdev = of_find_device_by_node(np); 2803 if (!pdev) 2804 goto 
err_of_node_put; 2805 2806 irq = platform_get_irq(pdev, 0); 2807 if (irq < 0) 2808 goto err_put_device; 2809 2810 regs = syscon_regmap_lookup_by_phandle(np, NULL); 2811 if (IS_ERR(regs)) 2812 goto err_put_device; 2813 2814 rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); 2815 2816 mutex_lock(&hw_lock); 2817 2818 if (WARN_ON(hw_list[index])) 2819 goto unlock; 2820 2821 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 2822 if (!hw) 2823 goto unlock; 2824 2825 hw->node = np; 2826 hw->regs = regs; 2827 hw->eth = eth; 2828 hw->dev = &pdev->dev; 2829 hw->wdma_phy = wdma_phy; 2830 hw->wdma = wdma; 2831 hw->index = index; 2832 hw->irq = irq; 2833 hw->version = eth->soc->version; 2834 2835 switch (hw->version) { 2836 case 2: 2837 hw->soc = &mt7986_data; 2838 break; 2839 case 3: 2840 hw->soc = &mt7988_data; 2841 break; 2842 default: 2843 case 1: 2844 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, 2845 "mediatek,pcie-mirror"); 2846 hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, 2847 "mediatek,hifsys"); 2848 if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { 2849 kfree(hw); 2850 goto unlock; 2851 } 2852 2853 if (!index) { 2854 regmap_write(hw->mirror, 0, 0); 2855 regmap_write(hw->mirror, 4, 0); 2856 } 2857 hw->soc = &mt7622_data; 2858 break; 2859 } 2860 2861 mtk_wed_hw_add_debugfs(hw); 2862 2863 hw_list[index] = hw; 2864 2865 mutex_unlock(&hw_lock); 2866 2867 return; 2868 2869 unlock: 2870 mutex_unlock(&hw_lock); 2871 err_put_device: 2872 put_device(&pdev->dev); 2873 err_of_node_put: 2874 of_node_put(np); 2875 } 2876 2877 void mtk_wed_exit(void) 2878 { 2879 int i; 2880 2881 rcu_assign_pointer(mtk_soc_wed_ops, NULL); 2882 2883 synchronize_rcu(); 2884 2885 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 2886 struct mtk_wed_hw *hw; 2887 2888 hw = hw_list[i]; 2889 if (!hw) 2890 continue; 2891 2892 hw_list[i] = NULL; 2893 debugfs_remove(hw->debugfs_dir); 2894 put_device(hw->dev); 2895 of_node_put(hw->node); 2896 kfree(hw); 2897 } 2898 } 2899