1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ 3 4 #include <linux/kernel.h> 5 #include <linux/platform_device.h> 6 #include <linux/slab.h> 7 #include <linux/module.h> 8 #include <linux/bitfield.h> 9 #include <linux/dma-mapping.h> 10 #include <linux/skbuff.h> 11 #include <linux/of_platform.h> 12 #include <linux/of_address.h> 13 #include <linux/of_reserved_mem.h> 14 #include <linux/mfd/syscon.h> 15 #include <linux/debugfs.h> 16 #include <linux/soc/mediatek/mtk_wed.h> 17 #include <net/flow_offload.h> 18 #include <net/pkt_cls.h> 19 #include "mtk_eth_soc.h" 20 #include "mtk_wed.h" 21 #include "mtk_ppe.h" 22 #include "mtk_wed_wo.h" 23 24 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) 25 26 #define MTK_WED_PKT_SIZE 1920 27 #define MTK_WED_BUF_SIZE 2048 28 #define MTK_WED_PAGE_BUF_SIZE 128 29 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) 30 #define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE) 31 #define MTK_WED_RX_RING_SIZE 1536 32 #define MTK_WED_RX_PG_BM_CNT 8192 33 #define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4) 34 #define MTK_WED_AMSDU_NPAGES 32 35 36 #define MTK_WED_TX_RING_SIZE 2048 37 #define MTK_WED_WDMA_RING_SIZE 1024 38 #define MTK_WED_MAX_GROUP_SIZE 0x100 39 #define MTK_WED_VLD_GROUP_SIZE 0x40 40 #define MTK_WED_PER_GROUP_PKT 128 41 42 #define MTK_WED_FBUF_SIZE 128 43 #define MTK_WED_MIOD_CNT 16 44 #define MTK_WED_FB_CMD_CNT 1024 45 #define MTK_WED_RRO_QUE_CNT 8192 46 #define MTK_WED_MIOD_ENTRY_CNT 128 47 48 #define MTK_WED_TX_BM_DMA_SIZE 65536 49 #define MTK_WED_TX_BM_PKT_CNT 32768 50 51 static struct mtk_wed_hw *hw_list[3]; 52 static DEFINE_MUTEX(hw_lock); 53 54 struct mtk_wed_flow_block_priv { 55 struct mtk_wed_hw *hw; 56 struct net_device *dev; 57 }; 58 59 static const struct mtk_wed_soc_data mt7622_data = { 60 .regmap = { 61 .tx_bm_tkid = 0x088, 62 .wpdma_rx_ring0 = 0x770, 63 .reset_idx_tx_mask = GENMASK(3, 0), 64 .reset_idx_rx_mask = GENMASK(17, 16), 65 }, 66 .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), 67 .wdma_desc_size = sizeof(struct mtk_wdma_desc), 68 }; 69 70 static const struct mtk_wed_soc_data mt7986_data = { 71 .regmap = { 72 .tx_bm_tkid = 0x0c8, 73 .wpdma_rx_ring0 = 0x770, 74 .reset_idx_tx_mask = GENMASK(1, 0), 75 .reset_idx_rx_mask = GENMASK(7, 6), 76 }, 77 .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), 78 .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), 79 }; 80 81 static const struct mtk_wed_soc_data mt7988_data = { 82 .regmap = { 83 .tx_bm_tkid = 0x0c8, 84 .wpdma_rx_ring0 = 0x7d0, 85 .reset_idx_tx_mask = GENMASK(1, 0), 86 .reset_idx_rx_mask = GENMASK(7, 6), 87 }, 88 .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc), 89 .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), 90 }; 91 92 static void 93 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 94 { 95 regmap_update_bits(dev->hw->regs, reg, mask | val, val); 96 } 97 98 static void 99 wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 100 { 101 return wed_m32(dev, reg, 0, mask); 102 } 103 104 static void 105 wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 106 { 107 return wed_m32(dev, reg, mask, 0); 108 } 109 110 static void 111 wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 112 { 113 wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); 114 } 115 116 static void 117 wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 118 { 119 wdma_m32(dev, reg, 0, mask); 120 } 121 122 static void 123 wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 124 { 125 wdma_m32(dev, reg, mask, 0); 
126 } 127 128 static u32 129 wifi_r32(struct mtk_wed_device *dev, u32 reg) 130 { 131 return readl(dev->wlan.base + reg); 132 } 133 134 static void 135 wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) 136 { 137 writel(val, dev->wlan.base + reg); 138 } 139 140 static u32 141 mtk_wed_read_reset(struct mtk_wed_device *dev) 142 { 143 return wed_r32(dev, MTK_WED_RESET); 144 } 145 146 static u32 147 mtk_wdma_read_reset(struct mtk_wed_device *dev) 148 { 149 return wdma_r32(dev, MTK_WDMA_GLO_CFG); 150 } 151 152 static void 153 mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) 154 { 155 u32 status; 156 157 if (!mtk_wed_is_v3_or_greater(dev->hw)) 158 return; 159 160 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); 161 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); 162 163 if (read_poll_timeout(wdma_r32, status, 164 !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 165 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) 166 dev_err(dev->hw->dev, "rx reset failed\n"); 167 168 if (read_poll_timeout(wdma_r32, status, 169 !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 170 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) 171 dev_err(dev->hw->dev, "rx reset failed\n"); 172 173 wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); 174 wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); 175 176 if (read_poll_timeout(wdma_r32, status, 177 !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 178 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) 179 dev_err(dev->hw->dev, "rx reset failed\n"); 180 181 if (read_poll_timeout(wdma_r32, status, 182 !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 183 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) 184 dev_err(dev->hw->dev, "rx reset failed\n"); 185 186 /* prefetch FIFO */ 187 wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, 188 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | 189 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); 190 wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, 191 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | 192 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); 193 194 /* core FIFO */ 195 wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, 196 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | 197 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | 198 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | 199 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | 200 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | 201 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | 202 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); 203 wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, 204 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | 205 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | 206 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | 207 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | 208 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | 209 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | 210 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); 211 212 /* writeback FIFO */ 213 wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), 214 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 215 wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), 216 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 217 218 wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), 219 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 220 wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), 221 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 222 223 /* prefetch ring status */ 224 wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, 225 MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); 226 wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, 227 MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); 228 229 /* writeback ring status */ 230 wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, 231 
MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); 232 wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, 233 MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); 234 } 235 236 static int 237 mtk_wdma_rx_reset(struct mtk_wed_device *dev) 238 { 239 u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; 240 int i, ret; 241 242 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); 243 ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status, 244 !(status & mask), 0, 10000); 245 if (ret) 246 dev_err(dev->hw->dev, "rx reset failed\n"); 247 248 mtk_wdma_v3_rx_reset(dev); 249 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); 250 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 251 252 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { 253 if (dev->rx_wdma[i].desc) 254 continue; 255 256 wdma_w32(dev, 257 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 258 } 259 260 return ret; 261 } 262 263 static u32 264 mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) 265 { 266 return !!(wed_r32(dev, reg) & mask); 267 } 268 269 static int 270 mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) 271 { 272 int sleep = 15000; 273 int timeout = 100 * sleep; 274 u32 val; 275 276 return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, 277 timeout, false, dev, reg, mask); 278 } 279 280 static void 281 mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) 282 { 283 u32 status; 284 285 if (!mtk_wed_is_v3_or_greater(dev->hw)) 286 return; 287 288 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); 289 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); 290 291 if (read_poll_timeout(wdma_r32, status, 292 !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 293 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) 294 dev_err(dev->hw->dev, "tx reset failed\n"); 295 296 if (read_poll_timeout(wdma_r32, status, 297 !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 298 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) 299 dev_err(dev->hw->dev, "tx reset failed\n"); 300 301 wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); 302 wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); 303 304 if (read_poll_timeout(wdma_r32, status, 305 !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 306 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) 307 dev_err(dev->hw->dev, "tx reset failed\n"); 308 309 if (read_poll_timeout(wdma_r32, status, 310 !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 311 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) 312 dev_err(dev->hw->dev, "tx reset failed\n"); 313 314 /* prefetch FIFO */ 315 wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, 316 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | 317 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); 318 wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, 319 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | 320 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); 321 322 /* core FIFO */ 323 wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, 324 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | 325 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | 326 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | 327 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); 328 wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, 329 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | 330 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | 331 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | 332 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); 333 334 /* writeback FIFO */ 335 wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), 336 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 337 wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), 338 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 339 340 wdma_clr(dev, 
MTK_WDMA_WRBK_TX_FIFO_CFG(0), 341 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 342 wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), 343 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 344 345 /* prefetch ring status */ 346 wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, 347 MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); 348 wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, 349 MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); 350 351 /* writeback ring status */ 352 wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, 353 MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); 354 wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, 355 MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); 356 } 357 358 static void 359 mtk_wdma_tx_reset(struct mtk_wed_device *dev) 360 { 361 u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; 362 int i; 363 364 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); 365 if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, 366 !(status & mask), 0, 10000)) 367 dev_err(dev->hw->dev, "tx reset failed\n"); 368 369 mtk_wdma_v3_tx_reset(dev); 370 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); 371 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 372 373 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) 374 wdma_w32(dev, 375 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 376 } 377 378 static void 379 mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) 380 { 381 u32 status; 382 383 wed_w32(dev, MTK_WED_RESET, mask); 384 if (readx_poll_timeout(mtk_wed_read_reset, dev, status, 385 !(status & mask), 0, 1000)) 386 WARN_ON_ONCE(1); 387 } 388 389 static u32 390 mtk_wed_wo_read_status(struct mtk_wed_device *dev) 391 { 392 return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS); 393 } 394 395 static void 396 mtk_wed_wo_reset(struct mtk_wed_device *dev) 397 { 398 struct mtk_wed_wo *wo = dev->hw->wed_wo; 399 u8 state = MTK_WED_WO_STATE_DISABLE; 400 void __iomem *reg; 401 u32 val; 402 403 mtk_wdma_tx_reset(dev); 404 mtk_wed_reset(dev, MTK_WED_RESET_WED); 405 406 if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 407 MTK_WED_WO_CMD_CHANGE_STATE, &state, 408 sizeof(state), false)) 409 return; 410 411 if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, 412 val == MTK_WED_WOIF_DISABLE_DONE, 413 100, MTK_WOCPU_TIMEOUT)) 414 dev_err(dev->hw->dev, "failed to disable wed-wo\n"); 415 416 reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4); 417 418 val = readl(reg); 419 switch (dev->hw->index) { 420 case 0: 421 val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 422 writel(val, reg); 423 val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 424 writel(val, reg); 425 break; 426 case 1: 427 val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 428 writel(val, reg); 429 val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 430 writel(val, reg); 431 break; 432 default: 433 break; 434 } 435 iounmap(reg); 436 } 437 438 void mtk_wed_fe_reset(void) 439 { 440 int i; 441 442 mutex_lock(&hw_lock); 443 444 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 445 struct mtk_wed_hw *hw = hw_list[i]; 446 struct mtk_wed_device *dev; 447 int err; 448 449 if (!hw) 450 break; 451 452 dev = hw->wed_dev; 453 if (!dev || !dev->wlan.reset) 454 continue; 455 456 /* reset callback blocks until WLAN reset is completed */ 457 err = dev->wlan.reset(dev); 458 if (err) 459 dev_err(dev->dev, "wlan reset failed: %d\n", err); 460 } 461 462 mutex_unlock(&hw_lock); 463 } 464 465 void mtk_wed_fe_reset_complete(void) 466 { 467 int i; 468 469 mutex_lock(&hw_lock); 470 471 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 472 struct mtk_wed_hw *hw = hw_list[i]; 473 struct mtk_wed_device *dev; 474 475 if (!hw) 476 break; 477 478 dev = hw->wed_dev; 479 if (!dev || 
!dev->wlan.reset_complete) 480 continue; 481 482 dev->wlan.reset_complete(dev); 483 } 484 485 mutex_unlock(&hw_lock); 486 } 487 488 static struct mtk_wed_hw * 489 mtk_wed_assign(struct mtk_wed_device *dev) 490 { 491 struct mtk_wed_hw *hw; 492 int i; 493 494 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 495 hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; 496 if (!hw) 497 return NULL; 498 499 if (!hw->wed_dev) 500 goto out; 501 502 if (mtk_wed_is_v1(hw)) 503 return NULL; 504 505 /* MT7986 WED devices do not have any pcie slot restrictions */ 506 } 507 /* MT7986 PCIE or AXI */ 508 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 509 hw = hw_list[i]; 510 if (hw && !hw->wed_dev) 511 goto out; 512 } 513 514 return NULL; 515 516 out: 517 hw->wed_dev = dev; 518 return hw; 519 } 520 521 static int 522 mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev) 523 { 524 struct mtk_wed_hw *hw = dev->hw; 525 struct mtk_wed_amsdu *wed_amsdu; 526 int i; 527 528 if (!mtk_wed_is_v3_or_greater(hw)) 529 return 0; 530 531 wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES, 532 sizeof(*wed_amsdu), GFP_KERNEL); 533 if (!wed_amsdu) 534 return -ENOMEM; 535 536 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { 537 void *ptr; 538 539 /* each segment is 64K */ 540 ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | 541 __GFP_ZERO | __GFP_COMP | 542 GFP_DMA32, 543 get_order(MTK_WED_AMSDU_BUF_SIZE)); 544 if (!ptr) 545 goto error; 546 547 wed_amsdu[i].txd = ptr; 548 wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr, 549 MTK_WED_AMSDU_BUF_SIZE, 550 DMA_TO_DEVICE); 551 if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy)) 552 goto error; 553 } 554 dev->hw->wed_amsdu = wed_amsdu; 555 556 return 0; 557 558 error: 559 for (i--; i >= 0; i--) 560 dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy, 561 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); 562 return -ENOMEM; 563 } 564 565 static void 566 mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev) 567 { 568 struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; 569 int i; 570 571 if (!wed_amsdu) 572 return; 573 574 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { 575 dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy, 576 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); 577 free_pages((unsigned long)wed_amsdu[i].txd, 578 get_order(MTK_WED_AMSDU_BUF_SIZE)); 579 } 580 } 581 582 static int 583 mtk_wed_amsdu_init(struct mtk_wed_device *dev) 584 { 585 struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; 586 int i, ret; 587 588 if (!wed_amsdu) 589 return 0; 590 591 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) 592 wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i), 593 wed_amsdu[i].txd_phy); 594 595 /* init all sta parameter */ 596 wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL | 597 MTK_WED_AMSDU_STA_WTBL_HDRT_MODE | 598 FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN, 599 dev->wlan.amsdu_max_len >> 8) | 600 FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM, 601 dev->wlan.amsdu_max_subframes)); 602 603 wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT); 604 605 ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO, 606 MTK_WED_AMSDU_STA_INFO_DO_INIT); 607 if (ret) { 608 dev_err(dev->hw->dev, "amsdu initialization failed\n"); 609 return ret; 610 } 611 612 /* init partial amsdu offload txd src */ 613 wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG, 614 FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index)); 615 616 /* init qmem */ 617 wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET); 618 ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29)); 619 if (ret) { 620 pr_info("%s: 
amsdu qmem initialization failed\n", __func__); 621 return ret; 622 } 623 624 /* eagle E1 PCIE1 tx ring 22 flow control issue */ 625 if (dev->wlan.id == 0x7991) 626 wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING); 627 628 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 629 630 return 0; 631 } 632 633 static int 634 mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) 635 { 636 u32 desc_size = dev->hw->soc->tx_ring_desc_size; 637 int i, page_idx = 0, n_pages, ring_size; 638 int token = dev->wlan.token_start; 639 struct mtk_wed_buf *page_list; 640 dma_addr_t desc_phys; 641 void *desc_ptr; 642 643 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 644 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); 645 dev->tx_buf_ring.size = ring_size; 646 } else { 647 dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE; 648 ring_size = MTK_WED_TX_BM_PKT_CNT; 649 } 650 n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE; 651 652 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); 653 if (!page_list) 654 return -ENOMEM; 655 656 dev->tx_buf_ring.pages = page_list; 657 658 desc_ptr = dma_alloc_coherent(dev->hw->dev, 659 dev->tx_buf_ring.size * desc_size, 660 &desc_phys, GFP_KERNEL); 661 if (!desc_ptr) 662 return -ENOMEM; 663 664 dev->tx_buf_ring.desc = desc_ptr; 665 dev->tx_buf_ring.desc_phys = desc_phys; 666 667 for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { 668 dma_addr_t page_phys, buf_phys; 669 struct page *page; 670 void *buf; 671 int s; 672 673 page = __dev_alloc_page(GFP_KERNEL); 674 if (!page) 675 return -ENOMEM; 676 677 page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, 678 DMA_BIDIRECTIONAL); 679 if (dma_mapping_error(dev->hw->dev, page_phys)) { 680 __free_page(page); 681 return -ENOMEM; 682 } 683 684 page_list[page_idx].p = page; 685 page_list[page_idx++].phy_addr = page_phys; 686 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, 687 DMA_BIDIRECTIONAL); 688 689 buf = page_to_virt(page); 690 buf_phys = page_phys; 691 692 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { 693 struct mtk_wdma_desc *desc = desc_ptr; 694 u32 ctrl; 695 696 desc->buf0 = cpu_to_le32(buf_phys); 697 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 698 u32 txd_size; 699 700 txd_size = dev->wlan.init_buf(buf, buf_phys, 701 token++); 702 desc->buf1 = cpu_to_le32(buf_phys + txd_size); 703 ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size); 704 if (mtk_wed_is_v1(dev->hw)) 705 ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 | 706 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, 707 MTK_WED_BUF_SIZE - txd_size); 708 else 709 ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 | 710 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, 711 MTK_WED_BUF_SIZE - txd_size); 712 desc->info = 0; 713 } else { 714 ctrl = token << 16 | TX_DMA_PREP_ADDR64(buf_phys); 715 } 716 desc->ctrl = cpu_to_le32(ctrl); 717 718 desc_ptr += desc_size; 719 buf += MTK_WED_BUF_SIZE; 720 buf_phys += MTK_WED_BUF_SIZE; 721 } 722 723 dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, 724 DMA_BIDIRECTIONAL); 725 } 726 727 return 0; 728 } 729 730 static void 731 mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) 732 { 733 struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages; 734 struct mtk_wed_hw *hw = dev->hw; 735 int i, page_idx = 0; 736 737 if (!page_list) 738 return; 739 740 if (!dev->tx_buf_ring.desc) 741 goto free_pagelist; 742 743 for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { 744 dma_addr_t page_phy = page_list[page_idx].phy_addr; 745 void *page = page_list[page_idx++].p; 746 747 if (!page) 748 break; 749 750 
dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE, 751 DMA_BIDIRECTIONAL); 752 __free_page(page); 753 } 754 755 dma_free_coherent(dev->hw->dev, 756 dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size, 757 dev->tx_buf_ring.desc, 758 dev->tx_buf_ring.desc_phys); 759 760 free_pagelist: 761 kfree(page_list); 762 } 763 764 static int 765 mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev) 766 { 767 int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE; 768 struct mtk_wed_buf *page_list; 769 struct mtk_wed_bm_desc *desc; 770 dma_addr_t desc_phys; 771 int i, page_idx = 0; 772 773 if (!dev->wlan.hw_rro) 774 return 0; 775 776 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); 777 if (!page_list) 778 return -ENOMEM; 779 780 dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); 781 dev->hw_rro.pages = page_list; 782 desc = dma_alloc_coherent(dev->hw->dev, 783 dev->wlan.rx_nbuf * sizeof(*desc), 784 &desc_phys, GFP_KERNEL); 785 if (!desc) 786 return -ENOMEM; 787 788 dev->hw_rro.desc = desc; 789 dev->hw_rro.desc_phys = desc_phys; 790 791 for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { 792 dma_addr_t page_phys, buf_phys; 793 struct page *page; 794 int s; 795 796 page = __dev_alloc_page(GFP_KERNEL); 797 if (!page) 798 return -ENOMEM; 799 800 page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, 801 DMA_BIDIRECTIONAL); 802 if (dma_mapping_error(dev->hw->dev, page_phys)) { 803 __free_page(page); 804 return -ENOMEM; 805 } 806 807 page_list[page_idx].p = page; 808 page_list[page_idx++].phy_addr = page_phys; 809 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, 810 DMA_BIDIRECTIONAL); 811 812 buf_phys = page_phys; 813 for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) { 814 desc->buf0 = cpu_to_le32(buf_phys); 815 desc->token = cpu_to_le32(RX_DMA_PREP_ADDR64(buf_phys)); 816 buf_phys += MTK_WED_PAGE_BUF_SIZE; 817 desc++; 818 } 819 820 dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, 821 DMA_BIDIRECTIONAL); 822 } 823 824 return 0; 825 } 826 827 static int 828 mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) 829 { 830 struct mtk_wed_bm_desc *desc; 831 dma_addr_t desc_phys; 832 833 dev->rx_buf_ring.size = dev->wlan.rx_nbuf; 834 desc = dma_alloc_coherent(dev->hw->dev, 835 dev->wlan.rx_nbuf * sizeof(*desc), 836 &desc_phys, GFP_KERNEL); 837 if (!desc) 838 return -ENOMEM; 839 840 dev->rx_buf_ring.desc = desc; 841 dev->rx_buf_ring.desc_phys = desc_phys; 842 dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); 843 844 return mtk_wed_hwrro_buffer_alloc(dev); 845 } 846 847 static void 848 mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev) 849 { 850 struct mtk_wed_buf *page_list = dev->hw_rro.pages; 851 struct mtk_wed_bm_desc *desc = dev->hw_rro.desc; 852 int i, page_idx = 0; 853 854 if (!dev->wlan.hw_rro) 855 return; 856 857 if (!page_list) 858 return; 859 860 if (!desc) 861 goto free_pagelist; 862 863 for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { 864 dma_addr_t buf_addr = page_list[page_idx].phy_addr; 865 void *page = page_list[page_idx++].p; 866 867 if (!page) 868 break; 869 870 dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, 871 DMA_BIDIRECTIONAL); 872 __free_page(page); 873 } 874 875 dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc), 876 desc, dev->hw_rro.desc_phys); 877 878 free_pagelist: 879 kfree(page_list); 880 } 881 882 static void 883 mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) 884 { 885 struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc; 886 887 if (!desc) 888 return; 889 890 
dev->wlan.release_rx_buf(dev); 891 dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), 892 desc, dev->rx_buf_ring.desc_phys); 893 894 mtk_wed_hwrro_free_buffer(dev); 895 } 896 897 static void 898 mtk_wed_hwrro_init(struct mtk_wed_device *dev) 899 { 900 if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) 901 return; 902 903 wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM, 904 FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128)); 905 906 wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys); 907 908 wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR, 909 MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX | 910 FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX, 911 MTK_WED_RX_PG_BM_CNT)); 912 913 /* enable rx_page_bm to fetch dmad */ 914 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); 915 } 916 917 static void 918 mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) 919 { 920 wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, 921 FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); 922 wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); 923 wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | 924 FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); 925 wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, 926 FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); 927 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 928 929 mtk_wed_hwrro_init(dev); 930 } 931 932 static void 933 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) 934 { 935 if (!ring->desc) 936 return; 937 938 dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size, 939 ring->desc, ring->desc_phys); 940 } 941 942 static void 943 mtk_wed_free_rx_rings(struct mtk_wed_device *dev) 944 { 945 mtk_wed_free_rx_buffer(dev); 946 mtk_wed_free_ring(dev, &dev->rro.ring); 947 } 948 949 static void 950 mtk_wed_free_tx_rings(struct mtk_wed_device *dev) 951 { 952 int i; 953 954 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) 955 mtk_wed_free_ring(dev, &dev->tx_ring[i]); 956 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 957 mtk_wed_free_ring(dev, &dev->rx_wdma[i]); 958 } 959 960 static void 961 mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) 962 { 963 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 964 965 switch (dev->hw->version) { 966 case 1: 967 mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; 968 break; 969 case 2: 970 mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | 971 MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | 972 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 973 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; 974 break; 975 case 3: 976 mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 977 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 978 break; 979 default: 980 break; 981 } 982 983 if (!dev->hw->num_flows) 984 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 985 986 wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? 
mask : 0); 987 wed_r32(dev, MTK_WED_EXT_INT_MASK); 988 } 989 990 static void 991 mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) 992 { 993 if (!mtk_wed_is_v2(dev->hw)) 994 return; 995 996 if (enable) { 997 wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 998 wed_w32(dev, MTK_WED_TXP_DW1, 999 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103)); 1000 } else { 1001 wed_w32(dev, MTK_WED_TXP_DW1, 1002 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100)); 1003 wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 1004 } 1005 } 1006 1007 static int 1008 mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, 1009 struct mtk_wed_ring *ring) 1010 { 1011 int i; 1012 1013 for (i = 0; i < 3; i++) { 1014 u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX); 1015 1016 if (cur_idx == MTK_WED_RX_RING_SIZE - 1) 1017 break; 1018 1019 usleep_range(100000, 200000); 1020 } 1021 1022 if (i == 3) { 1023 dev_err(dev->hw->dev, "rx dma enable failed\n"); 1024 return -ETIMEDOUT; 1025 } 1026 1027 return 0; 1028 } 1029 1030 static void 1031 mtk_wed_dma_disable(struct mtk_wed_device *dev) 1032 { 1033 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1034 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 1035 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 1036 1037 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 1038 1039 wed_clr(dev, MTK_WED_GLO_CFG, 1040 MTK_WED_GLO_CFG_TX_DMA_EN | 1041 MTK_WED_GLO_CFG_RX_DMA_EN); 1042 1043 wdma_clr(dev, MTK_WDMA_GLO_CFG, 1044 MTK_WDMA_GLO_CFG_TX_DMA_EN | 1045 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 1046 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); 1047 1048 if (mtk_wed_is_v1(dev->hw)) { 1049 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); 1050 wdma_clr(dev, MTK_WDMA_GLO_CFG, 1051 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 1052 } else { 1053 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1054 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 1055 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 1056 1057 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1058 MTK_WED_WPDMA_RX_D_RX_DRV_EN); 1059 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1060 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 1061 1062 if (mtk_wed_is_v3_or_greater(dev->hw) && 1063 mtk_wed_get_rx_capa(dev)) { 1064 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, 1065 MTK_WDMA_PREF_TX_CFG_PREF_EN); 1066 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, 1067 MTK_WDMA_PREF_RX_CFG_PREF_EN); 1068 } 1069 } 1070 1071 mtk_wed_set_512_support(dev, false); 1072 } 1073 1074 static void 1075 mtk_wed_stop(struct mtk_wed_device *dev) 1076 { 1077 mtk_wed_set_ext_int(dev, false); 1078 1079 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); 1080 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); 1081 wdma_w32(dev, MTK_WDMA_INT_MASK, 0); 1082 wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); 1083 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); 1084 1085 if (!mtk_wed_get_rx_capa(dev)) 1086 return; 1087 1088 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); 1089 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); 1090 } 1091 1092 static void 1093 mtk_wed_deinit(struct mtk_wed_device *dev) 1094 { 1095 mtk_wed_stop(dev); 1096 mtk_wed_dma_disable(dev); 1097 1098 wed_clr(dev, MTK_WED_CTRL, 1099 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 1100 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 1101 MTK_WED_CTRL_WED_TX_BM_EN | 1102 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1103 1104 if (mtk_wed_is_v1(dev->hw)) 1105 return; 1106 1107 wed_clr(dev, MTK_WED_CTRL, 1108 MTK_WED_CTRL_RX_ROUTE_QM_EN | 1109 MTK_WED_CTRL_WED_RX_BM_EN | 1110 MTK_WED_CTRL_RX_RRO_QM_EN); 1111 1112 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1113 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 1114 wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU); 1115 
wed_clr(dev, MTK_WED_PCIE_INT_CTRL, 1116 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | 1117 MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER); 1118 } 1119 } 1120 1121 static void 1122 __mtk_wed_detach(struct mtk_wed_device *dev) 1123 { 1124 struct mtk_wed_hw *hw = dev->hw; 1125 1126 mtk_wed_deinit(dev); 1127 1128 mtk_wdma_rx_reset(dev); 1129 mtk_wed_reset(dev, MTK_WED_RESET_WED); 1130 mtk_wed_amsdu_free_buffer(dev); 1131 mtk_wed_free_tx_buffer(dev); 1132 mtk_wed_free_tx_rings(dev); 1133 1134 if (mtk_wed_get_rx_capa(dev)) { 1135 if (hw->wed_wo) 1136 mtk_wed_wo_reset(dev); 1137 mtk_wed_free_rx_rings(dev); 1138 if (hw->wed_wo) 1139 mtk_wed_wo_deinit(hw); 1140 } 1141 1142 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 1143 struct device_node *wlan_node; 1144 1145 wlan_node = dev->wlan.pci_dev->dev.of_node; 1146 if (of_dma_is_coherent(wlan_node) && hw->hifsys) 1147 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 1148 BIT(hw->index), BIT(hw->index)); 1149 } 1150 1151 if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) && 1152 hw->eth->dma_dev != hw->eth->dev) 1153 mtk_eth_set_dma_device(hw->eth, hw->eth->dev); 1154 1155 memset(dev, 0, sizeof(*dev)); 1156 module_put(THIS_MODULE); 1157 1158 hw->wed_dev = NULL; 1159 } 1160 1161 static void 1162 mtk_wed_detach(struct mtk_wed_device *dev) 1163 { 1164 mutex_lock(&hw_lock); 1165 __mtk_wed_detach(dev); 1166 mutex_unlock(&hw_lock); 1167 } 1168 1169 static void 1170 mtk_wed_bus_init(struct mtk_wed_device *dev) 1171 { 1172 switch (dev->wlan.bus_type) { 1173 case MTK_WED_BUS_PCIE: { 1174 struct device_node *np = dev->hw->eth->dev->of_node; 1175 1176 if (mtk_wed_is_v2(dev->hw)) { 1177 struct regmap *regs; 1178 1179 regs = syscon_regmap_lookup_by_phandle(np, 1180 "mediatek,wed-pcie"); 1181 if (IS_ERR(regs)) 1182 break; 1183 1184 regmap_update_bits(regs, 0, BIT(0), BIT(0)); 1185 } 1186 1187 if (dev->wlan.msi) { 1188 wed_w32(dev, MTK_WED_PCIE_CFG_INTM, 1189 dev->hw->pcie_base | 0xc08); 1190 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1191 dev->hw->pcie_base | 0xc04); 1192 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8)); 1193 } else { 1194 wed_w32(dev, MTK_WED_PCIE_CFG_INTM, 1195 dev->hw->pcie_base | 0x180); 1196 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1197 dev->hw->pcie_base | 0x184); 1198 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); 1199 } 1200 1201 wed_w32(dev, MTK_WED_PCIE_INT_CTRL, 1202 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); 1203 1204 /* pcie interrupt control: pola/source selection */ 1205 wed_set(dev, MTK_WED_PCIE_INT_CTRL, 1206 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | 1207 MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER | 1208 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1209 dev->hw->index)); 1210 break; 1211 } 1212 case MTK_WED_BUS_AXI: 1213 wed_set(dev, MTK_WED_WPDMA_INT_CTRL, 1214 MTK_WED_WPDMA_INT_CTRL_SIG_SRC | 1215 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0)); 1216 break; 1217 default: 1218 break; 1219 } 1220 } 1221 1222 static void 1223 mtk_wed_set_wpdma(struct mtk_wed_device *dev) 1224 { 1225 int i; 1226 1227 if (mtk_wed_is_v1(dev->hw)) { 1228 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); 1229 return; 1230 } 1231 1232 mtk_wed_bus_init(dev); 1233 1234 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); 1235 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); 1236 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); 1237 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); 1238 1239 if (!mtk_wed_get_rx_capa(dev)) 1240 return; 1241 1242 wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); 1243 wed_w32(dev, 
dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx); 1244 1245 if (!dev->wlan.hw_rro) 1246 return; 1247 1248 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]); 1249 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]); 1250 for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) 1251 wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i), 1252 dev->wlan.wpdma_rx_pg + i * 0x10); 1253 } 1254 1255 static void 1256 mtk_wed_hw_init_early(struct mtk_wed_device *dev) 1257 { 1258 u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2); 1259 u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE; 1260 1261 mtk_wed_deinit(dev); 1262 mtk_wed_reset(dev, MTK_WED_RESET_WED); 1263 mtk_wed_set_wpdma(dev); 1264 1265 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 1266 mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | 1267 MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; 1268 set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | 1269 MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; 1270 } 1271 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); 1272 1273 if (mtk_wed_is_v1(dev->hw)) { 1274 u32 offset = dev->hw->index ? 0x04000400 : 0; 1275 1276 wdma_set(dev, MTK_WDMA_GLO_CFG, 1277 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 1278 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | 1279 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 1280 1281 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); 1282 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); 1283 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1284 MTK_PCIE_BASE(dev->hw->index)); 1285 } else { 1286 wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy); 1287 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT); 1288 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 1289 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS, 1290 MTK_WDMA_INT_STATUS) | 1291 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG, 1292 MTK_WDMA_GLO_CFG)); 1293 1294 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 1295 FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL, 1296 MTK_WDMA_RING_TX(0)) | 1297 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL, 1298 MTK_WDMA_RING_RX(0))); 1299 } 1300 } 1301 1302 static int 1303 mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 1304 int size) 1305 { 1306 ring->desc = dma_alloc_coherent(dev->hw->dev, 1307 size * sizeof(*ring->desc), 1308 &ring->desc_phys, GFP_KERNEL); 1309 if (!ring->desc) 1310 return -ENOMEM; 1311 1312 ring->desc_size = sizeof(*ring->desc); 1313 ring->size = size; 1314 1315 return 0; 1316 } 1317 1318 #define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) 1319 static int 1320 mtk_wed_rro_alloc(struct mtk_wed_device *dev) 1321 { 1322 struct reserved_mem *rmem; 1323 struct device_node *np; 1324 int index; 1325 1326 index = of_property_match_string(dev->hw->node, "memory-region-names", 1327 "wo-dlm"); 1328 if (index < 0) 1329 return index; 1330 1331 np = of_parse_phandle(dev->hw->node, "memory-region", index); 1332 if (!np) 1333 return -ENODEV; 1334 1335 rmem = of_reserved_mem_lookup(np); 1336 of_node_put(np); 1337 1338 if (!rmem) 1339 return -ENODEV; 1340 1341 dev->rro.miod_phys = rmem->base; 1342 dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; 1343 1344 return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring, 1345 MTK_WED_RRO_QUE_CNT); 1346 } 1347 1348 static int 1349 mtk_wed_rro_cfg(struct mtk_wed_device *dev) 1350 { 1351 struct mtk_wed_wo *wo = dev->hw->wed_wo; 1352 struct { 1353 struct { 1354 __le32 base; 1355 __le32 cnt; 1356 __le32 unit; 1357 } ring[2]; 1358 __le32 wed; 1359 u8 version; 1360 } req = { 1361 .ring[0] = { 1362 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE), 1363 .cnt = cpu_to_le32(MTK_WED_MIOD_CNT), 
1364 .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT), 1365 }, 1366 .ring[1] = { 1367 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE + 1368 MTK_WED_MIOD_COUNT), 1369 .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT), 1370 .unit = cpu_to_le32(4), 1371 }, 1372 }; 1373 1374 return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1375 MTK_WED_WO_CMD_WED_CFG, 1376 &req, sizeof(req), true); 1377 } 1378 1379 static void 1380 mtk_wed_rro_hw_init(struct mtk_wed_device *dev) 1381 { 1382 wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, 1383 FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | 1384 FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | 1385 FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, 1386 MTK_WED_MIOD_ENTRY_CNT >> 2)); 1387 1388 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys); 1389 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, 1390 FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); 1391 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys); 1392 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, 1393 FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); 1394 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); 1395 wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys); 1396 1397 wed_set(dev, MTK_WED_RROQM_RST_IDX, 1398 MTK_WED_RROQM_RST_IDX_MIOD | 1399 MTK_WED_RROQM_RST_IDX_FDBK); 1400 1401 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 1402 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); 1403 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 1404 } 1405 1406 static void 1407 mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) 1408 { 1409 wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); 1410 1411 for (;;) { 1412 usleep_range(100, 200); 1413 if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) 1414 break; 1415 } 1416 1417 /* configure RX_ROUTE_QM */ 1418 if (mtk_wed_is_v2(dev->hw)) { 1419 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1420 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); 1421 wed_set(dev, MTK_WED_RTQM_GLO_CFG, 1422 FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 1423 0x3 + dev->hw->index)); 1424 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1425 } else { 1426 wed_set(dev, MTK_WED_RTQM_ENQ_CFG0, 1427 FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 1428 0x3 + dev->hw->index)); 1429 } 1430 /* enable RX_ROUTE_QM */ 1431 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 1432 } 1433 1434 static void 1435 mtk_wed_hw_init(struct mtk_wed_device *dev) 1436 { 1437 if (dev->init_done) 1438 return; 1439 1440 dev->init_done = true; 1441 mtk_wed_set_ext_int(dev, false); 1442 1443 wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); 1444 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); 1445 1446 if (mtk_wed_is_v1(dev->hw)) { 1447 wed_w32(dev, MTK_WED_TX_BM_CTRL, 1448 MTK_WED_TX_BM_CTRL_PAUSE | 1449 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, 1450 dev->tx_buf_ring.size / 128) | 1451 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, 1452 MTK_WED_TX_RING_SIZE / 256)); 1453 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 1454 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | 1455 MTK_WED_TX_BM_DYN_THR_HI); 1456 } else if (mtk_wed_is_v2(dev->hw)) { 1457 wed_w32(dev, MTK_WED_TX_BM_CTRL, 1458 MTK_WED_TX_BM_CTRL_PAUSE | 1459 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, 1460 dev->tx_buf_ring.size / 128) | 1461 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, 1462 MTK_WED_TX_RING_SIZE / 256)); 1463 wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, 1464 FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | 1465 MTK_WED_TX_TKID_DYN_THR_HI); 1466 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 1467 
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | 1468 MTK_WED_TX_BM_DYN_THR_HI_V2); 1469 wed_w32(dev, MTK_WED_TX_TKID_CTRL, 1470 MTK_WED_TX_TKID_CTRL_PAUSE | 1471 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, 1472 dev->tx_buf_ring.size / 128) | 1473 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, 1474 dev->tx_buf_ring.size / 128)); 1475 } 1476 1477 wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid, 1478 FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) | 1479 FIELD_PREP(MTK_WED_TX_BM_TKID_END, 1480 dev->wlan.token_start + dev->wlan.nbuf - 1)); 1481 1482 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 1483 1484 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1485 /* switch to new bm architecture */ 1486 wed_clr(dev, MTK_WED_TX_BM_CTRL, 1487 MTK_WED_TX_BM_CTRL_LEGACY_EN); 1488 1489 wed_w32(dev, MTK_WED_TX_TKID_CTRL, 1490 MTK_WED_TX_TKID_CTRL_PAUSE | 1491 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3, 1492 dev->wlan.nbuf / 128) | 1493 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3, 1494 dev->wlan.nbuf / 128)); 1495 /* return SKBID + SDP back to bm */ 1496 wed_set(dev, MTK_WED_TX_TKID_CTRL, 1497 MTK_WED_TX_TKID_CTRL_FREE_FORMAT); 1498 1499 wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, 1500 MTK_WED_TX_BM_PKT_CNT | 1501 MTK_WED_TX_BM_INIT_SW_TAIL_IDX); 1502 } 1503 1504 if (mtk_wed_is_v1(dev->hw)) { 1505 wed_set(dev, MTK_WED_CTRL, 1506 MTK_WED_CTRL_WED_TX_BM_EN | 1507 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1508 } else if (mtk_wed_get_rx_capa(dev)) { 1509 /* rx hw init */ 1510 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1511 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 1512 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 1513 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 1514 1515 /* reset prefetch index of ring */ 1516 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, 1517 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1518 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, 1519 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1520 1521 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, 1522 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1523 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, 1524 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1525 1526 /* reset prefetch FIFO of ring */ 1527 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 1528 MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR | 1529 MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR); 1530 wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0); 1531 1532 mtk_wed_rx_buffer_hw_init(dev); 1533 mtk_wed_rro_hw_init(dev); 1534 mtk_wed_route_qm_hw_init(dev); 1535 } 1536 1537 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); 1538 if (!mtk_wed_is_v1(dev->hw)) 1539 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); 1540 } 1541 1542 static void 1543 mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) 1544 { 1545 void *head = (void *)ring->desc; 1546 int i; 1547 1548 for (i = 0; i < size; i++) { 1549 struct mtk_wdma_desc *desc; 1550 1551 desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); 1552 desc->buf0 = 0; 1553 if (tx) 1554 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1555 else 1556 desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); 1557 desc->buf1 = 0; 1558 desc->info = 0; 1559 } 1560 } 1561 1562 static int 1563 mtk_wed_rx_reset(struct mtk_wed_device *dev) 1564 { 1565 struct mtk_wed_wo *wo = dev->hw->wed_wo; 1566 u8 val = MTK_WED_WO_STATE_SER_RESET; 1567 int i, ret; 1568 1569 ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1570 MTK_WED_WO_CMD_CHANGE_STATE, &val, 1571 sizeof(val), true); 1572 if (ret) 1573 return ret; 1574 1575 if (dev->wlan.hw_rro) { 1576 wed_clr(dev, MTK_WED_CTRL, 
MTK_WED_CTRL_WED_RX_IND_CMD_EN); 1577 mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS, 1578 MTK_WED_RX_IND_CMD_BUSY); 1579 mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG); 1580 } 1581 1582 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); 1583 ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1584 MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); 1585 if (!ret && mtk_wed_is_v3_or_greater(dev->hw)) 1586 ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1587 MTK_WED_WPDMA_RX_D_PREF_BUSY); 1588 if (ret) { 1589 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1590 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); 1591 } else { 1592 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1593 /* 1.a. disable prefetch HW */ 1594 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1595 MTK_WED_WPDMA_RX_D_PREF_EN); 1596 mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1597 MTK_WED_WPDMA_RX_D_PREF_BUSY); 1598 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1599 MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL); 1600 } 1601 1602 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1603 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 1604 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 1605 1606 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1607 MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | 1608 MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); 1609 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1610 MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | 1611 MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); 1612 1613 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 1614 } 1615 1616 /* reset rro qm */ 1617 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 1618 ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1619 MTK_WED_CTRL_RX_RRO_QM_BUSY); 1620 if (ret) { 1621 mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM); 1622 } else { 1623 wed_set(dev, MTK_WED_RROQM_RST_IDX, 1624 MTK_WED_RROQM_RST_IDX_MIOD | 1625 MTK_WED_RROQM_RST_IDX_FDBK); 1626 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 1627 } 1628 1629 if (dev->wlan.hw_rro) { 1630 /* disable rro msdu page drv */ 1631 wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1632 MTK_WED_RRO_MSDU_PG_DRV_EN); 1633 1634 /* disable rro data drv */ 1635 wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); 1636 1637 /* rro msdu page drv reset */ 1638 wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1639 MTK_WED_RRO_MSDU_PG_DRV_CLR); 1640 mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1641 MTK_WED_RRO_MSDU_PG_DRV_CLR); 1642 1643 /* rro data drv reset */ 1644 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2), 1645 MTK_WED_RRO_RX_D_DRV_CLR); 1646 mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2), 1647 MTK_WED_RRO_RX_D_DRV_CLR); 1648 } 1649 1650 /* reset route qm */ 1651 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 1652 ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1653 MTK_WED_CTRL_RX_ROUTE_QM_BUSY); 1654 if (ret) { 1655 mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); 1656 } else if (mtk_wed_is_v3_or_greater(dev->hw)) { 1657 wed_set(dev, MTK_WED_RTQM_RST, BIT(0)); 1658 wed_clr(dev, MTK_WED_RTQM_RST, BIT(0)); 1659 mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); 1660 } else { 1661 wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1662 } 1663 1664 /* reset tx wdma */ 1665 mtk_wdma_tx_reset(dev); 1666 1667 /* reset tx wdma drv */ 1668 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); 1669 if (mtk_wed_is_v3_or_greater(dev->hw)) 1670 mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS, 1671 MTK_WED_WPDMA_STATUS_TX_DRV); 1672 else 1673 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1674 MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); 1675 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); 1676 1677 /* reset wed rx dma */ 1678 ret 
= mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, 1679 MTK_WED_GLO_CFG_RX_DMA_BUSY); 1680 wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN); 1681 if (ret) { 1682 mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); 1683 } else { 1684 wed_set(dev, MTK_WED_RESET_IDX, 1685 dev->hw->soc->regmap.reset_idx_rx_mask); 1686 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1687 } 1688 1689 /* reset rx bm */ 1690 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 1691 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1692 MTK_WED_CTRL_WED_RX_BM_BUSY); 1693 mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); 1694 1695 if (dev->wlan.hw_rro) { 1696 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); 1697 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1698 MTK_WED_CTRL_WED_RX_PG_BM_BUSY); 1699 wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); 1700 wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); 1701 } 1702 1703 /* wo change to enable state */ 1704 val = MTK_WED_WO_STATE_ENABLE; 1705 ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1706 MTK_WED_WO_CMD_CHANGE_STATE, &val, 1707 sizeof(val), true); 1708 if (ret) 1709 return ret; 1710 1711 /* wed_rx_ring_reset */ 1712 for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) { 1713 if (!dev->rx_ring[i].desc) 1714 continue; 1715 1716 mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE, 1717 false); 1718 } 1719 mtk_wed_free_rx_buffer(dev); 1720 mtk_wed_hwrro_free_buffer(dev); 1721 1722 return 0; 1723 } 1724 1725 static void 1726 mtk_wed_reset_dma(struct mtk_wed_device *dev) 1727 { 1728 bool busy = false; 1729 u32 val; 1730 int i; 1731 1732 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { 1733 if (!dev->tx_ring[i].desc) 1734 continue; 1735 1736 mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE, 1737 true); 1738 } 1739 1740 /* 1. reset WED tx DMA */ 1741 wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN); 1742 busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, 1743 MTK_WED_GLO_CFG_TX_DMA_BUSY); 1744 if (busy) { 1745 mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); 1746 } else { 1747 wed_w32(dev, MTK_WED_RESET_IDX, 1748 dev->hw->soc->regmap.reset_idx_tx_mask); 1749 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1750 } 1751 1752 /* 2. reset WDMA rx DMA */ 1753 busy = !!mtk_wdma_rx_reset(dev); 1754 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1755 val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE | 1756 wed_r32(dev, MTK_WED_WDMA_GLO_CFG); 1757 val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN; 1758 wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val); 1759 } else { 1760 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1761 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 1762 } 1763 1764 if (!busy) 1765 busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, 1766 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); 1767 if (!busy && mtk_wed_is_v3_or_greater(dev->hw)) 1768 busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, 1769 MTK_WED_WDMA_RX_PREF_BUSY); 1770 1771 if (busy) { 1772 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); 1773 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); 1774 } else { 1775 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1776 /* 1.a. disable prefetch HW */ 1777 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 1778 MTK_WED_WDMA_RX_PREF_EN); 1779 mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, 1780 MTK_WED_WDMA_RX_PREF_BUSY); 1781 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 1782 MTK_WED_WDMA_RX_PREF_DDONE2_EN); 1783 1784 /* 2. 
Reset dma index */ 1785 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 1786 MTK_WED_WDMA_RESET_IDX_RX_ALL); 1787 } 1788 1789 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 1790 MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); 1791 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); 1792 1793 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 1794 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1795 1796 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1797 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1798 } 1799 1800 /* 3. reset WED WPDMA tx */ 1801 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1802 1803 for (i = 0; i < 100; i++) { 1804 if (mtk_wed_is_v1(dev->hw)) 1805 val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, 1806 wed_r32(dev, MTK_WED_TX_BM_INTF)); 1807 else 1808 val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, 1809 wed_r32(dev, MTK_WED_TX_TKID_INTF)); 1810 if (val == 0x40) 1811 break; 1812 } 1813 1814 mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); 1815 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN); 1816 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 1817 1818 /* 4. reset WED WPDMA tx */ 1819 busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, 1820 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY); 1821 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1822 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 1823 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 1824 if (!busy) 1825 busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, 1826 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY); 1827 1828 if (busy) { 1829 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1830 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); 1831 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); 1832 if (mtk_wed_is_v3_or_greater(dev->hw)) 1833 wed_w32(dev, MTK_WED_RX1_CTRL2, 0); 1834 } else { 1835 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 1836 MTK_WED_WPDMA_RESET_IDX_TX | 1837 MTK_WED_WPDMA_RESET_IDX_RX); 1838 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); 1839 } 1840 1841 dev->init_done = false; 1842 if (mtk_wed_is_v1(dev->hw)) 1843 return; 1844 1845 if (!busy) { 1846 wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX); 1847 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1848 } 1849 1850 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1851 /* reset amsdu engine */ 1852 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 1853 mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU); 1854 } 1855 1856 if (mtk_wed_get_rx_capa(dev)) 1857 mtk_wed_rx_reset(dev); 1858 } 1859 1860 static int 1861 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 1862 int size, u32 desc_size, bool tx) 1863 { 1864 ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, 1865 &ring->desc_phys, GFP_KERNEL); 1866 if (!ring->desc) 1867 return -ENOMEM; 1868 1869 ring->desc_size = desc_size; 1870 ring->size = size; 1871 mtk_wed_ring_reset(ring, size, tx); 1872 1873 return 0; 1874 } 1875 1876 static int 1877 mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, 1878 bool reset) 1879 { 1880 struct mtk_wed_ring *wdma; 1881 1882 if (idx >= ARRAY_SIZE(dev->rx_wdma)) 1883 return -EINVAL; 1884 1885 wdma = &dev->rx_wdma[idx]; 1886 if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, 1887 dev->hw->soc->wdma_desc_size, true)) 1888 return -ENOMEM; 1889 1890 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1891 wdma->desc_phys); 1892 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1893 size); 1894 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1895 1896 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1897 wdma->desc_phys); 1898 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + 
MTK_WED_RING_OFS_COUNT, 1899 size); 1900 1901 return 0; 1902 } 1903 1904 static int 1905 mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, 1906 bool reset) 1907 { 1908 struct mtk_wed_ring *wdma; 1909 1910 if (idx >= ARRAY_SIZE(dev->tx_wdma)) 1911 return -EINVAL; 1912 1913 wdma = &dev->tx_wdma[idx]; 1914 if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, 1915 dev->hw->soc->wdma_desc_size, true)) 1916 return -ENOMEM; 1917 1918 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1919 struct mtk_wdma_desc *desc = wdma->desc; 1920 int i; 1921 1922 for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) { 1923 desc->buf0 = 0; 1924 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1925 desc->buf1 = 0; 1926 desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE); 1927 desc++; 1928 desc->buf0 = 0; 1929 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1930 desc->buf1 = 0; 1931 desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE); 1932 desc++; 1933 } 1934 } 1935 1936 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 1937 wdma->desc_phys); 1938 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 1939 size); 1940 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1941 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); 1942 1943 if (reset) 1944 mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true); 1945 1946 if (!idx) { 1947 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, 1948 wdma->desc_phys); 1949 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, 1950 size); 1951 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, 1952 0); 1953 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, 1954 0); 1955 } 1956 1957 return 0; 1958 } 1959 1960 static void 1961 mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, 1962 u32 reason, u32 hash) 1963 { 1964 struct mtk_eth *eth = dev->hw->eth; 1965 struct ethhdr *eh; 1966 1967 if (!skb) 1968 return; 1969 1970 if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) 1971 return; 1972 1973 skb_set_mac_header(skb, 0); 1974 eh = eth_hdr(skb); 1975 skb->protocol = eh->h_proto; 1976 mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); 1977 } 1978 1979 static void 1980 mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) 1981 { 1982 u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); 1983 1984 /* wed control cr set */ 1985 wed_set(dev, MTK_WED_CTRL, 1986 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 1987 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 1988 MTK_WED_CTRL_WED_TX_BM_EN | 1989 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1990 1991 if (mtk_wed_is_v1(dev->hw)) { 1992 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, 1993 MTK_WED_PCIE_INT_TRIGGER_STATUS); 1994 1995 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 1996 MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | 1997 MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); 1998 1999 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); 2000 } else { 2001 if (mtk_wed_is_v3_or_greater(dev->hw)) 2002 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); 2003 2004 /* initail tx interrupt trigger */ 2005 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, 2006 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | 2007 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | 2008 MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN | 2009 MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR | 2010 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG, 2011 dev->wlan.tx_tbit[0]) | 2012 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, 2013 dev->wlan.tx_tbit[1])); 2014 2015 /* initail txfree interrupt trigger */ 2016 
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	/* wed control cr set */
	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (mtk_wed_is_v1(dev->hw)) {
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
			MTK_WED_PCIE_INT_TRIGGER_STATUS);

		wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
			MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
			MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
	} else {
		if (mtk_wed_is_v3_or_greater(dev->hw))
			wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);

		/* initial tx interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
				   dev->wlan.tx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
				   dev->wlan.tx_tbit[1]));

		/* initial txfree interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
				   dev->wlan.txfree_tbit));

		if (mtk_wed_get_rx_capa(dev)) {
			wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
				MTK_WED_WPDMA_INT_CTRL_RX0_EN |
				MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
				MTK_WED_WPDMA_INT_CTRL_RX1_EN |
				MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
				FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
					   dev->wlan.rx_tbit[0]) |
				FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
					   dev->wlan.rx_tbit[1]));

			wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
						GENMASK(1, 0));
		}

		wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
		wed_set(dev, MTK_WED_WDMA_INT_CTRL,
			FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
				   dev->wdma_idx));
	}

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}

#define MTK_WFMDA_RX_DMA_EN	BIT(2)
static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
	int i;

	if (!mtk_wed_is_v3_or_greater(dev->hw)) {
		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
			MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_TX_DMA_EN |
			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
		wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED);
	} else {
		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
			MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
		wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
	}

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);

	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	if (mtk_wed_is_v1(dev->hw)) {
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
		return;
	}

	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

	if (mtk_wed_is_v3_or_greater(dev->hw)) {
		wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
			FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
			FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
		wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
			MTK_WED_WDMA_RX_PREF_DDONE2_EN);
		wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);

		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);

		wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
		wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
	}

	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
		MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);

	if (!mtk_wed_get_rx_capa(dev))
		return;

	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);

	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
	wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
		MTK_WED_WPDMA_RX_D_RX_DRV_EN |
		FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
		FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2));

	if (mtk_wed_is_v3_or_greater(dev->hw)) {
		wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
			MTK_WED_WPDMA_RX_D_PREF_EN |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));

		wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
		wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
		wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
	}

	for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
		struct mtk_wed_ring *ring = &dev->rx_ring[i];
		u32 val;

		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
			continue; /* queue is not configured by mt76 */

		if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
			dev_err(dev->hw->dev,
				"rx_ring(%d) dma enable failed\n", i);
			continue;
		}

		val = wifi_r32(dev,
			       dev->wlan.wpdma_rx_glo -
			       dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN;
		wifi_w32(dev,
			 dev->wlan.wpdma_rx_glo - dev->wlan.phy_base,
			 val);
	}
}
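
/* Enable the hardware RRO (rx reordering offload) path. The interrupt masks
 * are always applied; the RRO rx and MSDU page ring configuration only runs
 * when rx offload and hw_rro are available, a reset merely re-enables the
 * MSDU page driver, and rings the WLAN driver has not marked as configured
 * are skipped.
 */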
static void
mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
{
	int i;

	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);

	if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
		return;

	if (reset) {
		wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
			MTK_WED_RRO_MSDU_PG_DRV_EN);
		return;
	}

	wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
	wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
		MTK_WED_RRO_MSDU_PG_DRV_CLR);

	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
			   dev->wlan.rro_rx_tbit[0]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
			   dev->wlan.rro_rx_tbit[1]));

	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[0]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[1]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[2]));

	/* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should only be enabled after
	 * WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be
	 * broken.
	 */
	wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
		MTK_WED_RRO_MSDU_PG_DRV_EN);

	for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
		struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];

		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
			continue;

		if (mtk_wed_check_wfdma_rx_fill(dev, ring))
			dev_err(dev->hw->dev,
				"rx_rro_ring(%d) initialization failed\n", i);
	}

	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
		struct mtk_wed_ring *ring = &dev->rx_page_ring[i];

		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
			continue;

		if (mtk_wed_check_wfdma_rx_fill(dev, ring))
			dev_err(dev->hw->dev,
				"rx_page_ring(%d) initialization failed\n", i);
	}
}

static void
mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
			  void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];

	ring->wpdma = regs;
	wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
		readl(regs));
	wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
		readl(regs + MTK_WED_RING_OFS_COUNT));
	ring->flags |= MTK_WED_RING_CONFIGURED;
}

static void
mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];

	ring->wpdma = regs;
	wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
		readl(regs));
	wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
		readl(regs + MTK_WED_RING_OFS_COUNT));
	ring->flags |= MTK_WED_RING_CONFIGURED;
}

static int
mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
	u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
	int i, count = 0;

	ring->wpdma = regs;
	wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
		readl(regs) & 0xfffffff0);

	wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
		readl(regs + MTK_WED_RING_OFS_COUNT));

	/* ack sn cr */
	wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
		dev->wlan.ind_cmd.ack_sn_addr);
	wed_w32(dev, MTK_WED_RRO_CFG1,
		FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
			   dev->wlan.ind_cmd.win_size) |
		FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
			   dev->wlan.ind_cmd.particular_sid));

	/* particular session addr element */
	wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
		dev->wlan.ind_cmd.particular_se_phys);

	for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
		wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
			dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
		wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
			MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));

		val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
		while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
			val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
		if (count >= 100)
			dev_err(dev->hw->dev,
				"write ba session base failed\n");
	}

	/* pn check init */
	for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
		wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
			MTK_WED_PN_CHECK_IS_FIRST);

		wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
			FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));

		count = 0;
		val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
		while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
			val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
		if (count >= 100)
			dev_err(dev->hw->dev,
				"session(%d) initialization failed\n", i);
	}

	wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);

	return 0;
}
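
/* Bring the WED instance up: allocate rx buffers when rx offload is
 * available, install default WDMA rx rings, initialize the hardware and
 * interrupt routing, and finally turn the DMA engines on. On v1 hardware
 * the PCIe mirror regmap is programmed instead of the extra MID_RDY
 * interrupt masks used on newer versions.
 */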
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	int i;

	if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
		return;

	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		if (!dev->rx_wdma[i].desc)
			mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);

	mtk_wed_hw_init(dev);
	mtk_wed_configure_irq(dev, irq_mask);

	mtk_wed_set_ext_int(dev, true);

	if (mtk_wed_is_v1(dev->hw)) {
		u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
			  FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
				     dev->hw->index);

		val |= BIT(0) | (BIT(1) * !!dev->hw->index);
		regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
	} else if (mtk_wed_get_rx_capa(dev)) {
		/* the driver sets mid ready, and only once */
		wed_w32(dev, MTK_WED_EXT_INT_MASK1,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
		wed_w32(dev, MTK_WED_EXT_INT_MASK2,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);

		wed_r32(dev, MTK_WED_EXT_INT_MASK1);
		wed_r32(dev, MTK_WED_EXT_INT_MASK2);

		if (mtk_wed_is_v3_or_greater(dev->hw)) {
			wed_w32(dev, MTK_WED_EXT_INT_MASK3,
				MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
			wed_r32(dev, MTK_WED_EXT_INT_MASK3);
		}

		if (mtk_wed_rro_cfg(dev))
			return;
	}

	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
	mtk_wed_amsdu_init(dev);

	mtk_wed_dma_enable(dev);
	dev->running = true;
}
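
/* Attach a WLAN device to a free WED instance. Called by the WLAN driver
 * while holding the RCU read lock (released here). On success the wed
 * device is bound to the selected hw instance, its tx and amsdu buffers
 * (and, with rx offload, the rro resources) are allocated, and the WO
 * microcontroller support is initialized via mtk_wed_wo_init().
 */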
static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	struct device *device;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
	     pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto unlock;
	}

	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
		 ? &dev->wlan.pci_dev->dev
		 : &dev->wlan.platform_dev->dev;
	dev_info(device, "attaching wed device %d version %d\n",
		 hw->index, hw->version);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;
	dev->version = hw->version;
	dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_tx_buffer_alloc(dev);
	if (ret)
		goto out;

	ret = mtk_wed_amsdu_buffer_alloc(dev);
	if (ret)
		goto out;

	if (mtk_wed_get_rx_capa(dev)) {
		ret = mtk_wed_rro_alloc(dev);
		if (ret)
			goto out;
	}

	mtk_wed_hw_init_early(dev);
	if (mtk_wed_is_v1(hw))
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), 0);
	else
		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);

	if (mtk_wed_get_rx_capa(dev))
		ret = mtk_wed_wo_init(hw);
out:
	if (ret) {
		dev_err(dev->hw->dev, "failed to attach wed device\n");
		__mtk_wed_detach(dev);
	}
unlock:
	mutex_unlock(&hw_lock);

	return ret;
}

static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the DMA ring
	 * allocated by the WLAN driver gets configured into the WED
	 * MTK_WED_RING_TX(n) registers.
	 *
	 * The WED driver posts its own DMA ring as WLAN PDMA TX and configures
	 * it into the MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from the WED TX ring and from
	 * WDMA RX.
	 */

	if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
					 sizeof(*ring->desc), true))
		return -ENOMEM;

	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) {
		/* reset prefetch index */
		wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
			MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
			MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);

		wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
			MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
			MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);

		/* reset prefetch FIFO */
		wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
			MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
			MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
		wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
	}

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}
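
/* Set up the txfree event ring shared between WED and the WLAN driver. WED
 * v1 exposes it as RX ring 1, later versions as RX ring 0 (hence the
 * mtk_wed_is_v1() based index); the ring registers provided by the WLAN
 * driver are mirrored into the WED and WPDMA register windows.
 */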
static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i, index = mtk_wed_is_v1(dev->hw);

	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED.
	 */
	ring->reg_base = MTK_WED_RING_RX(index);
	ring->wpdma = regs;

	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
	}

	return 0;
}

static int
mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->rx_ring[idx];

	if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
					 sizeof(*ring->desc), false))
		return -ENOMEM;

	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_RX_DATA(idx);
	ring->wpdma = regs;
	ring->flags |= MTK_WED_RING_CONFIGURED;

	/* WPDMA -> WED */
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);

	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_RX_RING_SIZE);

	return 0;
}

static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val, ext_mask;

	if (mtk_wed_is_v3_or_greater(dev->hw))
		ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			   MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	else
		ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= ext_mask;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret = 0;

	mutex_lock(&hw_lock);

	if (!hw || !hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	if (!hw->wed_dev->wlan.offload_enable)
		goto out;

	if (hw->num_flows) {
		hw->num_flows++;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	mutex_lock(&hw_lock);

	if (!hw || !hw->wed_dev)
		goto out;

	if (!hw->wed_dev->wlan.offload_disable)
		goto out;

	if (--hw->num_flows)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}
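
/* flow_block callback for the tc offload path: only CLSFLOWER requests on
 * offload-capable devices are accepted and forwarded to
 * mtk_flow_offload_cmd().
 */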
static int
mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct mtk_wed_flow_block_priv *priv = cb_priv;
	struct flow_cls_offload *cls = type_data;
	struct mtk_wed_hw *hw = priv->hw;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}

static int
mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
		       struct flow_block_offload *f)
{
	struct mtk_wed_flow_block_priv *priv;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	struct mtk_eth *eth = hw->eth;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_wed_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->hw = hw;
		priv->dev = dev;
		block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
		if (IS_ERR(block_cb)) {
			kfree(priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
			kfree(block_cb->cb_priv);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
		 enum tc_setup_type type, void *type_data)
{
	struct mtk_wed_hw *hw = wed->hw;

	if (mtk_wed_is_v1(hw))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_wed_setup_tc_block(hw, dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
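
/* Register the WED hardware instance @index described by device tree node
 * @np: look up its irq and register space, publish the mtk_wed_ops table
 * and add the instance to hw_list so that WLAN drivers can attach to it
 * later.
 */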
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.rx_ring_setup = mtk_wed_rx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.msg_update = mtk_wed_mcu_msg_update,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
		.ppe_check = mtk_wed_ppe_check,
		.setup_tc = mtk_wed_setup_tc,
		.start_hw_rro = mtk_wed_start_hw_rro,
		.rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
		.msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
		.ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		goto err_put_device;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma_phy = wdma_phy;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->version = eth->soc->version;

	switch (hw->version) {
	case 2:
		hw->soc = &mt7986_data;
		break;
	case 3:
		hw->soc = &mt7988_data;
		break;
	default:
	case 1:
		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,pcie-mirror");
		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,hifsys");
		if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
			kfree(hw);
			goto unlock;
		}

		if (!index) {
			regmap_write(hw->mirror, 0, 0);
			regmap_write(hw->mirror, 4, 0);
		}
		hw->soc = &mt7622_data;
		break;
	}

	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(&hw_lock);

	return;

unlock:
	mutex_unlock(&hw_lock);
err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}

void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		of_node_put(hw->node);
		kfree(hw);
	}
}