1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ 3 4 #include <linux/kernel.h> 5 #include <linux/platform_device.h> 6 #include <linux/slab.h> 7 #include <linux/module.h> 8 #include <linux/bitfield.h> 9 #include <linux/dma-mapping.h> 10 #include <linux/skbuff.h> 11 #include <linux/of_platform.h> 12 #include <linux/of_address.h> 13 #include <linux/of_reserved_mem.h> 14 #include <linux/mfd/syscon.h> 15 #include <linux/debugfs.h> 16 #include <linux/soc/mediatek/mtk_wed.h> 17 #include <net/flow_offload.h> 18 #include <net/pkt_cls.h> 19 #include "mtk_eth_soc.h" 20 #include "mtk_wed.h" 21 #include "mtk_ppe.h" 22 #include "mtk_wed_wo.h" 23 24 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) 25 26 #define MTK_WED_PKT_SIZE 1920 27 #define MTK_WED_BUF_SIZE 2048 28 #define MTK_WED_PAGE_BUF_SIZE 128 29 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) 30 #define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE) 31 #define MTK_WED_RX_RING_SIZE 1536 32 #define MTK_WED_RX_PG_BM_CNT 8192 33 #define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4) 34 #define MTK_WED_AMSDU_NPAGES 32 35 36 #define MTK_WED_TX_RING_SIZE 2048 37 #define MTK_WED_WDMA_RING_SIZE 1024 38 #define MTK_WED_MAX_GROUP_SIZE 0x100 39 #define MTK_WED_VLD_GROUP_SIZE 0x40 40 #define MTK_WED_PER_GROUP_PKT 128 41 42 #define MTK_WED_FBUF_SIZE 128 43 #define MTK_WED_MIOD_CNT 16 44 #define MTK_WED_FB_CMD_CNT 1024 45 #define MTK_WED_RRO_QUE_CNT 8192 46 #define MTK_WED_MIOD_ENTRY_CNT 128 47 48 #define MTK_WED_TX_BM_DMA_SIZE 65536 49 #define MTK_WED_TX_BM_PKT_CNT 32768 50 51 static struct mtk_wed_hw *hw_list[3]; 52 static DEFINE_MUTEX(hw_lock); 53 54 struct mtk_wed_flow_block_priv { 55 struct mtk_wed_hw *hw; 56 struct net_device *dev; 57 }; 58 59 static const struct mtk_wed_soc_data mt7622_data = { 60 .regmap = { 61 .tx_bm_tkid = 0x088, 62 .wpdma_rx_ring0 = 0x770, 63 .reset_idx_tx_mask = GENMASK(3, 0), 64 .reset_idx_rx_mask = GENMASK(17, 16), 65 }, 66 .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), 67 .wdma_desc_size = sizeof(struct mtk_wdma_desc), 68 }; 69 70 static const struct mtk_wed_soc_data mt7986_data = { 71 .regmap = { 72 .tx_bm_tkid = 0x0c8, 73 .wpdma_rx_ring0 = 0x770, 74 .reset_idx_tx_mask = GENMASK(1, 0), 75 .reset_idx_rx_mask = GENMASK(7, 6), 76 }, 77 .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), 78 .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), 79 }; 80 81 static const struct mtk_wed_soc_data mt7988_data = { 82 .regmap = { 83 .tx_bm_tkid = 0x0c8, 84 .wpdma_rx_ring0 = 0x7d0, 85 .reset_idx_tx_mask = GENMASK(1, 0), 86 .reset_idx_rx_mask = GENMASK(7, 6), 87 }, 88 .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc), 89 .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), 90 }; 91 92 static void 93 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 94 { 95 regmap_update_bits(dev->hw->regs, reg, mask | val, val); 96 } 97 98 static void 99 wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 100 { 101 return wed_m32(dev, reg, 0, mask); 102 } 103 104 static void 105 wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 106 { 107 return wed_m32(dev, reg, mask, 0); 108 } 109 110 static void 111 wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 112 { 113 wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); 114 } 115 116 static void 117 wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 118 { 119 wdma_m32(dev, reg, 0, mask); 120 } 121 122 static void 123 wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 124 { 125 wdma_m32(dev, reg, mask, 0); 126 } 127 128 static u32 129 wifi_r32(struct mtk_wed_device *dev, u32 reg) 130 { 131 return readl(dev->wlan.base + reg); 132 } 133 134 static void 135 wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) 136 { 137 writel(val, dev->wlan.base + reg); 138 } 139 140 static u32 141 mtk_wed_read_reset(struct mtk_wed_device *dev) 142 { 143 return wed_r32(dev, MTK_WED_RESET); 144 } 145 146 static u32 147 mtk_wdma_read_reset(struct mtk_wed_device *dev) 148 { 149 return wdma_r32(dev, MTK_WDMA_GLO_CFG); 150 } 151 152 static void 153 mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) 154 { 155 u32 status; 156 157 if (!mtk_wed_is_v3_or_greater(dev->hw)) 158 return; 159 160 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); 161 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); 162 163 if (read_poll_timeout(wdma_r32, status, 164 !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 165 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) 166 dev_err(dev->hw->dev, "rx reset failed\n"); 167 168 if (read_poll_timeout(wdma_r32, status, 169 !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 170 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) 171 dev_err(dev->hw->dev, "rx reset failed\n"); 172 173 wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); 174 wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); 175 176 if (read_poll_timeout(wdma_r32, status, 177 !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 178 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) 179 dev_err(dev->hw->dev, "rx reset failed\n"); 180 181 if (read_poll_timeout(wdma_r32, status, 182 !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 183 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) 184 dev_err(dev->hw->dev, "rx reset failed\n"); 185 186 /* prefetch FIFO */ 187 wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, 188 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | 189 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); 190 wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, 191 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | 192 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); 193 194 /* core FIFO */ 195 wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, 196 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | 197 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | 198 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | 199 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | 200 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | 201 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | 202 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); 203 wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, 204 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | 205 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | 206 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | 207 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | 208 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | 209 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | 210 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); 211 212 /* writeback FIFO */ 213 wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), 214 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 215 wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), 216 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 217 218 wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), 219 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 220 wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), 221 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); 222 223 /* prefetch ring status */ 224 wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, 225 MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); 226 wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, 227 MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); 228 229 /* writeback ring status */ 230 wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, 231 MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); 232 wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, 233 MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); 234 } 235 236 static int 237 mtk_wdma_rx_reset(struct mtk_wed_device *dev) 238 { 239 u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; 240 int i, ret; 241 242 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); 243 ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status, 244 !(status & mask), 0, 10000); 245 if (ret) 246 dev_err(dev->hw->dev, "rx reset failed\n"); 247 248 mtk_wdma_v3_rx_reset(dev); 249 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); 250 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 251 252 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { 253 if (dev->rx_wdma[i].desc) 254 continue; 255 256 wdma_w32(dev, 257 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 258 } 259 260 return ret; 261 } 262 263 static u32 264 mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) 265 { 266 return !!(wed_r32(dev, reg) & mask); 267 } 268 269 static int 270 mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) 271 { 272 int sleep = 15000; 273 int timeout = 100 * sleep; 274 u32 val; 275 276 return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, 277 timeout, false, dev, reg, mask); 278 } 279 280 static void 281 mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) 282 { 283 u32 status; 284 285 if (!mtk_wed_is_v3_or_greater(dev->hw)) 286 return; 287 288 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); 289 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); 290 291 if (read_poll_timeout(wdma_r32, status, 292 !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 293 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) 294 dev_err(dev->hw->dev, "tx reset failed\n"); 295 296 if (read_poll_timeout(wdma_r32, status, 297 !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 298 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) 299 dev_err(dev->hw->dev, "tx reset failed\n"); 300 301 wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); 302 wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); 303 304 if (read_poll_timeout(wdma_r32, status, 305 !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 306 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) 307 dev_err(dev->hw->dev, "tx reset failed\n"); 308 309 if (read_poll_timeout(wdma_r32, status, 310 !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 311 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) 312 dev_err(dev->hw->dev, "tx reset failed\n"); 313 314 /* prefetch FIFO */ 315 wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, 316 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | 317 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); 318 wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, 319 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | 320 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); 321 322 /* core FIFO */ 323 wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, 324 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | 325 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | 326 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | 327 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); 328 wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, 329 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | 330 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | 331 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | 332 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); 333 334 /* writeback FIFO */ 335 wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), 336 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 337 wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), 338 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 339 340 wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), 341 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 342 wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), 343 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); 344 345 /* prefetch ring status */ 346 wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, 347 MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); 348 wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, 349 MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); 350 351 /* writeback ring status */ 352 wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, 353 MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); 354 wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, 355 MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); 356 } 357 358 static void 359 mtk_wdma_tx_reset(struct mtk_wed_device *dev) 360 { 361 u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; 362 int i; 363 364 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); 365 if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, 366 !(status & mask), 0, 10000)) 367 dev_err(dev->hw->dev, "tx reset failed\n"); 368 369 mtk_wdma_v3_tx_reset(dev); 370 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); 371 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 372 373 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) 374 wdma_w32(dev, 375 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 376 } 377 378 static void 379 mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) 380 { 381 u32 status; 382 383 wed_w32(dev, MTK_WED_RESET, mask); 384 if (readx_poll_timeout(mtk_wed_read_reset, dev, status, 385 !(status & mask), 0, 1000)) 386 WARN_ON_ONCE(1); 387 } 388 389 static u32 390 mtk_wed_wo_read_status(struct mtk_wed_device *dev) 391 { 392 return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS); 393 } 394 395 static void 396 mtk_wed_wo_reset(struct mtk_wed_device *dev) 397 { 398 struct mtk_wed_wo *wo = dev->hw->wed_wo; 399 u8 state = MTK_WED_WO_STATE_DISABLE; 400 void __iomem *reg; 401 u32 val; 402 403 mtk_wdma_tx_reset(dev); 404 mtk_wed_reset(dev, MTK_WED_RESET_WED); 405 406 if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 407 MTK_WED_WO_CMD_CHANGE_STATE, &state, 408 sizeof(state), false)) 409 return; 410 411 if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, 412 val == MTK_WED_WOIF_DISABLE_DONE, 413 100, MTK_WOCPU_TIMEOUT)) 414 dev_err(dev->hw->dev, "failed to disable wed-wo\n"); 415 416 reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4); 417 418 val = readl(reg); 419 switch (dev->hw->index) { 420 case 0: 421 val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 422 writel(val, reg); 423 val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 424 writel(val, reg); 425 break; 426 case 1: 427 val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 428 writel(val, reg); 429 val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 430 writel(val, reg); 431 break; 432 default: 433 break; 434 } 435 iounmap(reg); 436 } 437 438 void mtk_wed_fe_reset(void) 439 { 440 int i; 441 442 mutex_lock(&hw_lock); 443 444 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 445 struct mtk_wed_hw *hw = hw_list[i]; 446 struct mtk_wed_device *dev; 447 int err; 448 449 if (!hw) 450 break; 451 452 dev = hw->wed_dev; 453 if (!dev || !dev->wlan.reset) 454 continue; 455 456 /* reset callback blocks until WLAN reset is completed */ 457 err = dev->wlan.reset(dev); 458 if (err) 459 dev_err(dev->dev, "wlan reset failed: %d\n", err); 460 } 461 462 mutex_unlock(&hw_lock); 463 } 464 465 void mtk_wed_fe_reset_complete(void) 466 { 467 int i; 468 469 mutex_lock(&hw_lock); 470 471 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 472 struct mtk_wed_hw *hw = hw_list[i]; 473 struct mtk_wed_device *dev; 474 475 if (!hw) 476 break; 477 478 dev = hw->wed_dev; 479 if (!dev || !dev->wlan.reset_complete) 480 continue; 481 482 dev->wlan.reset_complete(dev); 483 } 484 485 mutex_unlock(&hw_lock); 486 } 487 488 static struct mtk_wed_hw * 489 mtk_wed_assign(struct mtk_wed_device *dev) 490 { 491 struct mtk_wed_hw *hw; 492 int i; 493 494 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 495 hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; 496 if (!hw) 497 return NULL; 498 499 if (!hw->wed_dev) 500 goto out; 501 502 if (mtk_wed_is_v1(hw)) 503 return NULL; 504 505 /* MT7986 WED devices do not have any pcie slot restrictions */ 506 } 507 /* MT7986 PCIE or AXI */ 508 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 509 hw = hw_list[i]; 510 if (hw && !hw->wed_dev) 511 goto out; 512 } 513 514 return NULL; 515 516 out: 517 hw->wed_dev = dev; 518 return hw; 519 } 520 521 static int 522 mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev) 523 { 524 struct mtk_wed_hw *hw = dev->hw; 525 struct mtk_wed_amsdu *wed_amsdu; 526 int i; 527 528 if (!mtk_wed_is_v3_or_greater(hw)) 529 return 0; 530 531 wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES, 532 sizeof(*wed_amsdu), GFP_KERNEL); 533 if (!wed_amsdu) 534 return -ENOMEM; 535 536 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { 537 void *ptr; 538 539 /* each segment is 64K */ 540 ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | 541 __GFP_ZERO | __GFP_COMP | 542 GFP_DMA32, 543 get_order(MTK_WED_AMSDU_BUF_SIZE)); 544 if (!ptr) 545 goto error; 546 547 wed_amsdu[i].txd = ptr; 548 wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr, 549 MTK_WED_AMSDU_BUF_SIZE, 550 DMA_TO_DEVICE); 551 if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy)) 552 goto error; 553 } 554 dev->hw->wed_amsdu = wed_amsdu; 555 556 return 0; 557 558 error: 559 for (i--; i >= 0; i--) 560 dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy, 561 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); 562 return -ENOMEM; 563 } 564 565 static void 566 mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev) 567 { 568 struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; 569 int i; 570 571 if (!wed_amsdu) 572 return; 573 574 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { 575 dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy, 576 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); 577 free_pages((unsigned long)wed_amsdu[i].txd, 578 get_order(MTK_WED_AMSDU_BUF_SIZE)); 579 } 580 } 581 582 static int 583 mtk_wed_amsdu_init(struct mtk_wed_device *dev) 584 { 585 struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; 586 int i, ret; 587 588 if (!wed_amsdu) 589 return 0; 590 591 for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) 592 wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i), 593 wed_amsdu[i].txd_phy); 594 595 /* init all sta parameter */ 596 wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL | 597 MTK_WED_AMSDU_STA_WTBL_HDRT_MODE | 598 FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN, 599 dev->wlan.amsdu_max_len >> 8) | 600 FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM, 601 dev->wlan.amsdu_max_subframes)); 602 603 wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT); 604 605 ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO, 606 MTK_WED_AMSDU_STA_INFO_DO_INIT); 607 if (ret) { 608 dev_err(dev->hw->dev, "amsdu initialization failed\n"); 609 return ret; 610 } 611 612 /* init partial amsdu offload txd src */ 613 wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG, 614 FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index)); 615 616 /* init qmem */ 617 wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET); 618 ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29)); 619 if (ret) { 620 pr_info("%s: amsdu qmem initialization failed\n", __func__); 621 return ret; 622 } 623 624 /* eagle E1 PCIE1 tx ring 22 flow control issue */ 625 if (dev->wlan.id == 0x7991) 626 wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING); 627 628 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 629 630 return 0; 631 } 632 633 static int 634 mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) 635 { 636 u32 desc_size = dev->hw->soc->tx_ring_desc_size; 637 int i, page_idx = 0, n_pages, ring_size; 638 int token = dev->wlan.token_start; 639 struct mtk_wed_buf *page_list; 640 dma_addr_t desc_phys; 641 void *desc_ptr; 642 643 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 644 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); 645 dev->tx_buf_ring.size = ring_size; 646 } else { 647 dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE; 648 ring_size = MTK_WED_TX_BM_PKT_CNT; 649 } 650 n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE; 651 652 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); 653 if (!page_list) 654 return -ENOMEM; 655 656 dev->tx_buf_ring.pages = page_list; 657 658 desc_ptr = dma_alloc_coherent(dev->hw->dev, 659 dev->tx_buf_ring.size * desc_size, 660 &desc_phys, GFP_KERNEL); 661 if (!desc_ptr) 662 return -ENOMEM; 663 664 dev->tx_buf_ring.desc = desc_ptr; 665 dev->tx_buf_ring.desc_phys = desc_phys; 666 667 for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { 668 dma_addr_t page_phys, buf_phys; 669 struct page *page; 670 void *buf; 671 int s; 672 673 page = __dev_alloc_pages(GFP_KERNEL, 0); 674 if (!page) 675 return -ENOMEM; 676 677 page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, 678 DMA_BIDIRECTIONAL); 679 if (dma_mapping_error(dev->hw->dev, page_phys)) { 680 __free_page(page); 681 return -ENOMEM; 682 } 683 684 page_list[page_idx].p = page; 685 page_list[page_idx++].phy_addr = page_phys; 686 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, 687 DMA_BIDIRECTIONAL); 688 689 buf = page_to_virt(page); 690 buf_phys = page_phys; 691 692 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { 693 struct mtk_wdma_desc *desc = desc_ptr; 694 695 desc->buf0 = cpu_to_le32(buf_phys); 696 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 697 u32 txd_size, ctrl; 698 699 txd_size = dev->wlan.init_buf(buf, buf_phys, 700 token++); 701 desc->buf1 = cpu_to_le32(buf_phys + txd_size); 702 ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size); 703 if (mtk_wed_is_v1(dev->hw)) 704 ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 | 705 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, 706 MTK_WED_BUF_SIZE - txd_size); 707 else 708 ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 | 709 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, 710 MTK_WED_BUF_SIZE - txd_size); 711 desc->ctrl = cpu_to_le32(ctrl); 712 desc->info = 0; 713 } else { 714 desc->ctrl = cpu_to_le32(token << 16); 715 } 716 717 desc_ptr += desc_size; 718 buf += MTK_WED_BUF_SIZE; 719 buf_phys += MTK_WED_BUF_SIZE; 720 } 721 722 dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, 723 DMA_BIDIRECTIONAL); 724 } 725 726 return 0; 727 } 728 729 static void 730 mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) 731 { 732 struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages; 733 struct mtk_wed_hw *hw = dev->hw; 734 int i, page_idx = 0; 735 736 if (!page_list) 737 return; 738 739 if (!dev->tx_buf_ring.desc) 740 goto free_pagelist; 741 742 for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { 743 dma_addr_t page_phy = page_list[page_idx].phy_addr; 744 void *page = page_list[page_idx++].p; 745 746 if (!page) 747 break; 748 749 dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE, 750 DMA_BIDIRECTIONAL); 751 __free_page(page); 752 } 753 754 dma_free_coherent(dev->hw->dev, 755 dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size, 756 dev->tx_buf_ring.desc, 757 dev->tx_buf_ring.desc_phys); 758 759 free_pagelist: 760 kfree(page_list); 761 } 762 763 static int 764 mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev) 765 { 766 int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE; 767 struct mtk_wed_buf *page_list; 768 struct mtk_wed_bm_desc *desc; 769 dma_addr_t desc_phys; 770 int i, page_idx = 0; 771 772 if (!dev->wlan.hw_rro) 773 return 0; 774 775 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); 776 if (!page_list) 777 return -ENOMEM; 778 779 dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); 780 dev->hw_rro.pages = page_list; 781 desc = dma_alloc_coherent(dev->hw->dev, 782 dev->wlan.rx_nbuf * sizeof(*desc), 783 &desc_phys, GFP_KERNEL); 784 if (!desc) 785 return -ENOMEM; 786 787 dev->hw_rro.desc = desc; 788 dev->hw_rro.desc_phys = desc_phys; 789 790 for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { 791 dma_addr_t page_phys, buf_phys; 792 struct page *page; 793 int s; 794 795 page = __dev_alloc_page(GFP_KERNEL); 796 if (!page) 797 return -ENOMEM; 798 799 page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, 800 DMA_BIDIRECTIONAL); 801 if (dma_mapping_error(dev->hw->dev, page_phys)) { 802 __free_page(page); 803 return -ENOMEM; 804 } 805 806 page_list[page_idx].p = page; 807 page_list[page_idx++].phy_addr = page_phys; 808 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, 809 DMA_BIDIRECTIONAL); 810 811 buf_phys = page_phys; 812 for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) { 813 desc->buf0 = cpu_to_le32(buf_phys); 814 buf_phys += MTK_WED_PAGE_BUF_SIZE; 815 desc++; 816 } 817 818 dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, 819 DMA_BIDIRECTIONAL); 820 } 821 822 return 0; 823 } 824 825 static int 826 mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) 827 { 828 struct mtk_wed_bm_desc *desc; 829 dma_addr_t desc_phys; 830 831 dev->rx_buf_ring.size = dev->wlan.rx_nbuf; 832 desc = dma_alloc_coherent(dev->hw->dev, 833 dev->wlan.rx_nbuf * sizeof(*desc), 834 &desc_phys, GFP_KERNEL); 835 if (!desc) 836 return -ENOMEM; 837 838 dev->rx_buf_ring.desc = desc; 839 dev->rx_buf_ring.desc_phys = desc_phys; 840 dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); 841 842 return mtk_wed_hwrro_buffer_alloc(dev); 843 } 844 845 static void 846 mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev) 847 { 848 struct mtk_wed_buf *page_list = dev->hw_rro.pages; 849 struct mtk_wed_bm_desc *desc = dev->hw_rro.desc; 850 int i, page_idx = 0; 851 852 if (!dev->wlan.hw_rro) 853 return; 854 855 if (!page_list) 856 return; 857 858 if (!desc) 859 goto free_pagelist; 860 861 for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { 862 dma_addr_t buf_addr = page_list[page_idx].phy_addr; 863 void *page = page_list[page_idx++].p; 864 865 if (!page) 866 break; 867 868 dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, 869 DMA_BIDIRECTIONAL); 870 __free_page(page); 871 } 872 873 dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc), 874 desc, dev->hw_rro.desc_phys); 875 876 free_pagelist: 877 kfree(page_list); 878 } 879 880 static void 881 mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) 882 { 883 struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc; 884 885 if (!desc) 886 return; 887 888 dev->wlan.release_rx_buf(dev); 889 dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), 890 desc, dev->rx_buf_ring.desc_phys); 891 892 mtk_wed_hwrro_free_buffer(dev); 893 } 894 895 static void 896 mtk_wed_hwrro_init(struct mtk_wed_device *dev) 897 { 898 if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) 899 return; 900 901 wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM, 902 FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128)); 903 904 wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys); 905 906 wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR, 907 MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX | 908 FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX, 909 MTK_WED_RX_PG_BM_CNT)); 910 911 /* enable rx_page_bm to fetch dmad */ 912 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); 913 } 914 915 static void 916 mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) 917 { 918 wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, 919 FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); 920 wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); 921 wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | 922 FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); 923 wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, 924 FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); 925 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 926 927 mtk_wed_hwrro_init(dev); 928 } 929 930 static void 931 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) 932 { 933 if (!ring->desc) 934 return; 935 936 dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size, 937 ring->desc, ring->desc_phys); 938 } 939 940 static void 941 mtk_wed_free_rx_rings(struct mtk_wed_device *dev) 942 { 943 mtk_wed_free_rx_buffer(dev); 944 mtk_wed_free_ring(dev, &dev->rro.ring); 945 } 946 947 static void 948 mtk_wed_free_tx_rings(struct mtk_wed_device *dev) 949 { 950 int i; 951 952 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) 953 mtk_wed_free_ring(dev, &dev->tx_ring[i]); 954 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 955 mtk_wed_free_ring(dev, &dev->rx_wdma[i]); 956 } 957 958 static void 959 mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) 960 { 961 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 962 963 switch (dev->hw->version) { 964 case 1: 965 mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; 966 break; 967 case 2: 968 mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | 969 MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | 970 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 971 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; 972 break; 973 case 3: 974 mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 975 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 976 break; 977 default: 978 break; 979 } 980 981 if (!dev->hw->num_flows) 982 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 983 984 wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0); 985 wed_r32(dev, MTK_WED_EXT_INT_MASK); 986 } 987 988 static void 989 mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) 990 { 991 if (!mtk_wed_is_v2(dev->hw)) 992 return; 993 994 if (enable) { 995 wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 996 wed_w32(dev, MTK_WED_TXP_DW1, 997 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103)); 998 } else { 999 wed_w32(dev, MTK_WED_TXP_DW1, 1000 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100)); 1001 wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 1002 } 1003 } 1004 1005 static int 1006 mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, 1007 struct mtk_wed_ring *ring) 1008 { 1009 int i; 1010 1011 for (i = 0; i < 3; i++) { 1012 u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX); 1013 1014 if (cur_idx == MTK_WED_RX_RING_SIZE - 1) 1015 break; 1016 1017 usleep_range(100000, 200000); 1018 } 1019 1020 if (i == 3) { 1021 dev_err(dev->hw->dev, "rx dma enable failed\n"); 1022 return -ETIMEDOUT; 1023 } 1024 1025 return 0; 1026 } 1027 1028 static void 1029 mtk_wed_dma_disable(struct mtk_wed_device *dev) 1030 { 1031 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1032 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 1033 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 1034 1035 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 1036 1037 wed_clr(dev, MTK_WED_GLO_CFG, 1038 MTK_WED_GLO_CFG_TX_DMA_EN | 1039 MTK_WED_GLO_CFG_RX_DMA_EN); 1040 1041 wdma_clr(dev, MTK_WDMA_GLO_CFG, 1042 MTK_WDMA_GLO_CFG_TX_DMA_EN | 1043 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 1044 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); 1045 1046 if (mtk_wed_is_v1(dev->hw)) { 1047 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); 1048 wdma_clr(dev, MTK_WDMA_GLO_CFG, 1049 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 1050 } else { 1051 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1052 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 1053 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 1054 1055 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1056 MTK_WED_WPDMA_RX_D_RX_DRV_EN); 1057 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1058 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 1059 1060 if (mtk_wed_is_v3_or_greater(dev->hw) && 1061 mtk_wed_get_rx_capa(dev)) { 1062 wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, 1063 MTK_WDMA_PREF_TX_CFG_PREF_EN); 1064 wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, 1065 MTK_WDMA_PREF_RX_CFG_PREF_EN); 1066 } 1067 } 1068 1069 mtk_wed_set_512_support(dev, false); 1070 } 1071 1072 static void 1073 mtk_wed_stop(struct mtk_wed_device *dev) 1074 { 1075 mtk_wed_set_ext_int(dev, false); 1076 1077 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); 1078 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); 1079 wdma_w32(dev, MTK_WDMA_INT_MASK, 0); 1080 wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); 1081 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); 1082 1083 if (!mtk_wed_get_rx_capa(dev)) 1084 return; 1085 1086 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); 1087 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); 1088 } 1089 1090 static void 1091 mtk_wed_deinit(struct mtk_wed_device *dev) 1092 { 1093 mtk_wed_stop(dev); 1094 mtk_wed_dma_disable(dev); 1095 1096 wed_clr(dev, MTK_WED_CTRL, 1097 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 1098 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 1099 MTK_WED_CTRL_WED_TX_BM_EN | 1100 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1101 1102 if (mtk_wed_is_v1(dev->hw)) 1103 return; 1104 1105 wed_clr(dev, MTK_WED_CTRL, 1106 MTK_WED_CTRL_RX_ROUTE_QM_EN | 1107 MTK_WED_CTRL_WED_RX_BM_EN | 1108 MTK_WED_CTRL_RX_RRO_QM_EN); 1109 1110 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1111 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 1112 wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU); 1113 wed_clr(dev, MTK_WED_PCIE_INT_CTRL, 1114 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | 1115 MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER); 1116 } 1117 } 1118 1119 static void 1120 __mtk_wed_detach(struct mtk_wed_device *dev) 1121 { 1122 struct mtk_wed_hw *hw = dev->hw; 1123 1124 mtk_wed_deinit(dev); 1125 1126 mtk_wdma_rx_reset(dev); 1127 mtk_wed_reset(dev, MTK_WED_RESET_WED); 1128 mtk_wed_amsdu_free_buffer(dev); 1129 mtk_wed_free_tx_buffer(dev); 1130 mtk_wed_free_tx_rings(dev); 1131 1132 if (mtk_wed_get_rx_capa(dev)) { 1133 if (hw->wed_wo) 1134 mtk_wed_wo_reset(dev); 1135 mtk_wed_free_rx_rings(dev); 1136 if (hw->wed_wo) 1137 mtk_wed_wo_deinit(hw); 1138 } 1139 1140 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 1141 struct device_node *wlan_node; 1142 1143 wlan_node = dev->wlan.pci_dev->dev.of_node; 1144 if (of_dma_is_coherent(wlan_node) && hw->hifsys) 1145 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 1146 BIT(hw->index), BIT(hw->index)); 1147 } 1148 1149 if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) && 1150 hw->eth->dma_dev != hw->eth->dev) 1151 mtk_eth_set_dma_device(hw->eth, hw->eth->dev); 1152 1153 memset(dev, 0, sizeof(*dev)); 1154 module_put(THIS_MODULE); 1155 1156 hw->wed_dev = NULL; 1157 } 1158 1159 static void 1160 mtk_wed_detach(struct mtk_wed_device *dev) 1161 { 1162 mutex_lock(&hw_lock); 1163 __mtk_wed_detach(dev); 1164 mutex_unlock(&hw_lock); 1165 } 1166 1167 static void 1168 mtk_wed_bus_init(struct mtk_wed_device *dev) 1169 { 1170 switch (dev->wlan.bus_type) { 1171 case MTK_WED_BUS_PCIE: { 1172 struct device_node *np = dev->hw->eth->dev->of_node; 1173 1174 if (mtk_wed_is_v2(dev->hw)) { 1175 struct regmap *regs; 1176 1177 regs = syscon_regmap_lookup_by_phandle(np, 1178 "mediatek,wed-pcie"); 1179 if (IS_ERR(regs)) 1180 break; 1181 1182 regmap_update_bits(regs, 0, BIT(0), BIT(0)); 1183 } 1184 1185 if (dev->wlan.msi) { 1186 wed_w32(dev, MTK_WED_PCIE_CFG_INTM, 1187 dev->hw->pcie_base | 0xc08); 1188 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1189 dev->hw->pcie_base | 0xc04); 1190 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8)); 1191 } else { 1192 wed_w32(dev, MTK_WED_PCIE_CFG_INTM, 1193 dev->hw->pcie_base | 0x180); 1194 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1195 dev->hw->pcie_base | 0x184); 1196 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); 1197 } 1198 1199 wed_w32(dev, MTK_WED_PCIE_INT_CTRL, 1200 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); 1201 1202 /* pcie interrupt control: pola/source selection */ 1203 wed_set(dev, MTK_WED_PCIE_INT_CTRL, 1204 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | 1205 MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER | 1206 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1207 dev->hw->index)); 1208 break; 1209 } 1210 case MTK_WED_BUS_AXI: 1211 wed_set(dev, MTK_WED_WPDMA_INT_CTRL, 1212 MTK_WED_WPDMA_INT_CTRL_SIG_SRC | 1213 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0)); 1214 break; 1215 default: 1216 break; 1217 } 1218 } 1219 1220 static void 1221 mtk_wed_set_wpdma(struct mtk_wed_device *dev) 1222 { 1223 int i; 1224 1225 if (mtk_wed_is_v1(dev->hw)) { 1226 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); 1227 return; 1228 } 1229 1230 mtk_wed_bus_init(dev); 1231 1232 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); 1233 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); 1234 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); 1235 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); 1236 1237 if (!mtk_wed_get_rx_capa(dev)) 1238 return; 1239 1240 wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); 1241 wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx); 1242 1243 if (!dev->wlan.hw_rro) 1244 return; 1245 1246 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]); 1247 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]); 1248 for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) 1249 wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i), 1250 dev->wlan.wpdma_rx_pg + i * 0x10); 1251 } 1252 1253 static void 1254 mtk_wed_hw_init_early(struct mtk_wed_device *dev) 1255 { 1256 u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2); 1257 u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE; 1258 1259 mtk_wed_deinit(dev); 1260 mtk_wed_reset(dev, MTK_WED_RESET_WED); 1261 mtk_wed_set_wpdma(dev); 1262 1263 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 1264 mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | 1265 MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; 1266 set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | 1267 MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; 1268 } 1269 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); 1270 1271 if (mtk_wed_is_v1(dev->hw)) { 1272 u32 offset = dev->hw->index ? 0x04000400 : 0; 1273 1274 wdma_set(dev, MTK_WDMA_GLO_CFG, 1275 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 1276 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | 1277 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 1278 1279 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); 1280 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); 1281 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 1282 MTK_PCIE_BASE(dev->hw->index)); 1283 } else { 1284 wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy); 1285 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT); 1286 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 1287 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS, 1288 MTK_WDMA_INT_STATUS) | 1289 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG, 1290 MTK_WDMA_GLO_CFG)); 1291 1292 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 1293 FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL, 1294 MTK_WDMA_RING_TX(0)) | 1295 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL, 1296 MTK_WDMA_RING_RX(0))); 1297 } 1298 } 1299 1300 static int 1301 mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 1302 int size) 1303 { 1304 ring->desc = dma_alloc_coherent(dev->hw->dev, 1305 size * sizeof(*ring->desc), 1306 &ring->desc_phys, GFP_KERNEL); 1307 if (!ring->desc) 1308 return -ENOMEM; 1309 1310 ring->desc_size = sizeof(*ring->desc); 1311 ring->size = size; 1312 1313 return 0; 1314 } 1315 1316 #define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) 1317 static int 1318 mtk_wed_rro_alloc(struct mtk_wed_device *dev) 1319 { 1320 struct reserved_mem *rmem; 1321 struct device_node *np; 1322 int index; 1323 1324 index = of_property_match_string(dev->hw->node, "memory-region-names", 1325 "wo-dlm"); 1326 if (index < 0) 1327 return index; 1328 1329 np = of_parse_phandle(dev->hw->node, "memory-region", index); 1330 if (!np) 1331 return -ENODEV; 1332 1333 rmem = of_reserved_mem_lookup(np); 1334 of_node_put(np); 1335 1336 if (!rmem) 1337 return -ENODEV; 1338 1339 dev->rro.miod_phys = rmem->base; 1340 dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; 1341 1342 return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring, 1343 MTK_WED_RRO_QUE_CNT); 1344 } 1345 1346 static int 1347 mtk_wed_rro_cfg(struct mtk_wed_device *dev) 1348 { 1349 struct mtk_wed_wo *wo = dev->hw->wed_wo; 1350 struct { 1351 struct { 1352 __le32 base; 1353 __le32 cnt; 1354 __le32 unit; 1355 } ring[2]; 1356 __le32 wed; 1357 u8 version; 1358 } req = { 1359 .ring[0] = { 1360 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE), 1361 .cnt = cpu_to_le32(MTK_WED_MIOD_CNT), 1362 .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT), 1363 }, 1364 .ring[1] = { 1365 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE + 1366 MTK_WED_MIOD_COUNT), 1367 .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT), 1368 .unit = cpu_to_le32(4), 1369 }, 1370 }; 1371 1372 return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1373 MTK_WED_WO_CMD_WED_CFG, 1374 &req, sizeof(req), true); 1375 } 1376 1377 static void 1378 mtk_wed_rro_hw_init(struct mtk_wed_device *dev) 1379 { 1380 wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, 1381 FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | 1382 FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | 1383 FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, 1384 MTK_WED_MIOD_ENTRY_CNT >> 2)); 1385 1386 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys); 1387 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, 1388 FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); 1389 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys); 1390 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, 1391 FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); 1392 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); 1393 wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys); 1394 1395 wed_set(dev, MTK_WED_RROQM_RST_IDX, 1396 MTK_WED_RROQM_RST_IDX_MIOD | 1397 MTK_WED_RROQM_RST_IDX_FDBK); 1398 1399 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 1400 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); 1401 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 1402 } 1403 1404 static void 1405 mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) 1406 { 1407 wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); 1408 1409 for (;;) { 1410 usleep_range(100, 200); 1411 if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) 1412 break; 1413 } 1414 1415 /* configure RX_ROUTE_QM */ 1416 if (mtk_wed_is_v2(dev->hw)) { 1417 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1418 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); 1419 wed_set(dev, MTK_WED_RTQM_GLO_CFG, 1420 FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 1421 0x3 + dev->hw->index)); 1422 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1423 } else { 1424 wed_set(dev, MTK_WED_RTQM_ENQ_CFG0, 1425 FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 1426 0x3 + dev->hw->index)); 1427 } 1428 /* enable RX_ROUTE_QM */ 1429 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 1430 } 1431 1432 static void 1433 mtk_wed_hw_init(struct mtk_wed_device *dev) 1434 { 1435 if (dev->init_done) 1436 return; 1437 1438 dev->init_done = true; 1439 mtk_wed_set_ext_int(dev, false); 1440 1441 wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); 1442 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); 1443 1444 if (mtk_wed_is_v1(dev->hw)) { 1445 wed_w32(dev, MTK_WED_TX_BM_CTRL, 1446 MTK_WED_TX_BM_CTRL_PAUSE | 1447 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, 1448 dev->tx_buf_ring.size / 128) | 1449 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, 1450 MTK_WED_TX_RING_SIZE / 256)); 1451 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 1452 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | 1453 MTK_WED_TX_BM_DYN_THR_HI); 1454 } else if (mtk_wed_is_v2(dev->hw)) { 1455 wed_w32(dev, MTK_WED_TX_BM_CTRL, 1456 MTK_WED_TX_BM_CTRL_PAUSE | 1457 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, 1458 dev->tx_buf_ring.size / 128) | 1459 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, 1460 MTK_WED_TX_RING_SIZE / 256)); 1461 wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, 1462 FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | 1463 MTK_WED_TX_TKID_DYN_THR_HI); 1464 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 1465 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | 1466 MTK_WED_TX_BM_DYN_THR_HI_V2); 1467 wed_w32(dev, MTK_WED_TX_TKID_CTRL, 1468 MTK_WED_TX_TKID_CTRL_PAUSE | 1469 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, 1470 dev->tx_buf_ring.size / 128) | 1471 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, 1472 dev->tx_buf_ring.size / 128)); 1473 } 1474 1475 wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid, 1476 FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) | 1477 FIELD_PREP(MTK_WED_TX_BM_TKID_END, 1478 dev->wlan.token_start + dev->wlan.nbuf - 1)); 1479 1480 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 1481 1482 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1483 /* switch to new bm architecture */ 1484 wed_clr(dev, MTK_WED_TX_BM_CTRL, 1485 MTK_WED_TX_BM_CTRL_LEGACY_EN); 1486 1487 wed_w32(dev, MTK_WED_TX_TKID_CTRL, 1488 MTK_WED_TX_TKID_CTRL_PAUSE | 1489 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3, 1490 dev->wlan.nbuf / 128) | 1491 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3, 1492 dev->wlan.nbuf / 128)); 1493 /* return SKBID + SDP back to bm */ 1494 wed_set(dev, MTK_WED_TX_TKID_CTRL, 1495 MTK_WED_TX_TKID_CTRL_FREE_FORMAT); 1496 1497 wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, 1498 MTK_WED_TX_BM_PKT_CNT | 1499 MTK_WED_TX_BM_INIT_SW_TAIL_IDX); 1500 } 1501 1502 if (mtk_wed_is_v1(dev->hw)) { 1503 wed_set(dev, MTK_WED_CTRL, 1504 MTK_WED_CTRL_WED_TX_BM_EN | 1505 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1506 } else if (mtk_wed_get_rx_capa(dev)) { 1507 /* rx hw init */ 1508 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1509 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 1510 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 1511 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 1512 1513 /* reset prefetch index of ring */ 1514 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, 1515 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1516 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, 1517 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1518 1519 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, 1520 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1521 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, 1522 MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); 1523 1524 /* reset prefetch FIFO of ring */ 1525 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 1526 MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR | 1527 MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR); 1528 wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0); 1529 1530 mtk_wed_rx_buffer_hw_init(dev); 1531 mtk_wed_rro_hw_init(dev); 1532 mtk_wed_route_qm_hw_init(dev); 1533 } 1534 1535 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); 1536 if (!mtk_wed_is_v1(dev->hw)) 1537 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); 1538 } 1539 1540 static void 1541 mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) 1542 { 1543 void *head = (void *)ring->desc; 1544 int i; 1545 1546 for (i = 0; i < size; i++) { 1547 struct mtk_wdma_desc *desc; 1548 1549 desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); 1550 desc->buf0 = 0; 1551 if (tx) 1552 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1553 else 1554 desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); 1555 desc->buf1 = 0; 1556 desc->info = 0; 1557 } 1558 } 1559 1560 static int 1561 mtk_wed_rx_reset(struct mtk_wed_device *dev) 1562 { 1563 struct mtk_wed_wo *wo = dev->hw->wed_wo; 1564 u8 val = MTK_WED_WO_STATE_SER_RESET; 1565 int i, ret; 1566 1567 ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1568 MTK_WED_WO_CMD_CHANGE_STATE, &val, 1569 sizeof(val), true); 1570 if (ret) 1571 return ret; 1572 1573 if (dev->wlan.hw_rro) { 1574 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); 1575 mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS, 1576 MTK_WED_RX_IND_CMD_BUSY); 1577 mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG); 1578 } 1579 1580 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); 1581 ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1582 MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); 1583 if (!ret && mtk_wed_is_v3_or_greater(dev->hw)) 1584 ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1585 MTK_WED_WPDMA_RX_D_PREF_BUSY); 1586 if (ret) { 1587 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1588 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); 1589 } else { 1590 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1591 /* 1.a. disable prefetch HW */ 1592 wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1593 MTK_WED_WPDMA_RX_D_PREF_EN); 1594 mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 1595 MTK_WED_WPDMA_RX_D_PREF_BUSY); 1596 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1597 MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL); 1598 } 1599 1600 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1601 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 1602 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 1603 1604 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1605 MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | 1606 MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); 1607 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1608 MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | 1609 MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); 1610 1611 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 1612 } 1613 1614 /* reset rro qm */ 1615 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 1616 ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1617 MTK_WED_CTRL_RX_RRO_QM_BUSY); 1618 if (ret) { 1619 mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM); 1620 } else { 1621 wed_set(dev, MTK_WED_RROQM_RST_IDX, 1622 MTK_WED_RROQM_RST_IDX_MIOD | 1623 MTK_WED_RROQM_RST_IDX_FDBK); 1624 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 1625 } 1626 1627 if (dev->wlan.hw_rro) { 1628 /* disable rro msdu page drv */ 1629 wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1630 MTK_WED_RRO_MSDU_PG_DRV_EN); 1631 1632 /* disable rro data drv */ 1633 wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); 1634 1635 /* rro msdu page drv reset */ 1636 wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1637 MTK_WED_RRO_MSDU_PG_DRV_CLR); 1638 mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 1639 MTK_WED_RRO_MSDU_PG_DRV_CLR); 1640 1641 /* rro data drv reset */ 1642 wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2), 1643 MTK_WED_RRO_RX_D_DRV_CLR); 1644 mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2), 1645 MTK_WED_RRO_RX_D_DRV_CLR); 1646 } 1647 1648 /* reset route qm */ 1649 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 1650 ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1651 MTK_WED_CTRL_RX_ROUTE_QM_BUSY); 1652 if (ret) { 1653 mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); 1654 } else if (mtk_wed_is_v3_or_greater(dev->hw)) { 1655 wed_set(dev, MTK_WED_RTQM_RST, BIT(0)); 1656 wed_clr(dev, MTK_WED_RTQM_RST, BIT(0)); 1657 mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); 1658 } else { 1659 wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 1660 } 1661 1662 /* reset tx wdma */ 1663 mtk_wdma_tx_reset(dev); 1664 1665 /* reset tx wdma drv */ 1666 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); 1667 if (mtk_wed_is_v3_or_greater(dev->hw)) 1668 mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS, 1669 MTK_WED_WPDMA_STATUS_TX_DRV); 1670 else 1671 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1672 MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); 1673 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); 1674 1675 /* reset wed rx dma */ 1676 ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, 1677 MTK_WED_GLO_CFG_RX_DMA_BUSY); 1678 wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN); 1679 if (ret) { 1680 mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); 1681 } else { 1682 wed_set(dev, MTK_WED_RESET_IDX, 1683 dev->hw->soc->regmap.reset_idx_rx_mask); 1684 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1685 } 1686 1687 /* reset rx bm */ 1688 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 1689 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1690 MTK_WED_CTRL_WED_RX_BM_BUSY); 1691 mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); 1692 1693 if (dev->wlan.hw_rro) { 1694 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); 1695 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1696 MTK_WED_CTRL_WED_RX_PG_BM_BUSY); 1697 wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); 1698 wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); 1699 } 1700 1701 /* wo change to enable state */ 1702 val = MTK_WED_WO_STATE_ENABLE; 1703 ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1704 MTK_WED_WO_CMD_CHANGE_STATE, &val, 1705 sizeof(val), true); 1706 if (ret) 1707 return ret; 1708 1709 /* wed_rx_ring_reset */ 1710 for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) { 1711 if (!dev->rx_ring[i].desc) 1712 continue; 1713 1714 mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE, 1715 false); 1716 } 1717 mtk_wed_free_rx_buffer(dev); 1718 mtk_wed_hwrro_free_buffer(dev); 1719 1720 return 0; 1721 } 1722 1723 static void 1724 mtk_wed_reset_dma(struct mtk_wed_device *dev) 1725 { 1726 bool busy = false; 1727 u32 val; 1728 int i; 1729 1730 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { 1731 if (!dev->tx_ring[i].desc) 1732 continue; 1733 1734 mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE, 1735 true); 1736 } 1737 1738 /* 1. reset WED tx DMA */ 1739 wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN); 1740 busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, 1741 MTK_WED_GLO_CFG_TX_DMA_BUSY); 1742 if (busy) { 1743 mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); 1744 } else { 1745 wed_w32(dev, MTK_WED_RESET_IDX, 1746 dev->hw->soc->regmap.reset_idx_tx_mask); 1747 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1748 } 1749 1750 /* 2. reset WDMA rx DMA */ 1751 busy = !!mtk_wdma_rx_reset(dev); 1752 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1753 val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE | 1754 wed_r32(dev, MTK_WED_WDMA_GLO_CFG); 1755 val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN; 1756 wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val); 1757 } else { 1758 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1759 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 1760 } 1761 1762 if (!busy) 1763 busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, 1764 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); 1765 if (!busy && mtk_wed_is_v3_or_greater(dev->hw)) 1766 busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, 1767 MTK_WED_WDMA_RX_PREF_BUSY); 1768 1769 if (busy) { 1770 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); 1771 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); 1772 } else { 1773 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1774 /* 1.a. disable prefetch HW */ 1775 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 1776 MTK_WED_WDMA_RX_PREF_EN); 1777 mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, 1778 MTK_WED_WDMA_RX_PREF_BUSY); 1779 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 1780 MTK_WED_WDMA_RX_PREF_DDONE2_EN); 1781 1782 /* 2. Reset dma index */ 1783 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 1784 MTK_WED_WDMA_RESET_IDX_RX_ALL); 1785 } 1786 1787 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 1788 MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); 1789 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); 1790 1791 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 1792 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1793 1794 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1795 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1796 } 1797 1798 /* 3. reset WED WPDMA tx */ 1799 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1800 1801 for (i = 0; i < 100; i++) { 1802 if (mtk_wed_is_v1(dev->hw)) 1803 val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, 1804 wed_r32(dev, MTK_WED_TX_BM_INTF)); 1805 else 1806 val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, 1807 wed_r32(dev, MTK_WED_TX_TKID_INTF)); 1808 if (val == 0x40) 1809 break; 1810 } 1811 1812 mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); 1813 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN); 1814 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 1815 1816 /* 4. reset WED WPDMA tx */ 1817 busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, 1818 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY); 1819 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1820 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 1821 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 1822 if (!busy) 1823 busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, 1824 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY); 1825 1826 if (busy) { 1827 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1828 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); 1829 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); 1830 if (mtk_wed_is_v3_or_greater(dev->hw)) 1831 wed_w32(dev, MTK_WED_RX1_CTRL2, 0); 1832 } else { 1833 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 1834 MTK_WED_WPDMA_RESET_IDX_TX | 1835 MTK_WED_WPDMA_RESET_IDX_RX); 1836 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); 1837 } 1838 1839 dev->init_done = false; 1840 if (mtk_wed_is_v1(dev->hw)) 1841 return; 1842 1843 if (!busy) { 1844 wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX); 1845 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1846 } 1847 1848 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1849 /* reset amsdu engine */ 1850 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); 1851 mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU); 1852 } 1853 1854 if (mtk_wed_get_rx_capa(dev)) 1855 mtk_wed_rx_reset(dev); 1856 } 1857 1858 static int 1859 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 1860 int size, u32 desc_size, bool tx) 1861 { 1862 ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, 1863 &ring->desc_phys, GFP_KERNEL); 1864 if (!ring->desc) 1865 return -ENOMEM; 1866 1867 ring->desc_size = desc_size; 1868 ring->size = size; 1869 mtk_wed_ring_reset(ring, size, tx); 1870 1871 return 0; 1872 } 1873 1874 static int 1875 mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, 1876 bool reset) 1877 { 1878 struct mtk_wed_ring *wdma; 1879 1880 if (idx >= ARRAY_SIZE(dev->rx_wdma)) 1881 return -EINVAL; 1882 1883 wdma = &dev->rx_wdma[idx]; 1884 if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, 1885 dev->hw->soc->wdma_desc_size, true)) 1886 return -ENOMEM; 1887 1888 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1889 wdma->desc_phys); 1890 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1891 size); 1892 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1893 1894 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1895 wdma->desc_phys); 1896 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1897 size); 1898 1899 return 0; 1900 } 1901 1902 static int 1903 mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, 1904 bool reset) 1905 { 1906 struct mtk_wed_ring *wdma; 1907 1908 if (idx >= ARRAY_SIZE(dev->tx_wdma)) 1909 return -EINVAL; 1910 1911 wdma = &dev->tx_wdma[idx]; 1912 if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, 1913 dev->hw->soc->wdma_desc_size, true)) 1914 return -ENOMEM; 1915 1916 if (mtk_wed_is_v3_or_greater(dev->hw)) { 1917 struct mtk_wdma_desc *desc = wdma->desc; 1918 int i; 1919 1920 for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) { 1921 desc->buf0 = 0; 1922 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1923 desc->buf1 = 0; 1924 desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE); 1925 desc++; 1926 desc->buf0 = 0; 1927 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 1928 desc->buf1 = 0; 1929 desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE); 1930 desc++; 1931 } 1932 } 1933 1934 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 1935 wdma->desc_phys); 1936 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 1937 size); 1938 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1939 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); 1940 1941 if (reset) 1942 mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true); 1943 1944 if (!idx) { 1945 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, 1946 wdma->desc_phys); 1947 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, 1948 size); 1949 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, 1950 0); 1951 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, 1952 0); 1953 } 1954 1955 return 0; 1956 } 1957 1958 static void 1959 mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, 1960 u32 reason, u32 hash) 1961 { 1962 struct mtk_eth *eth = dev->hw->eth; 1963 struct ethhdr *eh; 1964 1965 if (!skb) 1966 return; 1967 1968 if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) 1969 return; 1970 1971 skb_set_mac_header(skb, 0); 1972 eh = eth_hdr(skb); 1973 skb->protocol = eh->h_proto; 1974 mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); 1975 } 1976 1977 static void 1978 mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) 1979 { 1980 u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); 1981 1982 /* wed control cr set */ 1983 wed_set(dev, MTK_WED_CTRL, 1984 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 1985 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 1986 MTK_WED_CTRL_WED_TX_BM_EN | 1987 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1988 1989 if (mtk_wed_is_v1(dev->hw)) { 1990 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, 1991 MTK_WED_PCIE_INT_TRIGGER_STATUS); 1992 1993 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 1994 MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | 1995 MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); 1996 1997 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); 1998 } else { 1999 if (mtk_wed_is_v3_or_greater(dev->hw)) 2000 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); 2001 2002 /* initail tx interrupt trigger */ 2003 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, 2004 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | 2005 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | 2006 MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN | 2007 MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR | 2008 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG, 2009 dev->wlan.tx_tbit[0]) | 2010 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, 2011 dev->wlan.tx_tbit[1])); 2012 2013 /* initail txfree interrupt trigger */ 2014 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, 2015 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | 2016 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | 2017 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, 2018 dev->wlan.txfree_tbit)); 2019 2020 if (mtk_wed_get_rx_capa(dev)) { 2021 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, 2022 MTK_WED_WPDMA_INT_CTRL_RX0_EN | 2023 MTK_WED_WPDMA_INT_CTRL_RX0_CLR | 2024 MTK_WED_WPDMA_INT_CTRL_RX1_EN | 2025 MTK_WED_WPDMA_INT_CTRL_RX1_CLR | 2026 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, 2027 dev->wlan.rx_tbit[0]) | 2028 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, 2029 dev->wlan.rx_tbit[1])); 2030 2031 wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, 2032 GENMASK(1, 0)); 2033 } 2034 2035 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); 2036 wed_set(dev, MTK_WED_WDMA_INT_CTRL, 2037 FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, 2038 dev->wdma_idx)); 2039 } 2040 2041 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); 2042 2043 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); 2044 wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); 2045 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); 2046 wed_w32(dev, MTK_WED_INT_MASK, irq_mask); 2047 } 2048 2049 #define MTK_WFMDA_RX_DMA_EN BIT(2) 2050 static void 2051 mtk_wed_dma_enable(struct mtk_wed_device *dev) 2052 { 2053 int i; 2054 2055 if (!mtk_wed_is_v3_or_greater(dev->hw)) { 2056 wed_set(dev, MTK_WED_WPDMA_INT_CTRL, 2057 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); 2058 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 2059 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 2060 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 2061 wdma_set(dev, MTK_WDMA_GLO_CFG, 2062 MTK_WDMA_GLO_CFG_TX_DMA_EN | 2063 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 2064 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); 2065 wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED); 2066 } else { 2067 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 2068 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 2069 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN | 2070 MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR); 2071 wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); 2072 } 2073 2074 wed_set(dev, MTK_WED_GLO_CFG, 2075 MTK_WED_GLO_CFG_TX_DMA_EN | 2076 MTK_WED_GLO_CFG_RX_DMA_EN); 2077 2078 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 2079 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 2080 2081 if (mtk_wed_is_v1(dev->hw)) { 2082 wdma_set(dev, MTK_WDMA_GLO_CFG, 2083 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 2084 return; 2085 } 2086 2087 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 2088 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 2089 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 2090 2091 if (mtk_wed_is_v3_or_greater(dev->hw)) { 2092 wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, 2093 FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) | 2094 FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8)); 2095 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 2096 MTK_WED_WDMA_RX_PREF_DDONE2_EN); 2097 wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN); 2098 2099 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 2100 MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST); 2101 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 2102 MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK | 2103 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK | 2104 MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4); 2105 2106 wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); 2107 wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); 2108 } 2109 2110 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 2111 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | 2112 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); 2113 2114 if (!mtk_wed_get_rx_capa(dev)) 2115 return; 2116 2117 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 2118 MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | 2119 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 2120 2121 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN); 2122 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 2123 MTK_WED_WPDMA_RX_D_RX_DRV_EN | 2124 FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | 2125 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2)); 2126 2127 if (mtk_wed_is_v3_or_greater(dev->hw)) { 2128 wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, 2129 MTK_WED_WPDMA_RX_D_PREF_EN | 2130 FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) | 2131 FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8)); 2132 2133 wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); 2134 wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); 2135 wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); 2136 } 2137 2138 for (i = 0; i < MTK_WED_RX_QUEUES; i++) { 2139 struct mtk_wed_ring *ring = &dev->rx_ring[i]; 2140 u32 val; 2141 2142 if (!(ring->flags & MTK_WED_RING_CONFIGURED)) 2143 continue; /* queue is not configured by mt76 */ 2144 2145 if (mtk_wed_check_wfdma_rx_fill(dev, ring)) { 2146 dev_err(dev->hw->dev, 2147 "rx_ring(%d) dma enable failed\n", i); 2148 continue; 2149 } 2150 2151 val = wifi_r32(dev, 2152 dev->wlan.wpdma_rx_glo - 2153 dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN; 2154 wifi_w32(dev, 2155 dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, 2156 val); 2157 } 2158 } 2159 2160 static void 2161 mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset) 2162 { 2163 int i; 2164 2165 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); 2166 wed_w32(dev, MTK_WED_INT_MASK, irq_mask); 2167 2168 if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) 2169 return; 2170 2171 if (reset) { 2172 wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 2173 MTK_WED_RRO_MSDU_PG_DRV_EN); 2174 return; 2175 } 2176 2177 wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR); 2178 wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 2179 MTK_WED_RRO_MSDU_PG_DRV_CLR); 2180 2181 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX, 2182 MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN | 2183 MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR | 2184 MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN | 2185 MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR | 2186 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG, 2187 dev->wlan.rro_rx_tbit[0]) | 2188 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG, 2189 dev->wlan.rro_rx_tbit[1])); 2190 2191 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG, 2192 MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN | 2193 MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR | 2194 MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN | 2195 MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR | 2196 MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN | 2197 MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR | 2198 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG, 2199 dev->wlan.rx_pg_tbit[0]) | 2200 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG, 2201 dev->wlan.rx_pg_tbit[1]) | 2202 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG, 2203 dev->wlan.rx_pg_tbit[2])); 2204 2205 /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after 2206 * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken 2207 */ 2208 wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, 2209 MTK_WED_RRO_MSDU_PG_DRV_EN); 2210 2211 for (i = 0; i < MTK_WED_RX_QUEUES; i++) { 2212 struct mtk_wed_ring *ring = &dev->rx_rro_ring[i]; 2213 2214 if (!(ring->flags & MTK_WED_RING_CONFIGURED)) 2215 continue; 2216 2217 if (mtk_wed_check_wfdma_rx_fill(dev, ring)) 2218 dev_err(dev->hw->dev, 2219 "rx_rro_ring(%d) initialization failed\n", i); 2220 } 2221 2222 for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) { 2223 struct mtk_wed_ring *ring = &dev->rx_page_ring[i]; 2224 2225 if (!(ring->flags & MTK_WED_RING_CONFIGURED)) 2226 continue; 2227 2228 if (mtk_wed_check_wfdma_rx_fill(dev, ring)) 2229 dev_err(dev->hw->dev, 2230 "rx_page_ring(%d) initialization failed\n", i); 2231 } 2232 } 2233 2234 static void 2235 mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, 2236 void __iomem *regs) 2237 { 2238 struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx]; 2239 2240 ring->wpdma = regs; 2241 wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE, 2242 readl(regs)); 2243 wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT, 2244 readl(regs + MTK_WED_RING_OFS_COUNT)); 2245 ring->flags |= MTK_WED_RING_CONFIGURED; 2246 } 2247 2248 static void 2249 mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) 2250 { 2251 struct mtk_wed_ring *ring = &dev->rx_page_ring[idx]; 2252 2253 ring->wpdma = regs; 2254 wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE, 2255 readl(regs)); 2256 wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT, 2257 readl(regs + MTK_WED_RING_OFS_COUNT)); 2258 ring->flags |= MTK_WED_RING_CONFIGURED; 2259 } 2260 2261 static int 2262 mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) 2263 { 2264 struct mtk_wed_ring *ring = &dev->ind_cmd_ring; 2265 u32 val = readl(regs + MTK_WED_RING_OFS_COUNT); 2266 int i, count = 0; 2267 2268 ring->wpdma = regs; 2269 wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE, 2270 readl(regs) & 0xfffffff0); 2271 2272 wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT, 2273 readl(regs + MTK_WED_RING_OFS_COUNT)); 2274 2275 /* ack sn cr */ 2276 wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base + 2277 dev->wlan.ind_cmd.ack_sn_addr); 2278 wed_w32(dev, MTK_WED_RRO_CFG1, 2279 FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ, 2280 dev->wlan.ind_cmd.win_size) | 2281 FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID, 2282 dev->wlan.ind_cmd.particular_sid)); 2283 2284 /* particular session addr element */ 2285 wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, 2286 dev->wlan.ind_cmd.particular_se_phys); 2287 2288 for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) { 2289 wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA, 2290 dev->wlan.ind_cmd.addr_elem_phys[i] >> 4); 2291 wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG, 2292 MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f)); 2293 2294 val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); 2295 while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100) 2296 val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); 2297 if (count >= 100) 2298 dev_err(dev->hw->dev, 2299 "write ba session base failed\n"); 2300 } 2301 2302 /* pn check init */ 2303 for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) { 2304 wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M, 2305 MTK_WED_PN_CHECK_IS_FIRST); 2306 2307 wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR | 2308 FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i)); 2309 2310 count = 0; 2311 val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); 2312 while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100) 2313 val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); 2314 if (count >= 100) 2315 dev_err(dev->hw->dev, 2316 "session(%d) initialization failed\n", i); 2317 } 2318 2319 wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN); 2320 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); 2321 2322 return 0; 2323 } 2324 2325 static void 2326 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) 2327 { 2328 int i; 2329 2330 if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev)) 2331 return; 2332 2333 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 2334 if (!dev->rx_wdma[i].desc) 2335 mtk_wed_wdma_rx_ring_setup(dev, i, 16, false); 2336 2337 mtk_wed_hw_init(dev); 2338 mtk_wed_configure_irq(dev, irq_mask); 2339 2340 mtk_wed_set_ext_int(dev, true); 2341 2342 if (mtk_wed_is_v1(dev->hw)) { 2343 u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | 2344 FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, 2345 dev->hw->index); 2346 2347 val |= BIT(0) | (BIT(1) * !!dev->hw->index); 2348 regmap_write(dev->hw->mirror, dev->hw->index * 4, val); 2349 } else if (mtk_wed_get_rx_capa(dev)) { 2350 /* driver set mid ready and only once */ 2351 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 2352 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 2353 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 2354 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 2355 2356 wed_r32(dev, MTK_WED_EXT_INT_MASK1); 2357 wed_r32(dev, MTK_WED_EXT_INT_MASK2); 2358 2359 if (mtk_wed_is_v3_or_greater(dev->hw)) { 2360 wed_w32(dev, MTK_WED_EXT_INT_MASK3, 2361 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 2362 wed_r32(dev, MTK_WED_EXT_INT_MASK3); 2363 } 2364 2365 if (mtk_wed_rro_cfg(dev)) 2366 return; 2367 } 2368 2369 mtk_wed_set_512_support(dev, dev->wlan.wcid_512); 2370 mtk_wed_amsdu_init(dev); 2371 2372 mtk_wed_dma_enable(dev); 2373 dev->running = true; 2374 } 2375 2376 static int 2377 mtk_wed_attach(struct mtk_wed_device *dev) 2378 __releases(RCU) 2379 { 2380 struct mtk_wed_hw *hw; 2381 struct device *device; 2382 int ret = 0; 2383 2384 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), 2385 "mtk_wed_attach without holding the RCU read lock"); 2386 2387 if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE && 2388 pci_domain_nr(dev->wlan.pci_dev->bus) > 1) || 2389 !try_module_get(THIS_MODULE)) 2390 ret = -ENODEV; 2391 2392 rcu_read_unlock(); 2393 2394 if (ret) 2395 return ret; 2396 2397 mutex_lock(&hw_lock); 2398 2399 hw = mtk_wed_assign(dev); 2400 if (!hw) { 2401 module_put(THIS_MODULE); 2402 ret = -ENODEV; 2403 goto unlock; 2404 } 2405 2406 device = dev->wlan.bus_type == MTK_WED_BUS_PCIE 2407 ? &dev->wlan.pci_dev->dev 2408 : &dev->wlan.platform_dev->dev; 2409 dev_info(device, "attaching wed device %d version %d\n", 2410 hw->index, hw->version); 2411 2412 dev->hw = hw; 2413 dev->dev = hw->dev; 2414 dev->irq = hw->irq; 2415 dev->wdma_idx = hw->index; 2416 dev->version = hw->version; 2417 dev->hw->pcie_base = mtk_wed_get_pcie_base(dev); 2418 2419 if (hw->eth->dma_dev == hw->eth->dev && 2420 of_dma_is_coherent(hw->eth->dev->of_node)) 2421 mtk_eth_set_dma_device(hw->eth, hw->dev); 2422 2423 ret = mtk_wed_tx_buffer_alloc(dev); 2424 if (ret) 2425 goto out; 2426 2427 ret = mtk_wed_amsdu_buffer_alloc(dev); 2428 if (ret) 2429 goto out; 2430 2431 if (mtk_wed_get_rx_capa(dev)) { 2432 ret = mtk_wed_rro_alloc(dev); 2433 if (ret) 2434 goto out; 2435 } 2436 2437 mtk_wed_hw_init_early(dev); 2438 if (mtk_wed_is_v1(hw)) 2439 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 2440 BIT(hw->index), 0); 2441 else 2442 dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); 2443 2444 if (mtk_wed_get_rx_capa(dev)) 2445 ret = mtk_wed_wo_init(hw); 2446 out: 2447 if (ret) { 2448 dev_err(dev->hw->dev, "failed to attach wed device\n"); 2449 __mtk_wed_detach(dev); 2450 } 2451 unlock: 2452 mutex_unlock(&hw_lock); 2453 2454 return ret; 2455 } 2456 2457 static int 2458 mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, 2459 bool reset) 2460 { 2461 struct mtk_wed_ring *ring = &dev->tx_ring[idx]; 2462 2463 /* 2464 * Tx ring redirection: 2465 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN 2466 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) 2467 * registers. 2468 * 2469 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it 2470 * into MTK_WED_WPDMA_RING_TX(n) registers. 2471 * It gets filled with packets picked up from WED TX ring and from 2472 * WDMA RX. 2473 */ 2474 2475 if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) 2476 return -EINVAL; 2477 2478 if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 2479 sizeof(*ring->desc), true)) 2480 return -ENOMEM; 2481 2482 if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, 2483 reset)) 2484 return -ENOMEM; 2485 2486 ring->reg_base = MTK_WED_RING_TX(idx); 2487 ring->wpdma = regs; 2488 2489 if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) { 2490 /* reset prefetch index */ 2491 wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, 2492 MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | 2493 MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); 2494 2495 wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, 2496 MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | 2497 MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); 2498 2499 /* reset prefetch FIFO */ 2500 wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 2501 MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR | 2502 MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR); 2503 wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0); 2504 } 2505 2506 /* WED -> WPDMA */ 2507 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); 2508 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); 2509 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); 2510 2511 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 2512 ring->desc_phys); 2513 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 2514 MTK_WED_TX_RING_SIZE); 2515 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 2516 2517 return 0; 2518 } 2519 2520 static int 2521 mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) 2522 { 2523 struct mtk_wed_ring *ring = &dev->txfree_ring; 2524 int i, index = mtk_wed_is_v1(dev->hw); 2525 2526 /* 2527 * For txfree event handling, the same DMA ring is shared between WED 2528 * and WLAN. The WLAN driver accesses the ring index registers through 2529 * WED 2530 */ 2531 ring->reg_base = MTK_WED_RING_RX(index); 2532 ring->wpdma = regs; 2533 2534 for (i = 0; i < 12; i += 4) { 2535 u32 val = readl(regs + i); 2536 2537 wed_w32(dev, MTK_WED_RING_RX(index) + i, val); 2538 wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val); 2539 } 2540 2541 return 0; 2542 } 2543 2544 static int 2545 mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, 2546 bool reset) 2547 { 2548 struct mtk_wed_ring *ring = &dev->rx_ring[idx]; 2549 2550 if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) 2551 return -EINVAL; 2552 2553 if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 2554 sizeof(*ring->desc), false)) 2555 return -ENOMEM; 2556 2557 if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, 2558 reset)) 2559 return -ENOMEM; 2560 2561 ring->reg_base = MTK_WED_RING_RX_DATA(idx); 2562 ring->wpdma = regs; 2563 ring->flags |= MTK_WED_RING_CONFIGURED; 2564 2565 /* WPDMA -> WED */ 2566 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); 2567 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); 2568 2569 wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, 2570 ring->desc_phys); 2571 wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, 2572 MTK_WED_RX_RING_SIZE); 2573 2574 return 0; 2575 } 2576 2577 static u32 2578 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) 2579 { 2580 u32 val, ext_mask; 2581 2582 if (mtk_wed_is_v3_or_greater(dev->hw)) 2583 ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 2584 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 2585 else 2586 ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 2587 2588 val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); 2589 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); 2590 val &= ext_mask; 2591 if (!dev->hw->num_flows) 2592 val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 2593 if (val && net_ratelimit()) 2594 pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); 2595 2596 val = wed_r32(dev, MTK_WED_INT_STATUS); 2597 val &= mask; 2598 wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ 2599 2600 return val; 2601 } 2602 2603 static void 2604 mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) 2605 { 2606 if (!dev->running) 2607 return; 2608 2609 mtk_wed_set_ext_int(dev, !!mask); 2610 wed_w32(dev, MTK_WED_INT_MASK, mask); 2611 } 2612 2613 int mtk_wed_flow_add(int index) 2614 { 2615 struct mtk_wed_hw *hw = hw_list[index]; 2616 int ret = 0; 2617 2618 mutex_lock(&hw_lock); 2619 2620 if (!hw || !hw->wed_dev) { 2621 ret = -ENODEV; 2622 goto out; 2623 } 2624 2625 if (!hw->wed_dev->wlan.offload_enable) 2626 goto out; 2627 2628 if (hw->num_flows) { 2629 hw->num_flows++; 2630 goto out; 2631 } 2632 2633 ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); 2634 if (!ret) 2635 hw->num_flows++; 2636 mtk_wed_set_ext_int(hw->wed_dev, true); 2637 2638 out: 2639 mutex_unlock(&hw_lock); 2640 2641 return ret; 2642 } 2643 2644 void mtk_wed_flow_remove(int index) 2645 { 2646 struct mtk_wed_hw *hw = hw_list[index]; 2647 2648 mutex_lock(&hw_lock); 2649 2650 if (!hw || !hw->wed_dev) 2651 goto out; 2652 2653 if (!hw->wed_dev->wlan.offload_disable) 2654 goto out; 2655 2656 if (--hw->num_flows) 2657 goto out; 2658 2659 hw->wed_dev->wlan.offload_disable(hw->wed_dev); 2660 mtk_wed_set_ext_int(hw->wed_dev, true); 2661 2662 out: 2663 mutex_unlock(&hw_lock); 2664 } 2665 2666 static int 2667 mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) 2668 { 2669 struct mtk_wed_flow_block_priv *priv = cb_priv; 2670 struct flow_cls_offload *cls = type_data; 2671 struct mtk_wed_hw *hw = priv->hw; 2672 2673 if (!tc_can_offload(priv->dev)) 2674 return -EOPNOTSUPP; 2675 2676 if (type != TC_SETUP_CLSFLOWER) 2677 return -EOPNOTSUPP; 2678 2679 return mtk_flow_offload_cmd(hw->eth, cls, hw->index); 2680 } 2681 2682 static int 2683 mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev, 2684 struct flow_block_offload *f) 2685 { 2686 struct mtk_wed_flow_block_priv *priv; 2687 static LIST_HEAD(block_cb_list); 2688 struct flow_block_cb *block_cb; 2689 struct mtk_eth *eth = hw->eth; 2690 flow_setup_cb_t *cb; 2691 2692 if (!eth->soc->offload_version) 2693 return -EOPNOTSUPP; 2694 2695 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 2696 return -EOPNOTSUPP; 2697 2698 cb = mtk_wed_setup_tc_block_cb; 2699 f->driver_block_list = &block_cb_list; 2700 2701 switch (f->command) { 2702 case FLOW_BLOCK_BIND: 2703 block_cb = flow_block_cb_lookup(f->block, cb, dev); 2704 if (block_cb) { 2705 flow_block_cb_incref(block_cb); 2706 return 0; 2707 } 2708 2709 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2710 if (!priv) 2711 return -ENOMEM; 2712 2713 priv->hw = hw; 2714 priv->dev = dev; 2715 block_cb = flow_block_cb_alloc(cb, dev, priv, NULL); 2716 if (IS_ERR(block_cb)) { 2717 kfree(priv); 2718 return PTR_ERR(block_cb); 2719 } 2720 2721 flow_block_cb_incref(block_cb); 2722 flow_block_cb_add(block_cb, f); 2723 list_add_tail(&block_cb->driver_list, &block_cb_list); 2724 return 0; 2725 case FLOW_BLOCK_UNBIND: 2726 block_cb = flow_block_cb_lookup(f->block, cb, dev); 2727 if (!block_cb) 2728 return -ENOENT; 2729 2730 if (!flow_block_cb_decref(block_cb)) { 2731 flow_block_cb_remove(block_cb, f); 2732 list_del(&block_cb->driver_list); 2733 kfree(block_cb->cb_priv); 2734 } 2735 return 0; 2736 default: 2737 return -EOPNOTSUPP; 2738 } 2739 } 2740 2741 static int 2742 mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev, 2743 enum tc_setup_type type, void *type_data) 2744 { 2745 struct mtk_wed_hw *hw = wed->hw; 2746 2747 if (mtk_wed_is_v1(hw)) 2748 return -EOPNOTSUPP; 2749 2750 switch (type) { 2751 case TC_SETUP_BLOCK: 2752 case TC_SETUP_FT: 2753 return mtk_wed_setup_tc_block(hw, dev, type_data); 2754 default: 2755 return -EOPNOTSUPP; 2756 } 2757 } 2758 2759 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, 2760 void __iomem *wdma, phys_addr_t wdma_phy, 2761 int index) 2762 { 2763 static const struct mtk_wed_ops wed_ops = { 2764 .attach = mtk_wed_attach, 2765 .tx_ring_setup = mtk_wed_tx_ring_setup, 2766 .rx_ring_setup = mtk_wed_rx_ring_setup, 2767 .txfree_ring_setup = mtk_wed_txfree_ring_setup, 2768 .msg_update = mtk_wed_mcu_msg_update, 2769 .start = mtk_wed_start, 2770 .stop = mtk_wed_stop, 2771 .reset_dma = mtk_wed_reset_dma, 2772 .reg_read = wed_r32, 2773 .reg_write = wed_w32, 2774 .irq_get = mtk_wed_irq_get, 2775 .irq_set_mask = mtk_wed_irq_set_mask, 2776 .detach = mtk_wed_detach, 2777 .ppe_check = mtk_wed_ppe_check, 2778 .setup_tc = mtk_wed_setup_tc, 2779 .start_hw_rro = mtk_wed_start_hw_rro, 2780 .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup, 2781 .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup, 2782 .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup, 2783 }; 2784 struct device_node *eth_np = eth->dev->of_node; 2785 struct platform_device *pdev; 2786 struct mtk_wed_hw *hw; 2787 struct regmap *regs; 2788 int irq; 2789 2790 if (!np) 2791 return; 2792 2793 pdev = of_find_device_by_node(np); 2794 if (!pdev) 2795 goto err_of_node_put; 2796 2797 get_device(&pdev->dev); 2798 irq = platform_get_irq(pdev, 0); 2799 if (irq < 0) 2800 goto err_put_device; 2801 2802 regs = syscon_regmap_lookup_by_phandle(np, NULL); 2803 if (IS_ERR(regs)) 2804 goto err_put_device; 2805 2806 rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); 2807 2808 mutex_lock(&hw_lock); 2809 2810 if (WARN_ON(hw_list[index])) 2811 goto unlock; 2812 2813 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 2814 if (!hw) 2815 goto unlock; 2816 2817 hw->node = np; 2818 hw->regs = regs; 2819 hw->eth = eth; 2820 hw->dev = &pdev->dev; 2821 hw->wdma_phy = wdma_phy; 2822 hw->wdma = wdma; 2823 hw->index = index; 2824 hw->irq = irq; 2825 hw->version = eth->soc->version; 2826 2827 switch (hw->version) { 2828 case 2: 2829 hw->soc = &mt7986_data; 2830 break; 2831 case 3: 2832 hw->soc = &mt7988_data; 2833 break; 2834 default: 2835 case 1: 2836 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, 2837 "mediatek,pcie-mirror"); 2838 hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, 2839 "mediatek,hifsys"); 2840 if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { 2841 kfree(hw); 2842 goto unlock; 2843 } 2844 2845 if (!index) { 2846 regmap_write(hw->mirror, 0, 0); 2847 regmap_write(hw->mirror, 4, 0); 2848 } 2849 hw->soc = &mt7622_data; 2850 break; 2851 } 2852 2853 mtk_wed_hw_add_debugfs(hw); 2854 2855 hw_list[index] = hw; 2856 2857 mutex_unlock(&hw_lock); 2858 2859 return; 2860 2861 unlock: 2862 mutex_unlock(&hw_lock); 2863 err_put_device: 2864 put_device(&pdev->dev); 2865 err_of_node_put: 2866 of_node_put(np); 2867 } 2868 2869 void mtk_wed_exit(void) 2870 { 2871 int i; 2872 2873 rcu_assign_pointer(mtk_soc_wed_ops, NULL); 2874 2875 synchronize_rcu(); 2876 2877 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 2878 struct mtk_wed_hw *hw; 2879 2880 hw = hw_list[i]; 2881 if (!hw) 2882 continue; 2883 2884 hw_list[i] = NULL; 2885 debugfs_remove(hw->debugfs_dir); 2886 put_device(hw->dev); 2887 of_node_put(hw->node); 2888 kfree(hw); 2889 } 2890 } 2891