1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2024 AIROHA Inc 4 * Author: Lorenzo Bianconi <lorenzo@kernel.org> 5 */ 6 #include <linux/etherdevice.h> 7 #include <linux/iopoll.h> 8 #include <linux/kernel.h> 9 #include <linux/netdevice.h> 10 #include <linux/of.h> 11 #include <linux/of_net.h> 12 #include <linux/platform_device.h> 13 #include <linux/reset.h> 14 #include <linux/tcp.h> 15 #include <linux/u64_stats_sync.h> 16 #include <net/dsa.h> 17 #include <net/page_pool/helpers.h> 18 #include <uapi/linux/ppp_defs.h> 19 20 #define AIROHA_MAX_NUM_GDM_PORTS 1 21 #define AIROHA_MAX_NUM_QDMA 2 22 #define AIROHA_MAX_NUM_RSTS 3 23 #define AIROHA_MAX_NUM_XSI_RSTS 5 24 #define AIROHA_MAX_MTU 2000 25 #define AIROHA_MAX_PACKET_SIZE 2048 26 #define AIROHA_NUM_TX_RING 32 27 #define AIROHA_NUM_RX_RING 32 28 #define AIROHA_FE_MC_MAX_VLAN_TABLE 64 29 #define AIROHA_FE_MC_MAX_VLAN_PORT 16 30 #define AIROHA_NUM_TX_IRQ 2 31 #define HW_DSCP_NUM 2048 32 #define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048) 33 #define TX_DSCP_NUM 1024 34 #define RX_DSCP_NUM(_n) \ 35 ((_n) == 2 ? 128 : \ 36 (_n) == 11 ? 128 : \ 37 (_n) == 15 ? 128 : \ 38 (_n) == 0 ? 1024 : 16) 39 40 #define PSE_RSV_PAGES 128 41 #define PSE_QUEUE_RSV_PAGES 64 42 43 /* FE */ 44 #define PSE_BASE 0x0100 45 #define CSR_IFC_BASE 0x0200 46 #define CDM1_BASE 0x0400 47 #define GDM1_BASE 0x0500 48 #define PPE1_BASE 0x0c00 49 50 #define CDM2_BASE 0x1400 51 #define GDM2_BASE 0x1500 52 53 #define GDM3_BASE 0x1100 54 #define GDM4_BASE 0x2500 55 56 #define GDM_BASE(_n) \ 57 ((_n) == 4 ? GDM4_BASE : \ 58 (_n) == 3 ? GDM3_BASE : \ 59 (_n) == 2 ? GDM2_BASE : GDM1_BASE) 60 61 #define REG_FE_DMA_GLO_CFG 0x0000 62 #define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4) 63 #define FE_DMA_GLO_PG_SZ_MASK BIT(3) 64 65 #define REG_FE_RST_GLO_CFG 0x0004 66 #define FE_RST_GDM4_MBI_ARB_MASK BIT(3) 67 #define FE_RST_GDM3_MBI_ARB_MASK BIT(2) 68 #define FE_RST_CORE_MASK BIT(0) 69 70 #define REG_FE_LAN_MAC_H 0x0040 71 #define REG_FE_LAN_MAC_LMIN 0x0044 72 #define REG_FE_LAN_MAC_LMAX 0x0048 73 74 #define REG_FE_CDM1_OQ_MAP0 0x0050 75 #define REG_FE_CDM1_OQ_MAP1 0x0054 76 #define REG_FE_CDM1_OQ_MAP2 0x0058 77 #define REG_FE_CDM1_OQ_MAP3 0x005c 78 79 #define REG_FE_PCE_CFG 0x0070 80 #define PCE_DPI_EN_MASK BIT(2) 81 #define PCE_KA_EN_MASK BIT(1) 82 #define PCE_MC_EN_MASK BIT(0) 83 84 #define REG_FE_PSE_QUEUE_CFG_WR 0x0080 85 #define PSE_CFG_PORT_ID_MASK GENMASK(27, 24) 86 #define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16) 87 #define PSE_CFG_WR_EN_MASK BIT(8) 88 #define PSE_CFG_OQRSV_SEL_MASK BIT(0) 89 90 #define REG_FE_PSE_QUEUE_CFG_VAL 0x0084 91 #define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0) 92 93 #define PSE_FQ_CFG 0x008c 94 #define PSE_FQ_LIMIT_MASK GENMASK(14, 0) 95 96 #define REG_FE_PSE_BUF_SET 0x0090 97 #define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16) 98 #define PSE_ALLRSV_MASK GENMASK(14, 0) 99 100 #define REG_PSE_SHARE_USED_THD 0x0094 101 #define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16) 102 #define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0) 103 104 #define REG_GDM_MISC_CFG 0x0148 105 #define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9) 106 #define GDM2_CHN_VLD_MODE_MASK BIT(5) 107 108 #define REG_FE_CSR_IFC_CFG CSR_IFC_BASE 109 #define FE_IFC_EN_MASK BIT(0) 110 111 #define REG_FE_VIP_PORT_EN 0x01f0 112 #define REG_FE_IFC_PORT_EN 0x01f4 113 114 #define REG_PSE_IQ_REV1 (PSE_BASE + 0x08) 115 #define PSE_IQ_RES1_P2_MASK GENMASK(23, 16) 116 117 #define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c) 118 #define PSE_IQ_RES2_P5_MASK GENMASK(15, 8) 119 #define PSE_IQ_RES2_P4_MASK GENMASK(7, 0) 120 121 #define 
REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3)) 122 #define PATN_FCPU_EN_MASK BIT(7) 123 #define PATN_SWP_EN_MASK BIT(6) 124 #define PATN_DP_EN_MASK BIT(5) 125 #define PATN_SP_EN_MASK BIT(4) 126 #define PATN_TYPE_MASK GENMASK(3, 1) 127 #define PATN_EN_MASK BIT(0) 128 129 #define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3)) 130 #define PATN_DP_MASK GENMASK(31, 16) 131 #define PATN_SP_MASK GENMASK(15, 0) 132 133 #define REG_CDM1_VLAN_CTRL CDM1_BASE 134 #define CDM1_VLAN_MASK GENMASK(31, 16) 135 136 #define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08) 137 #define CDM1_VIP_QSEL_MASK GENMASK(24, 20) 138 139 #define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2)) 140 #define CDM1_CRSN_QSEL_REASON_MASK(_n) \ 141 GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) 142 143 #define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08) 144 #define CDM2_OAM_QSEL_MASK GENMASK(31, 27) 145 #define CDM2_VIP_QSEL_MASK GENMASK(24, 20) 146 147 #define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2)) 148 #define CDM2_CRSN_QSEL_REASON_MASK(_n) \ 149 GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) 150 151 #define REG_GDM_FWD_CFG(_n) GDM_BASE(_n) 152 #define GDM_DROP_CRC_ERR BIT(23) 153 #define GDM_IP4_CKSUM BIT(22) 154 #define GDM_TCP_CKSUM BIT(21) 155 #define GDM_UDP_CKSUM BIT(20) 156 #define GDM_UCFQ_MASK GENMASK(15, 12) 157 #define GDM_BCFQ_MASK GENMASK(11, 8) 158 #define GDM_MCFQ_MASK GENMASK(7, 4) 159 #define GDM_OCFQ_MASK GENMASK(3, 0) 160 161 #define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10) 162 #define GDM_INGRESS_FC_EN_MASK BIT(1) 163 #define GDM_STAG_EN_MASK BIT(0) 164 165 #define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14) 166 #define GDM_SHORT_LEN_MASK GENMASK(13, 0) 167 #define GDM_LONG_LEN_MASK GENMASK(29, 16) 168 169 #define REG_FE_CPORT_CFG (GDM1_BASE + 0x40) 170 #define FE_CPORT_PAD BIT(26) 171 #define FE_CPORT_PORT_XFC_MASK BIT(25) 172 #define FE_CPORT_QUEUE_XFC_MASK BIT(24) 173 174 #define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0) 175 #define FE_GDM_MIB_RX_CLEAR_MASK BIT(1) 176 #define FE_GDM_MIB_TX_CLEAR_MASK BIT(0) 177 178 #define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4) 179 #define FE_STRICT_RFC2819_MODE_MASK BIT(31) 180 #define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17) 181 #define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16) 182 #define FE_TX_MIB_ID_MASK GENMASK(15, 8) 183 #define FE_RX_MIB_ID_MASK GENMASK(7, 0) 184 185 #define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104) 186 #define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c) 187 #define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110) 188 #define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114) 189 #define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118) 190 #define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c) 191 #define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120) 192 #define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124) 193 #define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128) 194 #define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c) 195 #define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130) 196 #define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134) 197 #define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138) 198 #define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c) 199 #define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140) 200 201 #define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148) 202 #define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c) 203 #define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150) 204 #define 
REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154) 205 #define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158) 206 #define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c) 207 #define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160) 208 #define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164) 209 #define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168) 210 #define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c) 211 #define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170) 212 #define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174) 213 #define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178) 214 #define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c) 215 #define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180) 216 #define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184) 217 #define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188) 218 #define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c) 219 #define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190) 220 #define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194) 221 #define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198) 222 #define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c) 223 224 #define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250) 225 #define PPE1_SRAM_TABLE_EN_MASK BIT(0) 226 #define PPE1_SRAM_HASH1_EN_MASK BIT(8) 227 #define PPE1_DRAM_TABLE_EN_MASK BIT(16) 228 #define PPE1_DRAM_HASH1_EN_MASK BIT(24) 229 230 #define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) 231 #define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) 232 #define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) 233 #define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) 234 235 #define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) 236 #define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) 237 #define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) 238 #define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c) 239 #define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8) 240 #define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) 241 #define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) 242 #define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) 243 #define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) 244 #define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) 245 #define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) 246 #define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) 247 #define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) 248 #define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) 249 #define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) 250 #define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) 251 252 #define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20) 253 #define MBI_RX_AGE_SEL_MASK GENMASK(26, 25) 254 #define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) 255 256 #define REG_GDM3_FWD_CFG GDM3_BASE 257 #define GDM3_PAD_EN_MASK BIT(28) 258 259 #define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100) 260 #define GDM4_PAD_EN_MASK BIT(28) 261 #define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8) 262 263 #define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c) 264 #define GDM4_SPORT_OFF2_MASK GENMASK(19, 16) 265 #define GDM4_SPORT_OFF1_MASK GENMASK(15, 12) 266 #define GDM4_SPORT_OFF0_MASK GENMASK(11, 8) 267 268 #define REG_IP_FRAG_FP 0x2010 269 #define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21) 270 #define IP_ASSEMBLE_NBQ_MASK 
GENMASK(20, 16) 271 #define IP_FRAGMENT_PORT_MASK GENMASK(8, 5) 272 #define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0) 273 274 #define REG_MC_VLAN_EN 0x2100 275 #define MC_VLAN_EN_MASK BIT(0) 276 277 #define REG_MC_VLAN_CFG 0x2104 278 #define MC_VLAN_CFG_CMD_DONE_MASK BIT(31) 279 #define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16) 280 #define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8) 281 #define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4) 282 #define MC_VLAN_CFG_RW_MASK BIT(0) 283 284 #define REG_MC_VLAN_DATA 0x2108 285 286 #define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4 287 288 /* QDMA */ 289 #define REG_QDMA_GLOBAL_CFG 0x0004 290 #define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31) 291 #define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29) 292 #define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28) 293 #define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27) 294 #define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26) 295 #define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25) 296 #define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24) 297 #define GLOBAL_CFG_RESET_MASK BIT(23) 298 #define GLOBAL_CFG_RESET_DONE_MASK BIT(22) 299 #define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21) 300 #define GLOBAL_CFG_IRQ1_EN_MASK BIT(20) 301 #define GLOBAL_CFG_IRQ0_EN_MASK BIT(19) 302 #define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18) 303 #define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17) 304 #define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16) 305 #define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8) 306 #define GLOBAL_CFG_CHECK_DONE_MASK BIT(7) 307 #define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6) 308 #define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4) 309 #define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3) 310 #define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2) 311 #define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1) 312 #define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0) 313 314 #define REG_FWD_DSCP_BASE 0x0010 315 #define REG_FWD_BUF_BASE 0x0014 316 317 #define REG_HW_FWD_DSCP_CFG 0x0018 318 #define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28) 319 #define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16) 320 #define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0) 321 322 #define REG_INT_STATUS(_n) \ 323 (((_n) == 4) ? 0x0730 : \ 324 ((_n) == 3) ? 0x0724 : \ 325 ((_n) == 2) ? 0x0720 : \ 326 ((_n) == 1) ? 0x0024 : 0x0020) 327 328 #define REG_INT_ENABLE(_n) \ 329 (((_n) == 4) ? 0x0750 : \ 330 ((_n) == 3) ? 0x0744 : \ 331 ((_n) == 2) ? 0x0740 : \ 332 ((_n) == 1) ? 
0x002c : 0x0028) 333 334 /* QDMA_CSR_INT_ENABLE1 */ 335 #define RX15_COHERENT_INT_MASK BIT(31) 336 #define RX14_COHERENT_INT_MASK BIT(30) 337 #define RX13_COHERENT_INT_MASK BIT(29) 338 #define RX12_COHERENT_INT_MASK BIT(28) 339 #define RX11_COHERENT_INT_MASK BIT(27) 340 #define RX10_COHERENT_INT_MASK BIT(26) 341 #define RX9_COHERENT_INT_MASK BIT(25) 342 #define RX8_COHERENT_INT_MASK BIT(24) 343 #define RX7_COHERENT_INT_MASK BIT(23) 344 #define RX6_COHERENT_INT_MASK BIT(22) 345 #define RX5_COHERENT_INT_MASK BIT(21) 346 #define RX4_COHERENT_INT_MASK BIT(20) 347 #define RX3_COHERENT_INT_MASK BIT(19) 348 #define RX2_COHERENT_INT_MASK BIT(18) 349 #define RX1_COHERENT_INT_MASK BIT(17) 350 #define RX0_COHERENT_INT_MASK BIT(16) 351 #define TX7_COHERENT_INT_MASK BIT(15) 352 #define TX6_COHERENT_INT_MASK BIT(14) 353 #define TX5_COHERENT_INT_MASK BIT(13) 354 #define TX4_COHERENT_INT_MASK BIT(12) 355 #define TX3_COHERENT_INT_MASK BIT(11) 356 #define TX2_COHERENT_INT_MASK BIT(10) 357 #define TX1_COHERENT_INT_MASK BIT(9) 358 #define TX0_COHERENT_INT_MASK BIT(8) 359 #define CNT_OVER_FLOW_INT_MASK BIT(7) 360 #define IRQ1_FULL_INT_MASK BIT(5) 361 #define IRQ1_INT_MASK BIT(4) 362 #define HWFWD_DSCP_LOW_INT_MASK BIT(3) 363 #define HWFWD_DSCP_EMPTY_INT_MASK BIT(2) 364 #define IRQ0_FULL_INT_MASK BIT(1) 365 #define IRQ0_INT_MASK BIT(0) 366 367 #define TX_DONE_INT_MASK(_n) \ 368 ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \ 369 : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) 370 371 #define INT_TX_MASK \ 372 (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \ 373 IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) 374 375 #define INT_IDX0_MASK \ 376 (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \ 377 TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \ 378 TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \ 379 TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \ 380 RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \ 381 RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \ 382 RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \ 383 RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \ 384 RX15_COHERENT_INT_MASK | INT_TX_MASK) 385 386 /* QDMA_CSR_INT_ENABLE2 */ 387 #define RX15_NO_CPU_DSCP_INT_MASK BIT(31) 388 #define RX14_NO_CPU_DSCP_INT_MASK BIT(30) 389 #define RX13_NO_CPU_DSCP_INT_MASK BIT(29) 390 #define RX12_NO_CPU_DSCP_INT_MASK BIT(28) 391 #define RX11_NO_CPU_DSCP_INT_MASK BIT(27) 392 #define RX10_NO_CPU_DSCP_INT_MASK BIT(26) 393 #define RX9_NO_CPU_DSCP_INT_MASK BIT(25) 394 #define RX8_NO_CPU_DSCP_INT_MASK BIT(24) 395 #define RX7_NO_CPU_DSCP_INT_MASK BIT(23) 396 #define RX6_NO_CPU_DSCP_INT_MASK BIT(22) 397 #define RX5_NO_CPU_DSCP_INT_MASK BIT(21) 398 #define RX4_NO_CPU_DSCP_INT_MASK BIT(20) 399 #define RX3_NO_CPU_DSCP_INT_MASK BIT(19) 400 #define RX2_NO_CPU_DSCP_INT_MASK BIT(18) 401 #define RX1_NO_CPU_DSCP_INT_MASK BIT(17) 402 #define RX0_NO_CPU_DSCP_INT_MASK BIT(16) 403 #define RX15_DONE_INT_MASK BIT(15) 404 #define RX14_DONE_INT_MASK BIT(14) 405 #define RX13_DONE_INT_MASK BIT(13) 406 #define RX12_DONE_INT_MASK BIT(12) 407 #define RX11_DONE_INT_MASK BIT(11) 408 #define RX10_DONE_INT_MASK BIT(10) 409 #define RX9_DONE_INT_MASK BIT(9) 410 #define RX8_DONE_INT_MASK BIT(8) 411 #define RX7_DONE_INT_MASK BIT(7) 412 #define RX6_DONE_INT_MASK BIT(6) 413 #define RX5_DONE_INT_MASK BIT(5) 414 #define RX4_DONE_INT_MASK BIT(4) 415 #define RX3_DONE_INT_MASK BIT(3) 416 #define RX2_DONE_INT_MASK BIT(2) 417 #define RX1_DONE_INT_MASK BIT(1) 418 #define RX0_DONE_INT_MASK BIT(0) 419 420 #define RX_DONE_INT_MASK \ 421 (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \ 422 
RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \ 423 RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \ 424 RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \ 425 RX15_DONE_INT_MASK) 426 #define INT_IDX1_MASK \ 427 (RX_DONE_INT_MASK | \ 428 RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \ 429 RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \ 430 RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \ 431 RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \ 432 RX15_NO_CPU_DSCP_INT_MASK) 433 434 /* QDMA_CSR_INT_ENABLE5 */ 435 #define TX31_COHERENT_INT_MASK BIT(31) 436 #define TX30_COHERENT_INT_MASK BIT(30) 437 #define TX29_COHERENT_INT_MASK BIT(29) 438 #define TX28_COHERENT_INT_MASK BIT(28) 439 #define TX27_COHERENT_INT_MASK BIT(27) 440 #define TX26_COHERENT_INT_MASK BIT(26) 441 #define TX25_COHERENT_INT_MASK BIT(25) 442 #define TX24_COHERENT_INT_MASK BIT(24) 443 #define TX23_COHERENT_INT_MASK BIT(23) 444 #define TX22_COHERENT_INT_MASK BIT(22) 445 #define TX21_COHERENT_INT_MASK BIT(21) 446 #define TX20_COHERENT_INT_MASK BIT(20) 447 #define TX19_COHERENT_INT_MASK BIT(19) 448 #define TX18_COHERENT_INT_MASK BIT(18) 449 #define TX17_COHERENT_INT_MASK BIT(17) 450 #define TX16_COHERENT_INT_MASK BIT(16) 451 #define TX15_COHERENT_INT_MASK BIT(15) 452 #define TX14_COHERENT_INT_MASK BIT(14) 453 #define TX13_COHERENT_INT_MASK BIT(13) 454 #define TX12_COHERENT_INT_MASK BIT(12) 455 #define TX11_COHERENT_INT_MASK BIT(11) 456 #define TX10_COHERENT_INT_MASK BIT(10) 457 #define TX9_COHERENT_INT_MASK BIT(9) 458 #define TX8_COHERENT_INT_MASK BIT(8) 459 460 #define INT_IDX4_MASK \ 461 (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \ 462 TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \ 463 TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \ 464 TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \ 465 TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \ 466 TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \ 467 TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \ 468 TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \ 469 TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \ 470 TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \ 471 TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \ 472 TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK) 473 474 #define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050) 475 476 #define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054) 477 #define TX_IRQ_THR_MASK GENMASK(27, 16) 478 #define TX_IRQ_DEPTH_MASK GENMASK(11, 0) 479 480 #define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058) 481 #define IRQ_CLEAR_LEN_MASK GENMASK(7, 0) 482 483 #define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c) 484 #define IRQ_ENTRY_LEN_MASK GENMASK(27, 16) 485 #define IRQ_HEAD_IDX_MASK GENMASK(11, 0) 486 487 #define REG_TX_RING_BASE(_n) \ 488 (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5)) 489 490 #define REG_TX_RING_BLOCKING(_n) \ 491 (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5)) 492 493 #define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6) 494 #define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4) 495 #define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2) 496 #define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1) 497 #define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0) 498 499 #define REG_TX_CPU_IDX(_n) \ 500 (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5)) 501 502 #define TX_RING_CPU_IDX_MASK GENMASK(15, 0) 503 504 #define REG_TX_DMA_IDX(_n) \ 505 (((_n) < 8) ? 
0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5)) 506 507 #define TX_RING_DMA_IDX_MASK GENMASK(15, 0) 508 509 #define IRQ_RING_IDX_MASK GENMASK(20, 16) 510 #define IRQ_DESC_IDX_MASK GENMASK(15, 0) 511 512 #define REG_RX_RING_BASE(_n) \ 513 (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5)) 514 515 #define REG_RX_RING_SIZE(_n) \ 516 (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5)) 517 518 #define RX_RING_THR_MASK GENMASK(31, 16) 519 #define RX_RING_SIZE_MASK GENMASK(15, 0) 520 521 #define REG_RX_CPU_IDX(_n) \ 522 (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5)) 523 524 #define RX_RING_CPU_IDX_MASK GENMASK(15, 0) 525 526 #define REG_RX_DMA_IDX(_n) \ 527 (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5)) 528 529 #define REG_RX_DELAY_INT_IDX(_n) \ 530 (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5)) 531 532 #define RX_DELAY_INT_MASK GENMASK(15, 0) 533 534 #define RX_RING_DMA_IDX_MASK GENMASK(15, 0) 535 536 #define REG_INGRESS_TRTCM_CFG 0x0070 537 #define INGRESS_TRTCM_EN_MASK BIT(31) 538 #define INGRESS_TRTCM_MODE_MASK BIT(30) 539 #define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) 540 #define INGRESS_FAST_TICK_MASK GENMASK(15, 0) 541 542 #define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0) 543 #define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2) 544 545 #define REG_LMGR_INIT_CFG 0x1000 546 #define LMGR_INIT_START BIT(31) 547 #define LMGR_SRAM_MODE_MASK BIT(30) 548 #define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20) 549 #define HW_FWD_DESC_NUM_MASK GENMASK(16, 0) 550 551 #define REG_FWD_DSCP_LOW_THR 0x1004 552 #define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0) 553 554 #define REG_EGRESS_RATE_METER_CFG 0x100c 555 #define EGRESS_RATE_METER_EN_MASK BIT(29) 556 #define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17) 557 #define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12) 558 #define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0) 559 560 #define REG_EGRESS_TRTCM_CFG 0x1010 561 #define EGRESS_TRTCM_EN_MASK BIT(31) 562 #define EGRESS_TRTCM_MODE_MASK BIT(30) 563 #define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) 564 #define EGRESS_FAST_TICK_MASK GENMASK(15, 0) 565 566 #define REG_TXWRR_MODE_CFG 0x1020 567 #define TWRR_WEIGHT_SCALE_MASK BIT(31) 568 #define TWRR_WEIGHT_BASE_MASK BIT(3) 569 570 #define REG_PSE_BUF_USAGE_CFG 0x1028 571 #define PSE_BUF_ESTIMATE_EN_MASK BIT(29) 572 573 #define REG_GLB_TRTCM_CFG 0x1080 574 #define GLB_TRTCM_EN_MASK BIT(31) 575 #define GLB_TRTCM_MODE_MASK BIT(30) 576 #define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16) 577 #define GLB_FAST_TICK_MASK GENMASK(15, 0) 578 579 #define REG_TXQ_CNGST_CFG 0x10a0 580 #define TXQ_CNGST_DROP_EN BIT(31) 581 #define TXQ_CNGST_DEI_DROP_EN BIT(30) 582 583 #define REG_SLA_TRTCM_CFG 0x1150 584 #define SLA_TRTCM_EN_MASK BIT(31) 585 #define SLA_TRTCM_MODE_MASK BIT(30) 586 #define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16) 587 #define SLA_FAST_TICK_MASK GENMASK(15, 0) 588 589 /* CTRL */ 590 #define QDMA_DESC_DONE_MASK BIT(31) 591 #define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */ 592 #define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */ 593 #define QDMA_DESC_DEI_MASK BIT(25) 594 #define QDMA_DESC_NO_DROP_MASK BIT(24) 595 #define QDMA_DESC_LEN_MASK GENMASK(15, 0) 596 /* DATA */ 597 #define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0) 598 /* TX MSG0 */ 599 #define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30) 600 #define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14) 601 #define QDMA_ETH_TXMSG_ICO_MASK BIT(13) 602 #define QDMA_ETH_TXMSG_UCO_MASK 
BIT(12) 603 #define QDMA_ETH_TXMSG_TCO_MASK BIT(11) 604 #define QDMA_ETH_TXMSG_TSO_MASK BIT(10) 605 #define QDMA_ETH_TXMSG_FAST_MASK BIT(9) 606 #define QDMA_ETH_TXMSG_OAM_MASK BIT(8) 607 #define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3) 608 #define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0) 609 /* TX MSG1 */ 610 #define QDMA_ETH_TXMSG_NO_DROP BIT(31) 611 #define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */ 612 #define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20) 613 #define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15) 614 #define QDMA_ETH_TXMSG_HWF_MASK BIT(14) 615 #define QDMA_ETH_TXMSG_HOP_MASK BIT(13) 616 #define QDMA_ETH_TXMSG_PTP_MASK BIT(12) 617 #define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */ 618 #define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */ 619 620 /* RX MSG1 */ 621 #define QDMA_ETH_RXMSG_DEI_MASK BIT(31) 622 #define QDMA_ETH_RXMSG_IP6_MASK BIT(30) 623 #define QDMA_ETH_RXMSG_IP4_MASK BIT(29) 624 #define QDMA_ETH_RXMSG_IP4F_MASK BIT(28) 625 #define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27) 626 #define QDMA_ETH_RXMSG_L4F_MASK BIT(26) 627 #define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21) 628 #define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16) 629 #define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0) 630 631 struct airoha_qdma_desc { 632 __le32 rsv; 633 __le32 ctrl; 634 __le32 addr; 635 __le32 data; 636 __le32 msg0; 637 __le32 msg1; 638 __le32 msg2; 639 __le32 msg3; 640 }; 641 642 /* CTRL0 */ 643 #define QDMA_FWD_DESC_CTX_MASK BIT(31) 644 #define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28) 645 #define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16) 646 #define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0) 647 /* CTRL1 */ 648 #define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0) 649 /* CTRL2 */ 650 #define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0) 651 652 struct airoha_qdma_fwd_desc { 653 __le32 addr; 654 __le32 ctrl0; 655 __le32 ctrl1; 656 __le32 ctrl2; 657 __le32 msg0; 658 __le32 msg1; 659 __le32 rsv0; 660 __le32 rsv1; 661 }; 662 663 enum { 664 QDMA_INT_REG_IDX0, 665 QDMA_INT_REG_IDX1, 666 QDMA_INT_REG_IDX2, 667 QDMA_INT_REG_IDX3, 668 QDMA_INT_REG_IDX4, 669 QDMA_INT_REG_MAX 670 }; 671 672 enum { 673 XSI_PCIE0_PORT, 674 XSI_PCIE1_PORT, 675 XSI_USB_PORT, 676 XSI_AE_PORT, 677 XSI_ETH_PORT, 678 }; 679 680 enum { 681 XSI_PCIE0_VIP_PORT_MASK = BIT(22), 682 XSI_PCIE1_VIP_PORT_MASK = BIT(23), 683 XSI_USB_VIP_PORT_MASK = BIT(25), 684 XSI_ETH_VIP_PORT_MASK = BIT(24), 685 }; 686 687 enum { 688 DEV_STATE_INITIALIZED, 689 }; 690 691 enum { 692 CDM_CRSN_QSEL_Q1 = 1, 693 CDM_CRSN_QSEL_Q5 = 5, 694 CDM_CRSN_QSEL_Q6 = 6, 695 CDM_CRSN_QSEL_Q15 = 15, 696 }; 697 698 enum { 699 CRSN_08 = 0x8, 700 CRSN_21 = 0x15, /* KA */ 701 CRSN_22 = 0x16, /* hit bind and force route to CPU */ 702 CRSN_24 = 0x18, 703 CRSN_25 = 0x19, 704 }; 705 706 enum { 707 FE_PSE_PORT_CDM1, 708 FE_PSE_PORT_GDM1, 709 FE_PSE_PORT_GDM2, 710 FE_PSE_PORT_GDM3, 711 FE_PSE_PORT_PPE1, 712 FE_PSE_PORT_CDM2, 713 FE_PSE_PORT_CDM3, 714 FE_PSE_PORT_CDM4, 715 FE_PSE_PORT_PPE2, 716 FE_PSE_PORT_GDM4, 717 FE_PSE_PORT_CDM5, 718 FE_PSE_PORT_DROP = 0xf, 719 }; 720 721 struct airoha_queue_entry { 722 union { 723 void *buf; 724 struct sk_buff *skb; 725 }; 726 dma_addr_t dma_addr; 727 u16 dma_len; 728 }; 729 730 struct airoha_queue { 731 struct airoha_qdma *qdma; 732 733 /* protect concurrent queue accesses */ 734 spinlock_t lock; 735 struct airoha_queue_entry *entry; 736 struct airoha_qdma_desc *desc; 737 u16 head; 738 u16 tail; 739 740 int queued; 741 int ndesc; 742 int free_thr; 743 int buf_size; 744 745 struct napi_struct 
napi; 746 struct page_pool *page_pool; 747 }; 748 749 struct airoha_tx_irq_queue { 750 struct airoha_qdma *qdma; 751 752 struct napi_struct napi; 753 u32 *q; 754 755 int size; 756 int queued; 757 u16 head; 758 }; 759 760 struct airoha_hw_stats { 761 /* protect concurrent hw_stats accesses */ 762 spinlock_t lock; 763 struct u64_stats_sync syncp; 764 765 /* get_stats64 */ 766 u64 rx_ok_pkts; 767 u64 tx_ok_pkts; 768 u64 rx_ok_bytes; 769 u64 tx_ok_bytes; 770 u64 rx_multicast; 771 u64 rx_errors; 772 u64 rx_drops; 773 u64 tx_drops; 774 u64 rx_crc_error; 775 u64 rx_over_errors; 776 /* ethtool stats */ 777 u64 tx_broadcast; 778 u64 tx_multicast; 779 u64 tx_len[7]; 780 u64 rx_broadcast; 781 u64 rx_fragment; 782 u64 rx_jabber; 783 u64 rx_len[7]; 784 }; 785 786 struct airoha_qdma { 787 struct airoha_eth *eth; 788 void __iomem *regs; 789 790 /* protect concurrent irqmask accesses */ 791 spinlock_t irq_lock; 792 u32 irqmask[QDMA_INT_REG_MAX]; 793 int irq; 794 795 struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ]; 796 797 struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; 798 struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; 799 800 /* descriptor and packet buffers for qdma hw forward */ 801 struct { 802 void *desc; 803 void *q; 804 } hfwd; 805 }; 806 807 struct airoha_gdm_port { 808 struct airoha_qdma *qdma; 809 struct net_device *dev; 810 int id; 811 812 struct airoha_hw_stats stats; 813 }; 814 815 struct airoha_eth { 816 struct device *dev; 817 818 unsigned long state; 819 void __iomem *fe_regs; 820 821 struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; 822 struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; 823 824 struct net_device *napi_dev; 825 826 struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA]; 827 struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS]; 828 }; 829 830 static u32 airoha_rr(void __iomem *base, u32 offset) 831 { 832 return readl(base + offset); 833 } 834 835 static void airoha_wr(void __iomem *base, u32 offset, u32 val) 836 { 837 writel(val, base + offset); 838 } 839 840 static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) 841 { 842 val |= (airoha_rr(base, offset) & ~mask); 843 airoha_wr(base, offset, val); 844 845 return val; 846 } 847 848 #define airoha_fe_rr(eth, offset) \ 849 airoha_rr((eth)->fe_regs, (offset)) 850 #define airoha_fe_wr(eth, offset, val) \ 851 airoha_wr((eth)->fe_regs, (offset), (val)) 852 #define airoha_fe_rmw(eth, offset, mask, val) \ 853 airoha_rmw((eth)->fe_regs, (offset), (mask), (val)) 854 #define airoha_fe_set(eth, offset, val) \ 855 airoha_rmw((eth)->fe_regs, (offset), 0, (val)) 856 #define airoha_fe_clear(eth, offset, val) \ 857 airoha_rmw((eth)->fe_regs, (offset), (val), 0) 858 859 #define airoha_qdma_rr(qdma, offset) \ 860 airoha_rr((qdma)->regs, (offset)) 861 #define airoha_qdma_wr(qdma, offset, val) \ 862 airoha_wr((qdma)->regs, (offset), (val)) 863 #define airoha_qdma_rmw(qdma, offset, mask, val) \ 864 airoha_rmw((qdma)->regs, (offset), (mask), (val)) 865 #define airoha_qdma_set(qdma, offset, val) \ 866 airoha_rmw((qdma)->regs, (offset), 0, (val)) 867 #define airoha_qdma_clear(qdma, offset, val) \ 868 airoha_rmw((qdma)->regs, (offset), (val), 0) 869 870 static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index, 871 u32 clear, u32 set) 872 { 873 unsigned long flags; 874 875 if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask))) 876 return; 877 878 spin_lock_irqsave(&qdma->irq_lock, flags); 879 880 qdma->irqmask[index] &= ~clear; 881 qdma->irqmask[index] |= set; 882 airoha_qdma_wr(qdma, 
REG_INT_ENABLE(index), qdma->irqmask[index]); 883 /* Read irq_enable register in order to guarantee the update above 884 * completes in the spinlock critical section. 885 */ 886 airoha_qdma_rr(qdma, REG_INT_ENABLE(index)); 887 888 spin_unlock_irqrestore(&qdma->irq_lock, flags); 889 } 890 891 static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index, 892 u32 mask) 893 { 894 airoha_qdma_set_irqmask(qdma, index, 0, mask); 895 } 896 897 static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index, 898 u32 mask) 899 { 900 airoha_qdma_set_irqmask(qdma, index, mask, 0); 901 } 902 903 static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr) 904 { 905 u32 val; 906 907 val = (addr[0] << 16) | (addr[1] << 8) | addr[2]; 908 airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val); 909 910 val = (addr[3] << 16) | (addr[4] << 8) | addr[5]; 911 airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val); 912 airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val); 913 } 914 915 static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr, 916 u32 val) 917 { 918 airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK, 919 FIELD_PREP(GDM_OCFQ_MASK, val)); 920 airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK, 921 FIELD_PREP(GDM_MCFQ_MASK, val)); 922 airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK, 923 FIELD_PREP(GDM_BCFQ_MASK, val)); 924 airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK, 925 FIELD_PREP(GDM_UCFQ_MASK, val)); 926 } 927 928 static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable) 929 { 930 u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP; 931 u32 vip_port, cfg_addr; 932 933 switch (port) { 934 case XSI_PCIE0_PORT: 935 vip_port = XSI_PCIE0_VIP_PORT_MASK; 936 cfg_addr = REG_GDM_FWD_CFG(3); 937 break; 938 case XSI_PCIE1_PORT: 939 vip_port = XSI_PCIE1_VIP_PORT_MASK; 940 cfg_addr = REG_GDM_FWD_CFG(3); 941 break; 942 case XSI_USB_PORT: 943 vip_port = XSI_USB_VIP_PORT_MASK; 944 cfg_addr = REG_GDM_FWD_CFG(4); 945 break; 946 case XSI_ETH_PORT: 947 vip_port = XSI_ETH_VIP_PORT_MASK; 948 cfg_addr = REG_GDM_FWD_CFG(4); 949 break; 950 default: 951 return -EINVAL; 952 } 953 954 if (enable) { 955 airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port); 956 airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port); 957 } else { 958 airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port); 959 airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port); 960 } 961 962 airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val); 963 964 return 0; 965 } 966 967 static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable) 968 { 969 const int port_list[] = { 970 XSI_PCIE0_PORT, 971 XSI_PCIE1_PORT, 972 XSI_USB_PORT, 973 XSI_ETH_PORT 974 }; 975 int i, err; 976 977 for (i = 0; i < ARRAY_SIZE(port_list); i++) { 978 err = airoha_set_gdm_port(eth, port_list[i], enable); 979 if (err) 980 goto error; 981 } 982 983 return 0; 984 985 error: 986 for (i--; i >= 0; i--) 987 airoha_set_gdm_port(eth, port_list[i], false); 988 989 return err; 990 } 991 992 static void airoha_fe_maccr_init(struct airoha_eth *eth) 993 { 994 int p; 995 996 for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) { 997 airoha_fe_set(eth, REG_GDM_FWD_CFG(p), 998 GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM | 999 GDM_DROP_CRC_ERR); 1000 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p), 1001 FE_PSE_PORT_CDM1); 1002 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p), 1003 GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, 1004 FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | 1005 FIELD_PREP(GDM_LONG_LEN_MASK, 4004)); 1006 } 1007 1008 airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK, 1009 FIELD_PREP(CDM1_VLAN_MASK, 0x8100)); 1010 1011 
airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD); 1012 } 1013 1014 static void airoha_fe_vip_setup(struct airoha_eth *eth) 1015 { 1016 airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC); 1017 airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK); 1018 1019 airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP); 1020 airoha_fe_wr(eth, REG_FE_VIP_EN(4), 1021 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 1022 PATN_EN_MASK); 1023 1024 airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP); 1025 airoha_fe_wr(eth, REG_FE_VIP_EN(6), 1026 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 1027 PATN_EN_MASK); 1028 1029 airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP); 1030 airoha_fe_wr(eth, REG_FE_VIP_EN(7), 1031 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 1032 PATN_EN_MASK); 1033 1034 /* BOOTP (0x43) */ 1035 airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43); 1036 airoha_fe_wr(eth, REG_FE_VIP_EN(8), 1037 PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | 1038 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 1039 1040 /* BOOTP (0x44) */ 1041 airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44); 1042 airoha_fe_wr(eth, REG_FE_VIP_EN(9), 1043 PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | 1044 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 1045 1046 /* ISAKMP */ 1047 airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4); 1048 airoha_fe_wr(eth, REG_FE_VIP_EN(10), 1049 PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | 1050 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 1051 1052 airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP); 1053 airoha_fe_wr(eth, REG_FE_VIP_EN(11), 1054 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 1055 PATN_EN_MASK); 1056 1057 /* DHCPv6 */ 1058 airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223); 1059 airoha_fe_wr(eth, REG_FE_VIP_EN(12), 1060 PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | 1061 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 1062 1063 airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP); 1064 airoha_fe_wr(eth, REG_FE_VIP_EN(19), 1065 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 1066 PATN_EN_MASK); 1067 1068 /* ETH->ETH_P_1905 (0x893a) */ 1069 airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a); 1070 airoha_fe_wr(eth, REG_FE_VIP_EN(20), 1071 PATN_FCPU_EN_MASK | PATN_EN_MASK); 1072 1073 airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP); 1074 airoha_fe_wr(eth, REG_FE_VIP_EN(21), 1075 PATN_FCPU_EN_MASK | PATN_EN_MASK); 1076 } 1077 1078 static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth, 1079 u32 port, u32 queue) 1080 { 1081 u32 val; 1082 1083 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, 1084 PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK, 1085 FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | 1086 FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue)); 1087 val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL); 1088 1089 return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val); 1090 } 1091 1092 static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth, 1093 u32 port, u32 queue, u32 val) 1094 { 1095 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK, 1096 FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val)); 1097 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, 1098 PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK | 1099 PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK, 1100 FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | 1101 FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) | 1102 PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK); 1103 } 1104 1105 static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth, 1106 u32 port, u32 queue, u32 val) 1107 { 1108 u32 orig_val, tmp, all_rsv, fq_limit; 1109 1110 
airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val); 1111 1112 /* modify all rsv */ 1113 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue); 1114 tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET); 1115 all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp); 1116 all_rsv += (val - orig_val); 1117 airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK, 1118 FIELD_PREP(PSE_ALLRSV_MASK, all_rsv)); 1119 1120 /* modify hthd */ 1121 tmp = airoha_fe_rr(eth, PSE_FQ_CFG); 1122 fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp); 1123 tmp = fq_limit - all_rsv - 0x20; 1124 airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, 1125 PSE_SHARE_USED_HTHD_MASK, 1126 FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp)); 1127 1128 tmp = fq_limit - all_rsv - 0x100; 1129 airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, 1130 PSE_SHARE_USED_MTHD_MASK, 1131 FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp)); 1132 tmp = (3 * tmp) >> 2; 1133 airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, 1134 PSE_SHARE_USED_LTHD_MASK, 1135 FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp)); 1136 1137 return 0; 1138 } 1139 1140 static void airoha_fe_pse_ports_init(struct airoha_eth *eth) 1141 { 1142 const u32 pse_port_num_queues[] = { 1143 [FE_PSE_PORT_CDM1] = 6, 1144 [FE_PSE_PORT_GDM1] = 6, 1145 [FE_PSE_PORT_GDM2] = 32, 1146 [FE_PSE_PORT_GDM3] = 6, 1147 [FE_PSE_PORT_PPE1] = 4, 1148 [FE_PSE_PORT_CDM2] = 6, 1149 [FE_PSE_PORT_CDM3] = 8, 1150 [FE_PSE_PORT_CDM4] = 10, 1151 [FE_PSE_PORT_PPE2] = 4, 1152 [FE_PSE_PORT_GDM4] = 2, 1153 [FE_PSE_PORT_CDM5] = 2, 1154 }; 1155 int q; 1156 1157 /* hw misses PPE2 oq rsv */ 1158 airoha_fe_set(eth, REG_FE_PSE_BUF_SET, 1159 PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]); 1160 1161 /* CMD1 */ 1162 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++) 1163 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q, 1164 PSE_QUEUE_RSV_PAGES); 1165 /* GMD1 */ 1166 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++) 1167 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q, 1168 PSE_QUEUE_RSV_PAGES); 1169 /* GMD2 */ 1170 for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++) 1171 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0); 1172 /* GMD3 */ 1173 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++) 1174 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q, 1175 PSE_QUEUE_RSV_PAGES); 1176 /* PPE1 */ 1177 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) { 1178 if (q < pse_port_num_queues[FE_PSE_PORT_PPE1]) 1179 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 1180 PSE_QUEUE_RSV_PAGES); 1181 else 1182 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0); 1183 } 1184 /* CDM2 */ 1185 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++) 1186 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q, 1187 PSE_QUEUE_RSV_PAGES); 1188 /* CDM3 */ 1189 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++) 1190 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0); 1191 /* CDM4 */ 1192 for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++) 1193 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q, 1194 PSE_QUEUE_RSV_PAGES); 1195 /* PPE2 */ 1196 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { 1197 if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) 1198 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 1199 PSE_QUEUE_RSV_PAGES); 1200 else 1201 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0); 1202 } 1203 /* GMD4 */ 1204 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++) 1205 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q, 1206 PSE_QUEUE_RSV_PAGES); 1207 /* CDM5 */ 1208 for (q = 0; q < 
pse_port_num_queues[FE_PSE_PORT_CDM5]; q++) 1209 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q, 1210 PSE_QUEUE_RSV_PAGES); 1211 } 1212 1213 static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth) 1214 { 1215 int i; 1216 1217 for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) { 1218 int err, j; 1219 u32 val; 1220 1221 airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0); 1222 1223 val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) | 1224 MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK; 1225 airoha_fe_wr(eth, REG_MC_VLAN_CFG, val); 1226 err = read_poll_timeout(airoha_fe_rr, val, 1227 val & MC_VLAN_CFG_CMD_DONE_MASK, 1228 USEC_PER_MSEC, 5 * USEC_PER_MSEC, 1229 false, eth, REG_MC_VLAN_CFG); 1230 if (err) 1231 return err; 1232 1233 for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) { 1234 airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0); 1235 1236 val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) | 1237 FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) | 1238 MC_VLAN_CFG_RW_MASK; 1239 airoha_fe_wr(eth, REG_MC_VLAN_CFG, val); 1240 err = read_poll_timeout(airoha_fe_rr, val, 1241 val & MC_VLAN_CFG_CMD_DONE_MASK, 1242 USEC_PER_MSEC, 1243 5 * USEC_PER_MSEC, false, eth, 1244 REG_MC_VLAN_CFG); 1245 if (err) 1246 return err; 1247 } 1248 } 1249 1250 return 0; 1251 } 1252 1253 static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth) 1254 { 1255 /* CDM1_CRSN_QSEL */ 1256 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2), 1257 CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), 1258 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), 1259 CDM_CRSN_QSEL_Q1)); 1260 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2), 1261 CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), 1262 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), 1263 CDM_CRSN_QSEL_Q1)); 1264 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2), 1265 CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), 1266 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), 1267 CDM_CRSN_QSEL_Q1)); 1268 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2), 1269 CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), 1270 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), 1271 CDM_CRSN_QSEL_Q6)); 1272 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2), 1273 CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), 1274 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), 1275 CDM_CRSN_QSEL_Q1)); 1276 /* CDM2_CRSN_QSEL */ 1277 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2), 1278 CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), 1279 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), 1280 CDM_CRSN_QSEL_Q1)); 1281 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2), 1282 CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), 1283 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), 1284 CDM_CRSN_QSEL_Q1)); 1285 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2), 1286 CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), 1287 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), 1288 CDM_CRSN_QSEL_Q1)); 1289 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2), 1290 CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), 1291 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), 1292 CDM_CRSN_QSEL_Q6)); 1293 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2), 1294 CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), 1295 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), 1296 CDM_CRSN_QSEL_Q1)); 1297 } 1298 1299 static int airoha_fe_init(struct airoha_eth *eth) 1300 { 1301 airoha_fe_maccr_init(eth); 1302 1303 /* PSE IQ reserve */ 1304 airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK, 1305 FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10)); 1306 airoha_fe_rmw(eth, REG_PSE_IQ_REV2, 1307 PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK, 1308 FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) | 1309 
FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34)); 1310 1311 /* enable FE copy engine for MC/KA/DPI */ 1312 airoha_fe_wr(eth, REG_FE_PCE_CFG, 1313 PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK); 1314 /* set vip queue selection to ring 1 */ 1315 airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK, 1316 FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4)); 1317 airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK, 1318 FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4)); 1319 /* set GDM4 source interface offset to 8 */ 1320 airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET, 1321 GDM4_SPORT_OFF2_MASK | 1322 GDM4_SPORT_OFF1_MASK | 1323 GDM4_SPORT_OFF0_MASK, 1324 FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) | 1325 FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) | 1326 FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8)); 1327 1328 /* set PSE Page as 128B */ 1329 airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG, 1330 FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK, 1331 FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) | 1332 FE_DMA_GLO_PG_SZ_MASK); 1333 airoha_fe_wr(eth, REG_FE_RST_GLO_CFG, 1334 FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK | 1335 FE_RST_GDM4_MBI_ARB_MASK); 1336 usleep_range(1000, 2000); 1337 1338 /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1 1339 * connect other rings to PSE Port0 OQ-0 1340 */ 1341 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4)); 1342 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28)); 1343 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4)); 1344 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28)); 1345 1346 airoha_fe_vip_setup(eth); 1347 airoha_fe_pse_ports_init(eth); 1348 1349 airoha_fe_set(eth, REG_GDM_MISC_CFG, 1350 GDM2_RDM_ACK_WAIT_PREF_MASK | 1351 GDM2_CHN_VLD_MODE_MASK); 1352 airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15); 1353 1354 /* init fragment and assemble Force Port */ 1355 /* NPU Core-3, NPU Bridge Channel-3 */ 1356 airoha_fe_rmw(eth, REG_IP_FRAG_FP, 1357 IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK, 1358 FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) | 1359 FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3)); 1360 /* QDMA LAN, RX Ring-22 */ 1361 airoha_fe_rmw(eth, REG_IP_FRAG_FP, 1362 IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK, 1363 FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) | 1364 FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22)); 1365 1366 airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK); 1367 airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK); 1368 1369 airoha_fe_crsn_qsel_init(eth); 1370 1371 airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK); 1372 airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK); 1373 1374 /* default aging mode for mbi unlock issue */ 1375 airoha_fe_rmw(eth, REG_GDM2_CHN_RLS, 1376 MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK, 1377 FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) | 1378 FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3)); 1379 1380 /* disable IFC by default */ 1381 airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); 1382 1383 /* enable 1:N vlan action, init vlan table */ 1384 airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); 1385 1386 return airoha_fe_mc_vlan_clear(eth); 1387 } 1388 1389 static int airoha_qdma_fill_rx_queue(struct airoha_queue *q) 1390 { 1391 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); 1392 struct airoha_qdma *qdma = q->qdma; 1393 struct airoha_eth *eth = qdma->eth; 1394 int qid = q - &qdma->q_rx[0]; 1395 int nframes = 0; 1396 1397 while (q->queued < q->ndesc - 1) { 1398 struct airoha_queue_entry *e = &q->entry[q->head]; 1399 struct airoha_qdma_desc *desc = &q->desc[q->head]; 1400 struct page *page; 1401 int offset; 1402 u32 val; 1403 1404 page = page_pool_dev_alloc_frag(q->page_pool, 
&offset, 1405 q->buf_size); 1406 if (!page) 1407 break; 1408 1409 q->head = (q->head + 1) % q->ndesc; 1410 q->queued++; 1411 nframes++; 1412 1413 e->buf = page_address(page) + offset; 1414 e->dma_addr = page_pool_get_dma_addr(page) + offset; 1415 e->dma_len = SKB_WITH_OVERHEAD(q->buf_size); 1416 1417 dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len, 1418 dir); 1419 1420 val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len); 1421 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); 1422 WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr)); 1423 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head); 1424 WRITE_ONCE(desc->data, cpu_to_le32(val)); 1425 WRITE_ONCE(desc->msg0, 0); 1426 WRITE_ONCE(desc->msg1, 0); 1427 WRITE_ONCE(desc->msg2, 0); 1428 WRITE_ONCE(desc->msg3, 0); 1429 1430 airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), 1431 RX_RING_CPU_IDX_MASK, 1432 FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); 1433 } 1434 1435 return nframes; 1436 } 1437 1438 static int airoha_qdma_get_gdm_port(struct airoha_eth *eth, 1439 struct airoha_qdma_desc *desc) 1440 { 1441 u32 port, sport, msg1 = le32_to_cpu(desc->msg1); 1442 1443 sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1); 1444 switch (sport) { 1445 case 0x10 ... 0x13: 1446 port = 0; 1447 break; 1448 case 0x2 ... 0x4: 1449 port = sport - 1; 1450 break; 1451 default: 1452 return -EINVAL; 1453 } 1454 1455 return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port; 1456 } 1457 1458 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) 1459 { 1460 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); 1461 struct airoha_qdma *qdma = q->qdma; 1462 struct airoha_eth *eth = qdma->eth; 1463 int qid = q - &qdma->q_rx[0]; 1464 int done = 0; 1465 1466 while (done < budget) { 1467 struct airoha_queue_entry *e = &q->entry[q->tail]; 1468 struct airoha_qdma_desc *desc = &q->desc[q->tail]; 1469 dma_addr_t dma_addr = le32_to_cpu(desc->addr); 1470 u32 desc_ctrl = le32_to_cpu(desc->ctrl); 1471 struct sk_buff *skb; 1472 int len, p; 1473 1474 if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) 1475 break; 1476 1477 if (!dma_addr) 1478 break; 1479 1480 len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); 1481 if (!len) 1482 break; 1483 1484 q->tail = (q->tail + 1) % q->ndesc; 1485 q->queued--; 1486 1487 dma_sync_single_for_cpu(eth->dev, dma_addr, 1488 SKB_WITH_OVERHEAD(q->buf_size), dir); 1489 1490 p = airoha_qdma_get_gdm_port(eth, desc); 1491 if (p < 0 || !eth->ports[p]) { 1492 page_pool_put_full_page(q->page_pool, 1493 virt_to_head_page(e->buf), 1494 true); 1495 continue; 1496 } 1497 1498 skb = napi_build_skb(e->buf, q->buf_size); 1499 if (!skb) { 1500 page_pool_put_full_page(q->page_pool, 1501 virt_to_head_page(e->buf), 1502 true); 1503 break; 1504 } 1505 1506 skb_reserve(skb, 2); 1507 __skb_put(skb, len); 1508 skb_mark_for_recycle(skb); 1509 skb->dev = eth->ports[p]->dev; 1510 skb->protocol = eth_type_trans(skb, skb->dev); 1511 skb->ip_summed = CHECKSUM_UNNECESSARY; 1512 skb_record_rx_queue(skb, qid); 1513 napi_gro_receive(&q->napi, skb); 1514 1515 done++; 1516 } 1517 airoha_qdma_fill_rx_queue(q); 1518 1519 return done; 1520 } 1521 1522 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget) 1523 { 1524 struct airoha_queue *q = container_of(napi, struct airoha_queue, napi); 1525 int cur, done = 0; 1526 1527 do { 1528 cur = airoha_qdma_rx_process(q, budget - done); 1529 done += cur; 1530 } while (cur && done < budget); 1531 1532 if (done < budget && napi_complete(napi)) 1533 airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1, 1534 RX_DONE_INT_MASK); 1535 1536 
return done; 1537 } 1538 1539 static int airoha_qdma_init_rx_queue(struct airoha_queue *q, 1540 struct airoha_qdma *qdma, int ndesc) 1541 { 1542 const struct page_pool_params pp_params = { 1543 .order = 0, 1544 .pool_size = 256, 1545 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 1546 .dma_dir = DMA_FROM_DEVICE, 1547 .max_len = PAGE_SIZE, 1548 .nid = NUMA_NO_NODE, 1549 .dev = qdma->eth->dev, 1550 .napi = &q->napi, 1551 }; 1552 struct airoha_eth *eth = qdma->eth; 1553 int qid = q - &qdma->q_rx[0], thr; 1554 dma_addr_t dma_addr; 1555 1556 q->buf_size = PAGE_SIZE / 2; 1557 q->ndesc = ndesc; 1558 q->qdma = qdma; 1559 1560 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), 1561 GFP_KERNEL); 1562 if (!q->entry) 1563 return -ENOMEM; 1564 1565 q->page_pool = page_pool_create(&pp_params); 1566 if (IS_ERR(q->page_pool)) { 1567 int err = PTR_ERR(q->page_pool); 1568 1569 q->page_pool = NULL; 1570 return err; 1571 } 1572 1573 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), 1574 &dma_addr, GFP_KERNEL); 1575 if (!q->desc) 1576 return -ENOMEM; 1577 1578 netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); 1579 1580 airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr); 1581 airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), 1582 RX_RING_SIZE_MASK, 1583 FIELD_PREP(RX_RING_SIZE_MASK, ndesc)); 1584 1585 thr = clamp(ndesc >> 3, 1, 32); 1586 airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK, 1587 FIELD_PREP(RX_RING_THR_MASK, thr)); 1588 airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, 1589 FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); 1590 1591 airoha_qdma_fill_rx_queue(q); 1592 1593 return 0; 1594 } 1595 1596 static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q) 1597 { 1598 struct airoha_eth *eth = q->qdma->eth; 1599 1600 while (q->queued) { 1601 struct airoha_queue_entry *e = &q->entry[q->tail]; 1602 struct page *page = virt_to_head_page(e->buf); 1603 1604 dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len, 1605 page_pool_get_dma_dir(q->page_pool)); 1606 page_pool_put_full_page(q->page_pool, page, false); 1607 q->tail = (q->tail + 1) % q->ndesc; 1608 q->queued--; 1609 } 1610 } 1611 1612 static int airoha_qdma_init_rx(struct airoha_qdma *qdma) 1613 { 1614 int i; 1615 1616 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1617 int err; 1618 1619 if (!(RX_DONE_INT_MASK & BIT(i))) { 1620 /* rx-queue not binded to irq */ 1621 continue; 1622 } 1623 1624 err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma, 1625 RX_DSCP_NUM(i)); 1626 if (err) 1627 return err; 1628 } 1629 1630 return 0; 1631 } 1632 1633 static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) 1634 { 1635 struct airoha_tx_irq_queue *irq_q; 1636 struct airoha_qdma *qdma; 1637 struct airoha_eth *eth; 1638 int id, done = 0; 1639 1640 irq_q = container_of(napi, struct airoha_tx_irq_queue, napi); 1641 qdma = irq_q->qdma; 1642 id = irq_q - &qdma->q_tx_irq[0]; 1643 eth = qdma->eth; 1644 1645 while (irq_q->queued > 0 && done < budget) { 1646 u32 qid, last, val = irq_q->q[irq_q->head]; 1647 struct airoha_queue *q; 1648 1649 if (val == 0xff) 1650 break; 1651 1652 irq_q->q[irq_q->head] = 0xff; /* mark as done */ 1653 irq_q->head = (irq_q->head + 1) % irq_q->size; 1654 irq_q->queued--; 1655 done++; 1656 1657 last = FIELD_GET(IRQ_DESC_IDX_MASK, val); 1658 qid = FIELD_GET(IRQ_RING_IDX_MASK, val); 1659 1660 if (qid >= ARRAY_SIZE(qdma->q_tx)) 1661 continue; 1662 1663 q = &qdma->q_tx[qid]; 1664 if (!q->ndesc) 1665 continue; 1666 1667 spin_lock_bh(&q->lock); 1668 1669 
while (q->queued > 0) { 1670 struct airoha_qdma_desc *desc = &q->desc[q->tail]; 1671 struct airoha_queue_entry *e = &q->entry[q->tail]; 1672 u32 desc_ctrl = le32_to_cpu(desc->ctrl); 1673 struct sk_buff *skb = e->skb; 1674 u16 index = q->tail; 1675 1676 if (!(desc_ctrl & QDMA_DESC_DONE_MASK) && 1677 !(desc_ctrl & QDMA_DESC_DROP_MASK)) 1678 break; 1679 1680 q->tail = (q->tail + 1) % q->ndesc; 1681 q->queued--; 1682 1683 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, 1684 DMA_TO_DEVICE); 1685 1686 WRITE_ONCE(desc->msg0, 0); 1687 WRITE_ONCE(desc->msg1, 0); 1688 1689 if (skb) { 1690 struct netdev_queue *txq; 1691 1692 txq = netdev_get_tx_queue(skb->dev, qid); 1693 if (netif_tx_queue_stopped(txq) && 1694 q->ndesc - q->queued >= q->free_thr) 1695 netif_tx_wake_queue(txq); 1696 1697 dev_kfree_skb_any(skb); 1698 e->skb = NULL; 1699 } 1700 1701 if (index == last) 1702 break; 1703 } 1704 1705 spin_unlock_bh(&q->lock); 1706 } 1707 1708 if (done) { 1709 int i, len = done >> 7; 1710 1711 for (i = 0; i < len; i++) 1712 airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), 1713 IRQ_CLEAR_LEN_MASK, 0x80); 1714 airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), 1715 IRQ_CLEAR_LEN_MASK, (done & 0x7f)); 1716 } 1717 1718 if (done < budget && napi_complete(napi)) 1719 airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, 1720 TX_DONE_INT_MASK(id)); 1721 1722 return done; 1723 } 1724 1725 static int airoha_qdma_init_tx_queue(struct airoha_queue *q, 1726 struct airoha_qdma *qdma, int size) 1727 { 1728 struct airoha_eth *eth = qdma->eth; 1729 int i, qid = q - &qdma->q_tx[0]; 1730 dma_addr_t dma_addr; 1731 1732 spin_lock_init(&q->lock); 1733 q->ndesc = size; 1734 q->qdma = qdma; 1735 q->free_thr = 1 + MAX_SKB_FRAGS; 1736 1737 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), 1738 GFP_KERNEL); 1739 if (!q->entry) 1740 return -ENOMEM; 1741 1742 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), 1743 &dma_addr, GFP_KERNEL); 1744 if (!q->desc) 1745 return -ENOMEM; 1746 1747 for (i = 0; i < q->ndesc; i++) { 1748 u32 val; 1749 1750 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); 1751 WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); 1752 } 1753 1754 airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr); 1755 airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, 1756 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); 1757 airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, 1758 FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); 1759 1760 return 0; 1761 } 1762 1763 static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q, 1764 struct airoha_qdma *qdma, int size) 1765 { 1766 int id = irq_q - &qdma->q_tx_irq[0]; 1767 struct airoha_eth *eth = qdma->eth; 1768 dma_addr_t dma_addr; 1769 1770 netif_napi_add_tx(eth->napi_dev, &irq_q->napi, 1771 airoha_qdma_tx_napi_poll); 1772 irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32), 1773 &dma_addr, GFP_KERNEL); 1774 if (!irq_q->q) 1775 return -ENOMEM; 1776 1777 memset(irq_q->q, 0xff, size * sizeof(u32)); 1778 irq_q->size = size; 1779 irq_q->qdma = qdma; 1780 1781 airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr); 1782 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK, 1783 FIELD_PREP(TX_IRQ_DEPTH_MASK, size)); 1784 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK, 1785 FIELD_PREP(TX_IRQ_THR_MASK, 1)); 1786 1787 return 0; 1788 } 1789 1790 static int airoha_qdma_init_tx(struct airoha_qdma *qdma) 1791 { 1792 int i, err; 1793 1794 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { 1795 err = 
		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
					      IRQ_QUEUE_LEN(i));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
						TX_DSCP_NUM);
		if (err)
			return err;
	}

	return 0;
}

static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
	struct airoha_eth *eth = q->qdma->eth;

	spin_lock_bh(&q->lock);
	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(e->skb);
		e->skb = NULL;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);
}

static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;
	u32 status;
	int size;

	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
	qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
					      GFP_KERNEL);
	if (!qdma->hfwd.desc)
		return -ENOMEM;

	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);

	size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
	qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
					   GFP_KERNEL);
	if (!qdma->hfwd.q)
		return -ENOMEM;

	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
			HW_FWD_DESC_NUM_MASK,
			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
			LMGR_INIT_START);

	return read_poll_timeout(airoha_qdma_rr, status,
				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
				 30 * USEC_PER_MSEC, true, qdma,
				 REG_LMGR_INIT_CFG);
}

static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);

	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
			  PSE_BUF_ESTIMATE_EN_MASK);

	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_EN_MASK |
			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
	/* 2047us x 31 = 63.457ms */
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_WINDOW_SZ_MASK,
			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_TIMESLICE_MASK,
			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));

	/* ratelimit init */
	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
	/* fast-tick 25us */
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
			EGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
			  INGRESS_TRTCM_MODE_MASK);
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
			INGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}

static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
	int i;

	/* clear pending irqs */
	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);

	/* setup irqs */
	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);

	/* setup irq binding */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
					TX_RING_IRQ_BLOCKING_CFG_MASK);
		else
			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
					  TX_RING_IRQ_BLOCKING_CFG_MASK);
	}

	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
		       GLOBAL_CFG_RX_2B_OFFSET_MASK |
		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
		       GLOBAL_CFG_CPU_TXR_RR_MASK |
		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
		       GLOBAL_CFG_MULTICAST_EN_MASK |
		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
		       GLOBAL_CFG_TX_WB_DONE_MASK |
		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

	airoha_qdma_init_qos(qdma);

	/* disable qdma rx delay interrupt */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
				  RX_DELAY_INT_MASK);
	}

	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);

	return 0;
}

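/* Shared top-half for the per-QDMA interrupt line: ack the pending
 * sources, mask them and defer the actual work to NAPI.
 */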
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
	struct airoha_qdma *qdma = dev_instance;
	u32 intr[ARRAY_SIZE(qdma->irqmask)];
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
		intr[i] &= qdma->irqmask[i];
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
	}

	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
		return IRQ_NONE;

	if (intr[1] & RX_DONE_INT_MASK) {
		airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
					RX_DONE_INT_MASK);

		for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
			if (!qdma->q_rx[i].ndesc)
				continue;

			if (intr[1] & BIT(i))
				napi_schedule(&qdma->q_rx[i].napi);
		}
	}

	if (intr[0] & INT_TX_MASK) {
		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
			struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
			u32 status, head;

			if (!(intr[0] & TX_DONE_INT_MASK(i)))
				continue;

			airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
						TX_DONE_INT_MASK(i));

			status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
			head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
			irq_q->head = head % irq_q->size;
			irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);

			napi_schedule(&qdma->q_tx_irq[i].napi);
		}
	}

	return IRQ_HANDLED;
}

static int airoha_qdma_init(struct platform_device *pdev,
			    struct airoha_eth *eth,
			    struct airoha_qdma *qdma)
{
	int err, id = qdma - &eth->qdma[0];
	const char *res;

	spin_lock_init(&qdma->irq_lock);
	qdma->eth = eth;

	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
	if (!res)
		return -ENOMEM;

	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
	if (IS_ERR(qdma->regs))
		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
				     "failed to iomap qdma%d regs\n", id);

	qdma->irq = platform_get_irq(pdev, 4 * id);
	if (qdma->irq < 0)
		return qdma->irq;

	err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, qdma);
	if (err)
		return err;

	err = airoha_qdma_init_rx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_tx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_hfwd_queues(qdma);
	if (err)
		return err;

	return airoha_qdma_hw_init(qdma);
}

static int airoha_hw_init(struct platform_device *pdev,
			  struct airoha_eth *eth)
{
	int err, i;

	/* disable xsi */
	err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
					eth->xsi_rsts);
	if (err)
		return err;

	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = airoha_fe_init(eth);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
		if (err)
			return err;
	}

	set_bit(DEV_STATE_INITIALIZED, &eth->state);

	return 0;
}

static void airoha_hw_cleanup(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_disable(&qdma->q_rx[i].napi);
		netif_napi_del(&qdma->q_rx[i].napi);
		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
		if (qdma->q_rx[i].page_pool)
			page_pool_destroy(qdma->q_rx[i].page_pool);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		napi_disable(&qdma->q_tx_irq[i].napi);
		netif_napi_del(&qdma->q_tx_irq[i].napi);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
	}
}

static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_enable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_enable(&qdma->q_rx[i].napi);
	}
}

static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, i = 0;

	spin_lock(&port->stats.lock);
	u64_stats_update_begin(&port->stats.syncp);

	/* TX */
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
	port->stats.tx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
	port->stats.tx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
	port->stats.tx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
	port->stats.tx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
	port->stats.tx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
	port->stats.tx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
	port->stats.tx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
	port->stats.tx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
	port->stats.tx_len[i++] += val;

	/* RX */
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
	port->stats.rx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
	port->stats.rx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
	port->stats.rx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
	port->stats.rx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
	port->stats.rx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
	port->stats.rx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
	port->stats.rx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
	port->stats.rx_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
	port->stats.rx_crc_error += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
	port->stats.rx_over_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
	port->stats.rx_fragment += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
	port->stats.rx_jabber += val;

	i = 0;
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
	port->stats.rx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
	port->stats.rx_len[i++] += val;

	/* reset mib counters */
	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
		      FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);

	u64_stats_update_end(&port->stats.syncp);
	spin_unlock(&port->stats.lock);
}

static int airoha_dev_open(struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_qdma *qdma = port->qdma;
	int err;

	netif_tx_start_all_queues(dev);
	err = airoha_set_gdm_ports(qdma->eth, true);
	if (err)
		return err;

	if (netdev_uses_dsa(dev))
		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
			      GDM_STAG_EN_MASK);
	else
		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
				GDM_STAG_EN_MASK);

	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
			GLOBAL_CFG_TX_DMA_EN_MASK |
			GLOBAL_CFG_RX_DMA_EN_MASK);

	return 0;
}

static int airoha_dev_stop(struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_qdma *qdma = port->qdma;
	int err;

	netif_tx_disable(dev);
	err = airoha_set_gdm_ports(qdma->eth, false);
	if (err)
		return err;

	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
			  GLOBAL_CFG_TX_DMA_EN_MASK |
			  GLOBAL_CFG_RX_DMA_EN_MASK);

	return 0;
}

static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	int err;

	err = eth_mac_addr(dev, p);
	if (err)
		return err;

	airoha_set_macaddr(port->qdma->eth, dev->dev_addr);

	return 0;
}

static int airoha_dev_init(struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);

	airoha_set_macaddr(port->qdma->eth, dev->dev_addr);

	return 0;
}

static void airoha_dev_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *storage)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	unsigned int start;

	airoha_update_hw_stats(port);
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		storage->rx_packets = port->stats.rx_ok_pkts;
		storage->tx_packets = port->stats.tx_ok_pkts;
		storage->rx_bytes = port->stats.rx_ok_bytes;
		storage->tx_bytes = port->stats.tx_ok_bytes;
		storage->multicast = port->stats.rx_multicast;
		storage->rx_errors = port->stats.rx_errors;
		storage->rx_dropped = port->stats.rx_drops;
		storage->tx_dropped = port->stats.tx_drops;
		storage->rx_crc_errors = port->stats.rx_crc_error;
		storage->rx_over_errors = port->stats.rx_over_errors;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

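/* Map the skb (linear data plus fragments) onto consecutive Tx
 * descriptors and advance the ring CPU index to hand them to the
 * hardware.
 */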
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct skb_shared_info *sinfo = skb_shinfo(skb);
	struct airoha_gdm_port *port = netdev_priv(dev);
	u32 msg0 = 0, msg1, len = skb_headlen(skb);
	int i, qid = skb_get_queue_mapping(skb);
	struct airoha_qdma *qdma = port->qdma;
	u32 nr_frags = 1 + sinfo->nr_frags;
	struct netdev_queue *txq;
	struct airoha_queue *q;
	void *data = skb->data;
	u16 index;
	u8 fport;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
			FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
			FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0))
			goto error;

		if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			__be16 csum = cpu_to_be16(sinfo->gso_size);

			tcp_hdr(skb)->check = (__force __sum16)csum;
			msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
		}
	}

	fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);

	q = &qdma->q_tx[qid];
	if (WARN_ON_ONCE(!q->ndesc))
		goto error;

	spin_lock_bh(&q->lock);

	txq = netdev_get_tx_queue(dev, qid);
	if (q->queued + nr_frags > q->ndesc) {
		/* not enough space in the queue */
		netif_tx_stop_queue(txq);
		spin_unlock_bh(&q->lock);
		return NETDEV_TX_BUSY;
	}

	index = q->head;
	for (i = 0; i < nr_frags; i++) {
		struct airoha_qdma_desc *desc = &q->desc[index];
		struct airoha_queue_entry *e = &q->entry[index];
		skb_frag_t *frag = &sinfo->frags[i];
		dma_addr_t addr;
		u32 val;

		addr = dma_map_single(dev->dev.parent, data, len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
			goto error_unmap;

		index = (index + 1) % q->ndesc;

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
		if (i < nr_frags - 1)
			val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
		WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
		WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));

		e->skb = i ? NULL : skb;
		e->dma_addr = addr;
		e->dma_len = len;

		airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
				TX_RING_CPU_IDX_MASK,
				FIELD_PREP(TX_RING_CPU_IDX_MASK, index));

		data = skb_frag_address(frag);
		len = skb_frag_size(frag);
	}

	q->head = index;
	q->queued += i;

	skb_tx_timestamp(skb);
	if (q->ndesc - q->queued < q->free_thr)
		netif_tx_stop_queue(txq);

	spin_unlock_bh(&q->lock);

	return NETDEV_TX_OK;

error_unmap:
	for (i--; i >= 0; i--) {
		index = (q->head + i) % q->ndesc;
		dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
				 q->entry[index].dma_len, DMA_TO_DEVICE);
	}

	spin_unlock_bh(&q->lock);
error:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static void airoha_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *info)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_eth *eth = port->qdma->eth;

	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
}

static void airoha_ethtool_get_mac_stats(struct net_device *dev,
					 struct ethtool_eth_mac_stats *stats)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	unsigned int start;

	airoha_update_hw_stats(port);
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
		stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
		stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 10239 },
	{},
};

static void
airoha_ethtool_get_rmon_stats(struct net_device *dev,
			      struct ethtool_rmon_stats *stats,
			      const struct ethtool_rmon_hist_range **ranges)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_hw_stats *hw_stats = &port->stats;
	unsigned int start;

	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
		     ARRAY_SIZE(hw_stats->tx_len) + 1);
	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
		     ARRAY_SIZE(hw_stats->rx_len) + 1);

	*ranges = airoha_ethtool_rmon_ranges;
	airoha_update_hw_stats(port);
	do {
		int i;

		start = u64_stats_fetch_begin(&port->stats.syncp);
		stats->fragments = hw_stats->rx_fragment;
		stats->jabbers = hw_stats->rx_jabber;
		for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
		     i++) {
			stats->hist[i] = hw_stats->rx_len[i];
			stats->hist_tx[i] = hw_stats->tx_len[i];
		}
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

static const struct net_device_ops airoha_netdev_ops = {
	.ndo_init = airoha_dev_init,
	.ndo_open = airoha_dev_open,
	.ndo_stop = airoha_dev_stop,
	.ndo_start_xmit = airoha_dev_xmit,
	.ndo_get_stats64 = airoha_dev_get_stats64,
	.ndo_set_mac_address = airoha_dev_set_macaddr,
};

static const struct ethtool_ops airoha_ethtool_ops = {
	.get_drvinfo = airoha_ethtool_get_drvinfo,
	.get_eth_mac_stats = airoha_ethtool_get_mac_stats,
	.get_rmon_stats = airoha_ethtool_get_rmon_stats,
};

static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
{
	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
	struct airoha_gdm_port *port;
	struct airoha_qdma *qdma;
	struct net_device *dev;
	int err, index;
	u32 id;

	if (!id_ptr) {
		dev_err(eth->dev, "missing gdm port id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(id_ptr);
	index = id - 1;

	if (!id || id > ARRAY_SIZE(eth->ports)) {
		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
		return -EINVAL;
	}

	if (eth->ports[index]) {
		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
		return -EINVAL;
	}

	dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
				      AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
	if (!dev) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}

	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
	dev->netdev_ops = &airoha_netdev_ops;
	dev->ethtool_ops = &airoha_ethtool_ops;
	dev->max_mtu = AIROHA_MAX_MTU;
	dev->watchdog_timeo = 5 * HZ;
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
			   NETIF_F_SG | NETIF_F_TSO;
	dev->features |= dev->hw_features;
	dev->dev.of_node = np;
	dev->irq = qdma->irq;
	SET_NETDEV_DEV(dev, eth->dev);

	err = of_get_ethdev_address(np, dev);
	if (err) {
		if (err == -EPROBE_DEFER)
			return err;

		eth_hw_addr_random(dev);
		dev_info(eth->dev, "generated random MAC address %pM\n",
			 dev->dev_addr);
	}

	port = netdev_priv(dev);
	u64_stats_init(&port->stats.syncp);
	spin_lock_init(&port->stats.lock);
	port->qdma = qdma;
	port->dev = dev;
	port->id = id;
	eth->ports[index] = port;

	return register_netdev(dev);
}

static int airoha_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct airoha_eth *eth;
	int i, err;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;

	err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(eth->dev, "failed configuring DMA mask\n");
		return err;
	}

	eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
	if (IS_ERR(eth->fe_regs))
		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
				     "failed to iomap fe regs\n");

	eth->rsts[0].id = "fe";
	eth->rsts[1].id = "pdma";
	eth->rsts[2].id = "qdma";
	err = devm_reset_control_bulk_get_exclusive(eth->dev,
						    ARRAY_SIZE(eth->rsts),
						    eth->rsts);
	if (err) {
		dev_err(eth->dev, "failed to get bulk reset lines\n");
		return err;
	}

	eth->xsi_rsts[0].id = "xsi-mac";
	eth->xsi_rsts[1].id = "hsi0-mac";
	eth->xsi_rsts[2].id = "hsi1-mac";
	eth->xsi_rsts[3].id = "hsi-mac";
	eth->xsi_rsts[4].id = "xfp-mac";
	err = devm_reset_control_bulk_get_exclusive(eth->dev,
						    ARRAY_SIZE(eth->xsi_rsts),
						    eth->xsi_rsts);
	if (err) {
		dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
		return err;
	}

	eth->napi_dev = alloc_netdev_dummy(0);
	if (!eth->napi_dev)
		return -ENOMEM;

	/* Enable threaded NAPI by default */
	eth->napi_dev->threaded = true;
	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
	platform_set_drvdata(pdev, eth);

	err = airoha_hw_init(pdev, eth);
	if (err)
		goto error;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_start_napi(&eth->qdma[i]);

	for_each_child_of_node(pdev->dev.of_node, np) {
		if (!of_device_is_compatible(np, "airoha,eth-mac"))
			continue;

		if (!of_device_is_available(np))
			continue;

		err = airoha_alloc_gdm_port(eth, np);
		if (err) {
			of_node_put(np);
			goto error;
		}
	}

	return 0;

error:
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_hw_cleanup(&eth->qdma[i]);

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];

		if (port && port->dev->reg_state == NETREG_REGISTERED)
			unregister_netdev(port->dev);
	}
	free_netdev(eth->napi_dev);
	platform_set_drvdata(pdev, NULL);

	return err;
}

static void airoha_remove(struct platform_device *pdev)
{
	struct airoha_eth *eth = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_hw_cleanup(&eth->qdma[i]);

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];

		if (!port)
			continue;

		airoha_dev_stop(port->dev);
		unregister_netdev(port->dev);
	}
	free_netdev(eth->napi_dev);

	platform_set_drvdata(pdev, NULL);
}

static const struct of_device_id of_airoha_match[] = {
	{ .compatible = "airoha,en7581-eth" },
	{ /* sentinel */ }
};

static struct platform_driver airoha_driver = {
	.probe = airoha_probe,
	.remove_new = airoha_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_airoha_match,
	},
};
module_platform_driver(airoha_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");