1 /* 2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. 3 * 4 * Copyright (C) 2012 Marvell 5 * 6 * Rami Rosen <rosenr@marvell.com> 7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 8 * 9 * This file is licensed under the terms of the GNU General Public 10 * License version 2. This program is licensed "as is" without any 11 * warranty of any kind, whether express or implied. 12 */ 13 14 #include <linux/clk.h> 15 #include <linux/cpu.h> 16 #include <linux/etherdevice.h> 17 #include <linux/if_vlan.h> 18 #include <linux/inetdevice.h> 19 #include <linux/interrupt.h> 20 #include <linux/io.h> 21 #include <linux/kernel.h> 22 #include <linux/mbus.h> 23 #include <linux/module.h> 24 #include <linux/netdevice.h> 25 #include <linux/of.h> 26 #include <linux/of_address.h> 27 #include <linux/of_irq.h> 28 #include <linux/of_mdio.h> 29 #include <linux/of_net.h> 30 #include <linux/phy/phy.h> 31 #include <linux/phy.h> 32 #include <linux/phylink.h> 33 #include <linux/platform_device.h> 34 #include <linux/skbuff.h> 35 #include <net/hwbm.h> 36 #include "mvneta_bm.h" 37 #include <net/ip.h> 38 #include <net/ipv6.h> 39 #include <net/tso.h> 40 #include <net/page_pool/helpers.h> 41 #include <net/pkt_sched.h> 42 #include <linux/bpf_trace.h> 43 44 /* Registers */ 45 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) 46 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) 47 #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4 48 #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30 49 #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6 50 #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0 51 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) 52 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) 53 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) 54 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) 55 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) 56 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) 57 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19 58 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) 59 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) 60 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff 61 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) 62 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 63 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 64 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2)) 65 #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3 66 #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8 67 #define MVNETA_PORT_RX_RESET 0x1cc0 68 #define MVNETA_PORT_RX_DMA_RESET BIT(0) 69 #define MVNETA_PHY_ADDR 0x2000 70 #define MVNETA_PHY_ADDR_MASK 0x1f 71 #define MVNETA_MBUS_RETRY 0x2010 72 #define MVNETA_UNIT_INTR_CAUSE 0x2080 73 #define MVNETA_UNIT_CONTROL 0x20B0 74 #define MVNETA_PHY_POLLING_ENABLE BIT(1) 75 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) 76 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) 77 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) 78 #define MVNETA_BASE_ADDR_ENABLE 0x2290 79 #define MVNETA_AC5_CNM_DDR_TARGET 0x2 80 #define MVNETA_AC5_CNM_DDR_ATTR 0xb 81 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 82 #define MVNETA_PORT_CONFIG 0x2400 83 #define MVNETA_UNI_PROMISC_MODE BIT(0) 84 #define MVNETA_DEF_RXQ(q) ((q) << 1) 85 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) 86 #define MVNETA_TX_UNSET_ERR_SUM BIT(12) 87 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) 88 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) 89 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) 90 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) 91 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ 92 MVNETA_DEF_RXQ_ARP(q) | \ 
93 MVNETA_DEF_RXQ_TCP(q) | \ 94 MVNETA_DEF_RXQ_UDP(q) | \ 95 MVNETA_DEF_RXQ_BPDU(q) | \ 96 MVNETA_TX_UNSET_ERR_SUM | \ 97 MVNETA_RX_CSUM_WITH_PSEUDO_HDR) 98 #define MVNETA_PORT_CONFIG_EXTEND 0x2404 99 #define MVNETA_MAC_ADDR_LOW 0x2414 100 #define MVNETA_MAC_ADDR_HIGH 0x2418 101 #define MVNETA_SDMA_CONFIG 0x241c 102 #define MVNETA_SDMA_BRST_SIZE_16 4 103 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) 104 #define MVNETA_RX_NO_DATA_SWAP BIT(4) 105 #define MVNETA_TX_NO_DATA_SWAP BIT(5) 106 #define MVNETA_DESC_SWAP BIT(6) 107 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) 108 #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 109 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) 110 #define MVNETA_PORT_STATUS 0x2444 111 #define MVNETA_TX_IN_PRGRS BIT(0) 112 #define MVNETA_TX_FIFO_EMPTY BIT(8) 113 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c 114 /* Only exists on Armada XP and Armada 370 */ 115 #define MVNETA_SERDES_CFG 0x24A0 116 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 117 #define MVNETA_QSGMII_SERDES_PROTO 0x0667 118 #define MVNETA_HSGMII_SERDES_PROTO 0x1107 119 #define MVNETA_TYPE_PRIO 0x24bc 120 #define MVNETA_FORCE_UNI BIT(21) 121 #define MVNETA_TXQ_CMD_1 0x24e4 122 #define MVNETA_TXQ_CMD 0x2448 123 #define MVNETA_TXQ_DISABLE_SHIFT 8 124 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff 125 #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484 126 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488 127 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 128 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) 129 #define MVNETA_ACC_MODE 0x2500 130 #define MVNETA_BM_ADDRESS 0x2504 131 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) 132 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff 133 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 134 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) 135 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) 136 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) 137 138 /* Exception Interrupt Port/Queue Cause register 139 * 140 * Their behavior depend of the mapping done using the PCPX2Q 141 * registers. For a given CPU if the bit associated to a queue is not 142 * set, then for the register a read from this CPU will always return 143 * 0 and a write won't do anything 144 */ 145 146 #define MVNETA_INTR_NEW_CAUSE 0x25a0 147 #define MVNETA_INTR_NEW_MASK 0x25a4 148 149 /* bits 0..7 = TXQ SENT, one bit per queue. 150 * bits 8..15 = RXQ OCCUP, one bit per queue. 151 * bits 16..23 = RXQ FREE, one bit per queue. 152 * bit 29 = OLD_REG_SUM, see old reg ? 
153 * bit 30 = TX_ERR_SUM, one bit for 4 ports 154 * bit 31 = MISC_SUM, one bit for 4 ports 155 */ 156 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) 157 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0) 158 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) 159 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8) 160 #define MVNETA_MISCINTR_INTR_MASK BIT(31) 161 162 #define MVNETA_INTR_OLD_CAUSE 0x25a8 163 #define MVNETA_INTR_OLD_MASK 0x25ac 164 165 /* Data Path Port/Queue Cause Register */ 166 #define MVNETA_INTR_MISC_CAUSE 0x25b0 167 #define MVNETA_INTR_MISC_MASK 0x25b4 168 169 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0) 170 #define MVNETA_CAUSE_LINK_CHANGE BIT(1) 171 #define MVNETA_CAUSE_PTP BIT(4) 172 173 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7) 174 #define MVNETA_CAUSE_RX_OVERRUN BIT(8) 175 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9) 176 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10) 177 #define MVNETA_CAUSE_TX_UNDERUN BIT(11) 178 #define MVNETA_CAUSE_PRBS_ERR BIT(12) 179 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13) 180 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14) 181 182 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16 183 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT) 184 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool))) 185 186 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24 187 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT) 188 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q))) 189 190 #define MVNETA_INTR_ENABLE 0x25b8 191 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 192 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff 193 194 #define MVNETA_RXQ_CMD 0x2680 195 #define MVNETA_RXQ_DISABLE_SHIFT 8 196 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff 197 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) 198 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) 199 #define MVNETA_GMAC_CTRL_0 0x2c00 200 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 201 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc 202 #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1) 203 #define MVNETA_GMAC0_PORT_ENABLE BIT(0) 204 #define MVNETA_GMAC_CTRL_2 0x2c08 205 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) 206 #define MVNETA_GMAC2_PCS_ENABLE BIT(3) 207 #define MVNETA_GMAC2_PORT_RGMII BIT(4) 208 #define MVNETA_GMAC2_PORT_RESET BIT(6) 209 #define MVNETA_GMAC_STATUS 0x2c10 210 #define MVNETA_GMAC_LINK_UP BIT(0) 211 #define MVNETA_GMAC_SPEED_1000 BIT(1) 212 #define MVNETA_GMAC_SPEED_100 BIT(2) 213 #define MVNETA_GMAC_FULL_DUPLEX BIT(3) 214 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) 215 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) 216 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) 217 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) 218 #define MVNETA_GMAC_AN_COMPLETE BIT(11) 219 #define MVNETA_GMAC_SYNC_OK BIT(14) 220 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c 221 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) 222 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) 223 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) 224 #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3) 225 #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4) 226 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) 227 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) 228 #define MVNETA_GMAC_AN_SPEED_EN BIT(7) 229 #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8) 230 #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9) 231 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) 232 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) 233 #define MVNETA_GMAC_AN_DUPLEX_EN 
BIT(13)
#define MVNETA_GMAC_CTRL_4			0x2c90
#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE	BIT(1)
#define MVNETA_MIB_COUNTERS_BASE		0x3000
#define MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK		0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_DEC_SENT_MASK		0xff
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT		16
#define MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TXQ_CMD1_REG			0x3e00
#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1		BIT(3)
#define MVNETA_TXQ_CMD1_BW_LIM_EN		BIT(0)
#define MVNETA_REFILL_NUM_CLK_REG		0x3e08
#define MVNETA_REFILL_MAX_NUM_CLK		0x0000ffff
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_BUCKET_REFILL_REG(q)		(0x3e20 + ((q) << 2))
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK	0x3ff00000
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT	20
#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX	0x0007ffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

/* The values of the bucket refill base period and refill period are taken from
 * the reference manual, and add up to a base resolution of 10Kbps. This allows
 * all rate-limit values from 10Kbps up to 5Gbps to be covered.
 */

/* Base period for the rate limit algorithm */
#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS	100

/* Number of base periods to wait between each bucket refill */
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD		1000

/* The base resolution for rate limiting, in bps. Any max_rate value should be
 * a multiple of that value.
 */
#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION	(NSEC_PER_SEC / \
						 (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
						  MVNETA_TXQ_BUCKET_REFILL_PERIOD))
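/* Editor's illustrative sketch, not part of the original driver: a worked
 * example of how the two constants above combine. One refill interval is
 * 100 ns * 1000 = 100 us, i.e. 10000 refills per second, so
 * MVNETA_TXQ_RATE_LIMIT_RESOLUTION = NSEC_PER_SEC / (100 * 1000) = 10000 bps
 * (10 Kbps). A requested rate can then be expressed as a number of these
 * resolution steps; the helper below (its name is invented here) only shows
 * that conversion and does not claim to reproduce the driver's actual
 * bucket-refill register programming.
 */
static inline u32 mvneta_example_rate_steps(u32 rate_kbps)
{
	/* e.g. 100 Mbps = 100000 kbps -> 10000 steps of 10 Kbps each */
	return rate_kbps / (MVNETA_TXQ_RATE_LIMIT_RESOLUTION / 1000);
}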
#define MVNETA_LPI_CTRL_0			0x2cc0
#define MVNETA_LPI_CTRL_0_TS			(0xff << 8)
#define MVNETA_LPI_CTRL_1			0x2cc4
#define MVNETA_LPI_CTRL_1_REQUEST_ENABLE	BIT(0)
#define MVNETA_LPI_CTRL_1_REQUEST_FORCE		BIT(1)
#define MVNETA_LPI_CTRL_1_MANUAL_MODE		BIT(2)
#define MVNETA_LPI_CTRL_1_TW			(0xfff << 4)
#define MVNETA_LPI_CTRL_2			0x2cc8
#define MVNETA_LPI_STATUS			0x2ccc

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is automatically filled with zeroes on
 * the RX side. Because those two bytes sit at the front of the Ethernet
 * header, the IP header ends up aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries, but we do not use
 * them yet.
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			512

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			1024

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* The size of a TSO header page */
#define MVNETA_TSO_PAGE_SIZE		(2 * PAGE_SIZE)

/* Number of TSO headers per page. This should be a power of 2 */
#define MVNETA_TSO_PER_PAGE		(MVNETA_TSO_PAGE_SIZE / TSO_HEADER_SIZE)

/* Maximum number of TSO header pages */
#define MVNETA_MAX_TSO_PAGES		(MVNETA_MAX_TXD / MVNETA_TSO_PER_PAGE)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming data
 * into the buffers. It is needed in case NET_SKB_PAD exceeds the maximum
 * packet offset supported in the MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION	64

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			      \
	      cache_line_size())

/* Driver assumes that the last 3 bits are 0 */
#define MVNETA_SKB_HEADROOM	ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD		(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
				 MVNETA_SKB_HEADROOM))
#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)
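/* Editor's worked example (not part of the original driver), assuming 4 KiB
 * pages, a 64-byte cache line and the common values NET_SKB_PAD = 64 and
 * XDP_PACKET_HEADROOM = 256:
 *
 *   MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536
 *   MVNETA_SKB_HEADROOM      = ALIGN(max(64, 256), 8)           = 256
 *
 * so each page-sized RX buffer keeps 256 bytes of headroom plus the
 * skb_shared_info tail, and MVNETA_MAX_RX_BUF_SIZE is what remains of
 * PAGE_SIZE for packet data.
 */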
#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
	ETHTOOL_MAX_STATS,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
#define T_SW		1

#define MVNETA_XDP_PASS		0
#define MVNETA_XDP_DROPPED	BIT(0)
#define MVNETA_XDP_TX		BIT(1)
#define MVNETA_XDP_REDIR	BIT(2)

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};

struct mvneta_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64
tx_packets; 461 u64 tx_bytes; 462 /* xdp */ 463 u64 xdp_redirect; 464 u64 xdp_pass; 465 u64 xdp_drop; 466 u64 xdp_xmit; 467 u64 xdp_xmit_err; 468 u64 xdp_tx; 469 u64 xdp_tx_err; 470 }; 471 472 struct mvneta_ethtool_stats { 473 struct mvneta_stats ps; 474 u64 skb_alloc_error; 475 u64 refill_error; 476 }; 477 478 struct mvneta_pcpu_stats { 479 struct u64_stats_sync syncp; 480 481 struct mvneta_ethtool_stats es; 482 u64 rx_dropped; 483 u64 rx_errors; 484 }; 485 486 struct mvneta_pcpu_port { 487 /* Pointer to the shared port */ 488 struct mvneta_port *pp; 489 490 /* Pointer to the CPU-local NAPI struct */ 491 struct napi_struct napi; 492 493 /* Cause of the previous interrupt */ 494 u32 cause_rx_tx; 495 }; 496 497 enum { 498 __MVNETA_DOWN, 499 }; 500 501 struct mvneta_port { 502 u8 id; 503 struct mvneta_pcpu_port __percpu *ports; 504 struct mvneta_pcpu_stats __percpu *stats; 505 506 unsigned long state; 507 508 int pkt_size; 509 void __iomem *base; 510 struct mvneta_rx_queue *rxqs; 511 struct mvneta_tx_queue *txqs; 512 struct net_device *dev; 513 struct hlist_node node_online; 514 struct hlist_node node_dead; 515 int rxq_def; 516 /* Protect the access to the percpu interrupt registers, 517 * ensuring that the configuration remains coherent. 518 */ 519 spinlock_t lock; 520 bool is_stopped; 521 522 u32 cause_rx_tx; 523 struct napi_struct napi; 524 525 struct bpf_prog *xdp_prog; 526 527 /* Core clock */ 528 struct clk *clk; 529 /* AXI clock */ 530 struct clk *clk_bus; 531 u8 mcast_count[256]; 532 u16 tx_ring_size; 533 u16 rx_ring_size; 534 535 phy_interface_t phy_interface; 536 struct device_node *dn; 537 unsigned int tx_csum_limit; 538 struct phylink *phylink; 539 struct phylink_config phylink_config; 540 struct phylink_pcs phylink_pcs; 541 struct phy *comphy; 542 543 struct mvneta_bm *bm_priv; 544 struct mvneta_bm_pool *pool_long; 545 struct mvneta_bm_pool *pool_short; 546 int bm_win_id; 547 548 u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; 549 550 u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; 551 552 /* Flags for special SoC configurations */ 553 bool neta_armada3700; 554 bool neta_ac5; 555 u16 rx_offset_correction; 556 const struct mbus_dram_target_info *dram_target_info; 557 }; 558 559 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the 560 * layout of the transmit and reception DMA descriptors, and their 561 * layout is therefore defined by the hardware design 562 */ 563 564 #define MVNETA_TX_L3_OFF_SHIFT 0 565 #define MVNETA_TX_IP_HLEN_SHIFT 8 566 #define MVNETA_TX_L4_UDP BIT(16) 567 #define MVNETA_TX_L3_IP6 BIT(17) 568 #define MVNETA_TXD_IP_CSUM BIT(18) 569 #define MVNETA_TXD_Z_PAD BIT(19) 570 #define MVNETA_TXD_L_DESC BIT(20) 571 #define MVNETA_TXD_F_DESC BIT(21) 572 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \ 573 MVNETA_TXD_L_DESC | \ 574 MVNETA_TXD_F_DESC) 575 #define MVNETA_TX_L4_CSUM_FULL BIT(30) 576 #define MVNETA_TX_L4_CSUM_NOT BIT(31) 577 578 #define MVNETA_RXD_ERR_CRC 0x0 579 #define MVNETA_RXD_BM_POOL_SHIFT 13 580 #define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14)) 581 #define MVNETA_RXD_ERR_SUMMARY BIT(16) 582 #define MVNETA_RXD_ERR_OVERRUN BIT(17) 583 #define MVNETA_RXD_ERR_LEN BIT(18) 584 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) 585 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) 586 #define MVNETA_RXD_L3_IP4 BIT(25) 587 #define MVNETA_RXD_LAST_DESC BIT(26) 588 #define MVNETA_RXD_FIRST_DESC BIT(27) 589 #define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \ 590 MVNETA_RXD_LAST_DESC) 591 #define MVNETA_RXD_L4_CSUM_OK BIT(30) 592 593 #if 
defined(__LITTLE_ENDIAN) 594 struct mvneta_tx_desc { 595 u32 command; /* Options used by HW for packet transmitting.*/ 596 u16 reserved1; /* csum_l4 (for future use) */ 597 u16 data_size; /* Data size of transmitted packet in bytes */ 598 u32 buf_phys_addr; /* Physical addr of transmitted buffer */ 599 u32 reserved2; /* hw_cmd - (for future use, PMT) */ 600 u32 reserved3[4]; /* Reserved - (for future use) */ 601 }; 602 603 struct mvneta_rx_desc { 604 u32 status; /* Info about received packet */ 605 u16 reserved1; /* pnc_info - (for future use, PnC) */ 606 u16 data_size; /* Size of received packet in bytes */ 607 608 u32 buf_phys_addr; /* Physical address of the buffer */ 609 u32 reserved2; /* pnc_flow_id (for future use, PnC) */ 610 611 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 612 u16 reserved3; /* prefetch_cmd, for future use */ 613 u16 reserved4; /* csum_l4 - (for future use, PnC) */ 614 615 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ 616 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ 617 }; 618 #else 619 struct mvneta_tx_desc { 620 u16 data_size; /* Data size of transmitted packet in bytes */ 621 u16 reserved1; /* csum_l4 (for future use) */ 622 u32 command; /* Options used by HW for packet transmitting.*/ 623 u32 reserved2; /* hw_cmd - (for future use, PMT) */ 624 u32 buf_phys_addr; /* Physical addr of transmitted buffer */ 625 u32 reserved3[4]; /* Reserved - (for future use) */ 626 }; 627 628 struct mvneta_rx_desc { 629 u16 data_size; /* Size of received packet in bytes */ 630 u16 reserved1; /* pnc_info - (for future use, PnC) */ 631 u32 status; /* Info about received packet */ 632 633 u32 reserved2; /* pnc_flow_id (for future use, PnC) */ 634 u32 buf_phys_addr; /* Physical address of the buffer */ 635 636 u16 reserved4; /* csum_l4 - (for future use, PnC) */ 637 u16 reserved3; /* prefetch_cmd, for future use */ 638 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 639 640 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ 641 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ 642 }; 643 #endif 644 645 enum mvneta_tx_buf_type { 646 MVNETA_TYPE_TSO, 647 MVNETA_TYPE_SKB, 648 MVNETA_TYPE_XDP_TX, 649 MVNETA_TYPE_XDP_NDO, 650 }; 651 652 struct mvneta_tx_buf { 653 enum mvneta_tx_buf_type type; 654 union { 655 struct xdp_frame *xdpf; 656 struct sk_buff *skb; 657 }; 658 }; 659 660 struct mvneta_tx_queue { 661 /* Number of this TX queue, in the range 0-7 */ 662 u8 id; 663 664 /* Number of TX DMA descriptors in the descriptor ring */ 665 int size; 666 667 /* Number of currently used TX DMA descriptor in the 668 * descriptor ring 669 */ 670 int count; 671 int pending; 672 int tx_stop_threshold; 673 int tx_wake_threshold; 674 675 /* Array of transmitted buffers */ 676 struct mvneta_tx_buf *buf; 677 678 /* Index of last TX DMA descriptor that was inserted */ 679 int txq_put_index; 680 681 /* Index of the TX DMA descriptor to be cleaned up */ 682 int txq_get_index; 683 684 u32 done_pkts_coal; 685 686 /* Virtual address of the TX DMA descriptors array */ 687 struct mvneta_tx_desc *descs; 688 689 /* DMA address of the TX DMA descriptors array */ 690 dma_addr_t descs_phys; 691 692 /* Index of the last TX DMA descriptor */ 693 int last_desc; 694 695 /* Index of the next TX DMA descriptor to process */ 696 int next_desc_to_proc; 697 698 /* DMA buffers for TSO headers */ 699 char *tso_hdrs[MVNETA_MAX_TSO_PAGES]; 700 701 /* DMA address of TSO headers */ 702 dma_addr_t tso_hdrs_phys[MVNETA_MAX_TSO_PAGES]; 703 704 /* Affinity mask for 
CPUs*/ 705 cpumask_t affinity_mask; 706 }; 707 708 struct mvneta_rx_queue { 709 /* rx queue number, in the range 0-7 */ 710 u8 id; 711 712 /* num of rx descriptors in the rx descriptor ring */ 713 int size; 714 715 u32 pkts_coal; 716 u32 time_coal; 717 718 /* page_pool */ 719 struct page_pool *page_pool; 720 struct xdp_rxq_info xdp_rxq; 721 722 /* Virtual address of the RX buffer */ 723 void **buf_virt_addr; 724 725 /* Virtual address of the RX DMA descriptors array */ 726 struct mvneta_rx_desc *descs; 727 728 /* DMA address of the RX DMA descriptors array */ 729 dma_addr_t descs_phys; 730 731 /* Index of the last RX DMA descriptor */ 732 int last_desc; 733 734 /* Index of the next RX DMA descriptor to process */ 735 int next_desc_to_proc; 736 737 /* Index of first RX DMA descriptor to refill */ 738 int first_to_refill; 739 u32 refill_num; 740 }; 741 742 static enum cpuhp_state online_hpstate; 743 /* The hardware supports eight (8) rx queues, but we are only allowing 744 * the first one to be used. Therefore, let's just allocate one queue. 745 */ 746 static int rxq_number = 8; 747 static int txq_number = 8; 748 749 static int rxq_def; 750 751 static int rx_copybreak __read_mostly = 256; 752 753 /* HW BM need that each port be identify by a unique ID */ 754 static int global_port_id; 755 756 #define MVNETA_DRIVER_NAME "mvneta" 757 #define MVNETA_DRIVER_VERSION "1.0" 758 759 /* Utility/helper methods */ 760 761 /* Write helper method */ 762 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) 763 { 764 writel(data, pp->base + offset); 765 } 766 767 /* Read helper method */ 768 static u32 mvreg_read(struct mvneta_port *pp, u32 offset) 769 { 770 return readl(pp->base + offset); 771 } 772 773 /* Increment txq get counter */ 774 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) 775 { 776 txq->txq_get_index++; 777 if (txq->txq_get_index == txq->size) 778 txq->txq_get_index = 0; 779 } 780 781 /* Increment txq put counter */ 782 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) 783 { 784 txq->txq_put_index++; 785 if (txq->txq_put_index == txq->size) 786 txq->txq_put_index = 0; 787 } 788 789 790 /* Clear all MIB counters */ 791 static void mvneta_mib_counters_clear(struct mvneta_port *pp) 792 { 793 int i; 794 795 /* Perform dummy reads from MIB counters */ 796 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) 797 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); 798 mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); 799 mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); 800 } 801 802 /* Get System Network Statistics */ 803 static void 804 mvneta_get_stats64(struct net_device *dev, 805 struct rtnl_link_stats64 *stats) 806 { 807 struct mvneta_port *pp = netdev_priv(dev); 808 unsigned int start; 809 int cpu; 810 811 for_each_possible_cpu(cpu) { 812 struct mvneta_pcpu_stats *cpu_stats; 813 u64 rx_packets; 814 u64 rx_bytes; 815 u64 rx_dropped; 816 u64 rx_errors; 817 u64 tx_packets; 818 u64 tx_bytes; 819 820 cpu_stats = per_cpu_ptr(pp->stats, cpu); 821 do { 822 start = u64_stats_fetch_begin(&cpu_stats->syncp); 823 rx_packets = cpu_stats->es.ps.rx_packets; 824 rx_bytes = cpu_stats->es.ps.rx_bytes; 825 rx_dropped = cpu_stats->rx_dropped; 826 rx_errors = cpu_stats->rx_errors; 827 tx_packets = cpu_stats->es.ps.tx_packets; 828 tx_bytes = cpu_stats->es.ps.tx_bytes; 829 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); 830 831 stats->rx_packets += rx_packets; 832 stats->rx_bytes += rx_bytes; 833 stats->rx_dropped += rx_dropped; 834 stats->rx_errors += rx_errors; 835 
stats->tx_packets += tx_packets; 836 stats->tx_bytes += tx_bytes; 837 } 838 839 stats->tx_dropped = dev->stats.tx_dropped; 840 } 841 842 /* Rx descriptors helper methods */ 843 844 /* Checks whether the RX descriptor having this status is both the first 845 * and the last descriptor for the RX packet. Each RX packet is currently 846 * received through a single RX descriptor, so not having each RX 847 * descriptor with its first and last bits set is an error 848 */ 849 static int mvneta_rxq_desc_is_first_last(u32 status) 850 { 851 return (status & MVNETA_RXD_FIRST_LAST_DESC) == 852 MVNETA_RXD_FIRST_LAST_DESC; 853 } 854 855 /* Add number of descriptors ready to receive new packets */ 856 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, 857 struct mvneta_rx_queue *rxq, 858 int ndescs) 859 { 860 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can 861 * be added at once 862 */ 863 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) { 864 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), 865 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX << 866 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); 867 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; 868 } 869 870 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), 871 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); 872 } 873 874 /* Get number of RX descriptors occupied by received packets */ 875 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, 876 struct mvneta_rx_queue *rxq) 877 { 878 u32 val; 879 880 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); 881 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK; 882 } 883 884 /* Update num of rx desc called upon return from rx path or 885 * from mvneta_rxq_drop_pkts(). 886 */ 887 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, 888 struct mvneta_rx_queue *rxq, 889 int rx_done, int rx_filled) 890 { 891 u32 val; 892 893 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) { 894 val = rx_done | 895 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT); 896 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); 897 return; 898 } 899 900 /* Only 255 descriptors can be added at once */ 901 while ((rx_done > 0) || (rx_filled > 0)) { 902 if (rx_done <= 0xff) { 903 val = rx_done; 904 rx_done = 0; 905 } else { 906 val = 0xff; 907 rx_done -= 0xff; 908 } 909 if (rx_filled <= 0xff) { 910 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; 911 rx_filled = 0; 912 } else { 913 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; 914 rx_filled -= 0xff; 915 } 916 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); 917 } 918 } 919 920 /* Get pointer to next RX descriptor to be processed by SW */ 921 static struct mvneta_rx_desc * 922 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq) 923 { 924 int rx_desc = rxq->next_desc_to_proc; 925 926 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); 927 prefetch(rxq->descs + rxq->next_desc_to_proc); 928 return rxq->descs + rx_desc; 929 } 930 931 /* Change maximum receive size of the port. 
 */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
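/* Editor's note (worked example, not part of the original driver): both the
 * packet-offset field programmed in mvneta_rxq_offset_set() and the buffer
 * size field programmed in mvneta_rxq_buf_size_set() are expressed in 8-byte
 * units, hence the ">> 3" above. For instance, the 64-byte
 * MVNETA_RX_PKT_OFFSET_CORRECTION is written to the register as 64 >> 3 = 8.
 */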
/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure the MBUS window in order to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for the port. In case of failure, the
 * buffer manager will remain disabled for the current port.
1153 */ 1154 static int mvneta_bm_port_init(struct platform_device *pdev, 1155 struct mvneta_port *pp) 1156 { 1157 struct device_node *dn = pdev->dev.of_node; 1158 u32 long_pool_id, short_pool_id; 1159 1160 if (!pp->neta_armada3700) { 1161 int ret; 1162 1163 ret = mvneta_bm_port_mbus_init(pp); 1164 if (ret) 1165 return ret; 1166 } 1167 1168 if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { 1169 netdev_info(pp->dev, "missing long pool id\n"); 1170 return -EINVAL; 1171 } 1172 1173 /* Create port's long pool depending on mtu */ 1174 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, 1175 MVNETA_BM_LONG, pp->id, 1176 MVNETA_RX_PKT_SIZE(pp->dev->mtu)); 1177 if (!pp->pool_long) { 1178 netdev_info(pp->dev, "fail to obtain long pool for port\n"); 1179 return -ENOMEM; 1180 } 1181 1182 pp->pool_long->port_map |= 1 << pp->id; 1183 1184 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, 1185 pp->pool_long->id); 1186 1187 /* If short pool id is not defined, assume using single pool */ 1188 if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) 1189 short_pool_id = long_pool_id; 1190 1191 /* Create port's short pool */ 1192 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, 1193 MVNETA_BM_SHORT, pp->id, 1194 MVNETA_BM_SHORT_PKT_SIZE); 1195 if (!pp->pool_short) { 1196 netdev_info(pp->dev, "fail to obtain short pool for port\n"); 1197 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 1198 return -ENOMEM; 1199 } 1200 1201 if (short_pool_id != long_pool_id) { 1202 pp->pool_short->port_map |= 1 << pp->id; 1203 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, 1204 pp->pool_short->id); 1205 } 1206 1207 return 0; 1208 } 1209 1210 /* Update settings of a pool for bigger packets */ 1211 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) 1212 { 1213 struct mvneta_bm_pool *bm_pool = pp->pool_long; 1214 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; 1215 int num; 1216 1217 /* Release all buffers from long pool */ 1218 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); 1219 if (hwbm_pool->buf_num) { 1220 WARN(1, "cannot free all buffers in pool %d\n", 1221 bm_pool->id); 1222 goto bm_mtu_err; 1223 } 1224 1225 bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); 1226 bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); 1227 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1228 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); 1229 1230 /* Fill entire long pool */ 1231 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); 1232 if (num != hwbm_pool->size) { 1233 WARN(1, "pool %d: %d of %d allocated\n", 1234 bm_pool->id, num, hwbm_pool->size); 1235 goto bm_mtu_err; 1236 } 1237 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); 1238 1239 return; 1240 1241 bm_mtu_err: 1242 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 1243 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); 1244 1245 pp->bm_priv = NULL; 1246 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 1247 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); 1248 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); 1249 } 1250 1251 /* Start the Ethernet port RX and TX activity */ 1252 static void mvneta_port_up(struct mvneta_port *pp) 1253 { 1254 int queue; 1255 u32 q_map; 1256 1257 /* Enable all initialized TXs. 
*/ 1258 q_map = 0; 1259 for (queue = 0; queue < txq_number; queue++) { 1260 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 1261 if (txq->descs) 1262 q_map |= (1 << queue); 1263 } 1264 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); 1265 1266 q_map = 0; 1267 /* Enable all initialized RXQs. */ 1268 for (queue = 0; queue < rxq_number; queue++) { 1269 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 1270 1271 if (rxq->descs) 1272 q_map |= (1 << queue); 1273 } 1274 mvreg_write(pp, MVNETA_RXQ_CMD, q_map); 1275 } 1276 1277 /* Stop the Ethernet port activity */ 1278 static void mvneta_port_down(struct mvneta_port *pp) 1279 { 1280 u32 val; 1281 int count; 1282 1283 /* Stop Rx port activity. Check port Rx activity. */ 1284 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; 1285 1286 /* Issue stop command for active channels only */ 1287 if (val != 0) 1288 mvreg_write(pp, MVNETA_RXQ_CMD, 1289 val << MVNETA_RXQ_DISABLE_SHIFT); 1290 1291 /* Wait for all Rx activity to terminate. */ 1292 count = 0; 1293 do { 1294 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { 1295 netdev_warn(pp->dev, 1296 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", 1297 val); 1298 break; 1299 } 1300 mdelay(1); 1301 1302 val = mvreg_read(pp, MVNETA_RXQ_CMD); 1303 } while (val & MVNETA_RXQ_ENABLE_MASK); 1304 1305 /* Stop Tx port activity. Check port Tx activity. Issue stop 1306 * command for active channels only 1307 */ 1308 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; 1309 1310 if (val != 0) 1311 mvreg_write(pp, MVNETA_TXQ_CMD, 1312 (val << MVNETA_TXQ_DISABLE_SHIFT)); 1313 1314 /* Wait for all Tx activity to terminate. */ 1315 count = 0; 1316 do { 1317 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) { 1318 netdev_warn(pp->dev, 1319 "TIMEOUT for TX stopped status=0x%08x\n", 1320 val); 1321 break; 1322 } 1323 mdelay(1); 1324 1325 /* Check TX Command reg that all Txqs are stopped */ 1326 val = mvreg_read(pp, MVNETA_TXQ_CMD); 1327 1328 } while (val & MVNETA_TXQ_ENABLE_MASK); 1329 1330 /* Double check to verify that TX FIFO is empty */ 1331 count = 0; 1332 do { 1333 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { 1334 netdev_warn(pp->dev, 1335 "TX FIFO empty timeout status=0x%08x\n", 1336 val); 1337 break; 1338 } 1339 mdelay(1); 1340 1341 val = mvreg_read(pp, MVNETA_PORT_STATUS); 1342 } while (!(val & MVNETA_TX_FIFO_EMPTY) && 1343 (val & MVNETA_TX_IN_PRGRS)); 1344 1345 udelay(200); 1346 } 1347 1348 /* Enable the port by setting the port enable bit of the MAC control register */ 1349 static void mvneta_port_enable(struct mvneta_port *pp) 1350 { 1351 u32 val; 1352 1353 /* Enable port */ 1354 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 1355 val |= MVNETA_GMAC0_PORT_ENABLE; 1356 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 1357 } 1358 1359 /* Disable the port and wait for about 200 usec before retuning */ 1360 static void mvneta_port_disable(struct mvneta_port *pp) 1361 { 1362 u32 val; 1363 1364 /* Reset the Enable bit in the Serial Control Register */ 1365 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 1366 val &= ~MVNETA_GMAC0_PORT_ENABLE; 1367 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 1368 1369 udelay(200); 1370 } 1371 1372 /* Multicast tables methods */ 1373 1374 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */ 1375 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) 1376 { 1377 int offset; 1378 u32 val; 1379 1380 if (queue == -1) { 1381 val = 0; 1382 } else { 1383 val = 0x1 | (queue << 1); 1384 val |= (val << 24) | (val << 16) | (val << 8); 1385 } 1386 1387 for (offset = 0; offset 
<= 0xc; offset += 4) 1388 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); 1389 } 1390 1391 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ 1392 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) 1393 { 1394 int offset; 1395 u32 val; 1396 1397 if (queue == -1) { 1398 val = 0; 1399 } else { 1400 val = 0x1 | (queue << 1); 1401 val |= (val << 24) | (val << 16) | (val << 8); 1402 } 1403 1404 for (offset = 0; offset <= 0xfc; offset += 4) 1405 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); 1406 1407 } 1408 1409 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */ 1410 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) 1411 { 1412 int offset; 1413 u32 val; 1414 1415 if (queue == -1) { 1416 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); 1417 val = 0; 1418 } else { 1419 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); 1420 val = 0x1 | (queue << 1); 1421 val |= (val << 24) | (val << 16) | (val << 8); 1422 } 1423 1424 for (offset = 0; offset <= 0xfc; offset += 4) 1425 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); 1426 } 1427 1428 static void mvneta_percpu_unmask_interrupt(void *arg) 1429 { 1430 struct mvneta_port *pp = arg; 1431 1432 /* All the queue are unmasked, but actually only the ones 1433 * mapped to this CPU will be unmasked 1434 */ 1435 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 1436 MVNETA_RX_INTR_MASK_ALL | 1437 MVNETA_TX_INTR_MASK_ALL | 1438 MVNETA_MISCINTR_INTR_MASK); 1439 } 1440 1441 static void mvneta_percpu_mask_interrupt(void *arg) 1442 { 1443 struct mvneta_port *pp = arg; 1444 1445 /* All the queue are masked, but actually only the ones 1446 * mapped to this CPU will be masked 1447 */ 1448 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 1449 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 1450 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 1451 } 1452 1453 static void mvneta_percpu_clear_intr_cause(void *arg) 1454 { 1455 struct mvneta_port *pp = arg; 1456 1457 /* All the queue are cleared, but actually only the ones 1458 * mapped to this CPU will be cleared 1459 */ 1460 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 1461 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 1462 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); 1463 } 1464 1465 /* This method sets defaults to the NETA port: 1466 * Clears interrupt Cause and Mask registers. 1467 * Clears all MAC tables. 1468 * Sets defaults to all registers. 1469 * Resets RX and TX descriptor rings. 1470 * Resets PHY. 1471 * This method can be called after mvneta_port_down() to return the port 1472 * settings to defaults. 1473 */ 1474 static void mvneta_defaults_set(struct mvneta_port *pp) 1475 { 1476 int cpu; 1477 int queue; 1478 u32 val; 1479 int max_cpu = num_present_cpus(); 1480 1481 /* Clear all Cause registers */ 1482 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); 1483 1484 /* Mask all interrupts */ 1485 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 1486 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 1487 1488 /* Enable MBUS Retry bit16 */ 1489 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); 1490 1491 /* Set CPU queue access map. CPUs are assigned to the RX and 1492 * TX queues modulo their number. If there is only one TX 1493 * queue then it is assigned to the CPU associated to the 1494 * default RX queue. 
 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which allows all the IRQs to be taken on a single
			 * CPU.
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(0) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}
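/* Editor's worked example (not part of the original driver): with the
 * default rxq_number = txq_number = 8 and two present CPUs, the modulo
 * assignment above gives CPU0 access to queues 0, 2, 4 and 6 and CPU1
 * access to queues 1, 3, 5 and 7, for both RX and TX. With txq_number == 1
 * the single TX queue is instead granted only to the CPU whose number
 * matches pp->rxq_def.
 */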
/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* The TX token size and all TXQ token sizes must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
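/* Editor's worked example (not part of the original driver): the unicast
 * filter table packs four one-byte entries into each 32-bit register. For a
 * MAC address ending in 0x2B, last_nibble is 0xB, so tbl_offset = (11 / 4) * 4
 * = 8 and reg_offset = 11 % 4 = 3: the entry lives in byte 3 of the register
 * at MVNETA_DA_FILT_UCAST_BASE + 0x8, and accepting it on queue 2 writes the
 * value 0x01 | (2 << 1) = 0x05 into that byte.
 */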
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp,
				const unsigned char *addr, int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before an RX interrupt
 * is generated by the HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
}

/* Set the time delay in usec before an RX interrupt is generated by the
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
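/* Editor's illustrative sketch (not part of the original driver; the helper
 * name is invented): building the TX command word for a plain, untagged
 * IPv4/TCP frame with a standard 20-byte IP header. The arguments mirror
 * what mvneta_skb_tx_csum() further below passes for such a packet: the L3
 * offset is the Ethernet header length and ip_hdr_len is given in 32-bit
 * words (ihl), i.e. 5 for a 20-byte header.
 */
static inline u32 mvneta_example_ipv4_tcp_csum_cmd(void)
{
	return mvneta_txq_desc_csum(ETH_HLEN, htons(ETH_P_IP), 5, IPPROTO_TCP);
}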
1767 */
1768 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1769 struct mvneta_tx_queue *txq)
1770 {
1771 int sent_desc;
1772
1773 /* Get number of sent descriptors */
1774 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1775
1776 /* Decrement sent descriptors counter */
1777 if (sent_desc)
1778 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1779
1780 return sent_desc;
1781 }
1782
1783 /* Set TXQ descriptor fields relevant for CSUM calculation */
1784 static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto,
1785 int ip_hdr_len, int l4_proto)
1786 {
1787 u32 command;
1788
1789 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1790 * G_L4_chk, L4_type; required only for checksum
1791 * calculation
1792 */
1793 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1794 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1795
1796 if (l3_proto == htons(ETH_P_IP))
1797 command |= MVNETA_TXD_IP_CSUM;
1798 else
1799 command |= MVNETA_TX_L3_IP6;
1800
1801 if (l4_proto == IPPROTO_TCP)
1802 command |= MVNETA_TX_L4_CSUM_FULL;
1803 else if (l4_proto == IPPROTO_UDP)
1804 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1805 else
1806 command |= MVNETA_TX_L4_CSUM_NOT;
1807
1808 return command;
1809 }
1810
1811
1812 /* Display more error info */
1813 static void mvneta_rx_error(struct mvneta_port *pp,
1814 struct mvneta_rx_desc *rx_desc)
1815 {
1816 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1817 u32 status = rx_desc->status;
1818
1819 /* update per-cpu counter */
1820 u64_stats_update_begin(&stats->syncp);
1821 stats->rx_errors++;
1822 u64_stats_update_end(&stats->syncp);
1823
1824 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1825 case MVNETA_RXD_ERR_CRC:
1826 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1827 status, rx_desc->data_size);
1828 break;
1829 case MVNETA_RXD_ERR_OVERRUN:
1830 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1831 status, rx_desc->data_size);
1832 break;
1833 case MVNETA_RXD_ERR_LEN:
1834 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1835 status, rx_desc->data_size);
1836 break;
1837 case MVNETA_RXD_ERR_RESOURCE:
1838 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1839 status, rx_desc->data_size);
1840 break;
1841 }
1842 }
1843
1844 /* Handle RX checksum offload based on the descriptor's status */
1845 static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
1846 {
1847 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1848 (status & MVNETA_RXD_L3_IP4) &&
1849 (status & MVNETA_RXD_L4_CSUM_OK))
1850 return CHECKSUM_UNNECESSARY;
1851
1852 return CHECKSUM_NONE;
1853 }
1854
1855 /* Return tx queue pointer (find last set bit) according to <cause> returned
1856 * from tx_done reg. <cause> must not be null. The return value is always a
1857 * valid queue for matching the first one found in <cause>.
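 * For example, a <cause> of 0x5 (queues 0 and 2 pending) selects txq 2 first;
 * the caller clears that bit and calls again to drain the remaining queues.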
1858 */ 1859 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, 1860 u32 cause) 1861 { 1862 int queue = fls(cause) - 1; 1863 1864 return &pp->txqs[queue]; 1865 } 1866 1867 /* Free tx queue skbuffs */ 1868 static void mvneta_txq_bufs_free(struct mvneta_port *pp, 1869 struct mvneta_tx_queue *txq, int num, 1870 struct netdev_queue *nq, bool napi) 1871 { 1872 unsigned int bytes_compl = 0, pkts_compl = 0; 1873 struct xdp_frame_bulk bq; 1874 int i; 1875 1876 xdp_frame_bulk_init(&bq); 1877 1878 rcu_read_lock(); /* need for xdp_return_frame_bulk */ 1879 1880 for (i = 0; i < num; i++) { 1881 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; 1882 struct mvneta_tx_desc *tx_desc = txq->descs + 1883 txq->txq_get_index; 1884 1885 mvneta_txq_inc_get(txq); 1886 1887 if (buf->type == MVNETA_TYPE_XDP_NDO || 1888 buf->type == MVNETA_TYPE_SKB) 1889 dma_unmap_single(pp->dev->dev.parent, 1890 tx_desc->buf_phys_addr, 1891 tx_desc->data_size, DMA_TO_DEVICE); 1892 if ((buf->type == MVNETA_TYPE_TSO || 1893 buf->type == MVNETA_TYPE_SKB) && buf->skb) { 1894 bytes_compl += buf->skb->len; 1895 pkts_compl++; 1896 dev_kfree_skb_any(buf->skb); 1897 } else if ((buf->type == MVNETA_TYPE_XDP_TX || 1898 buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) { 1899 if (napi && buf->type == MVNETA_TYPE_XDP_TX) 1900 xdp_return_frame_rx_napi(buf->xdpf); 1901 else 1902 xdp_return_frame_bulk(buf->xdpf, &bq); 1903 } 1904 } 1905 xdp_flush_frame_bulk(&bq); 1906 1907 rcu_read_unlock(); 1908 1909 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); 1910 } 1911 1912 /* Handle end of transmission */ 1913 static void mvneta_txq_done(struct mvneta_port *pp, 1914 struct mvneta_tx_queue *txq) 1915 { 1916 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 1917 int tx_done; 1918 1919 tx_done = mvneta_txq_sent_desc_proc(pp, txq); 1920 if (!tx_done) 1921 return; 1922 1923 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); 1924 1925 txq->count -= tx_done; 1926 1927 if (netif_tx_queue_stopped(nq)) { 1928 if (txq->count <= txq->tx_wake_threshold) 1929 netif_tx_wake_queue(nq); 1930 } 1931 } 1932 1933 /* Refill processing for SW buffer management */ 1934 /* Allocate page per descriptor */ 1935 static int mvneta_rx_refill(struct mvneta_port *pp, 1936 struct mvneta_rx_desc *rx_desc, 1937 struct mvneta_rx_queue *rxq, 1938 gfp_t gfp_mask) 1939 { 1940 dma_addr_t phys_addr; 1941 struct page *page; 1942 1943 page = page_pool_alloc_pages(rxq->page_pool, 1944 gfp_mask | __GFP_NOWARN); 1945 if (!page) 1946 return -ENOMEM; 1947 1948 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; 1949 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); 1950 1951 return 0; 1952 } 1953 1954 /* Handle tx checksum */ 1955 static u32 mvneta_skb_tx_csum(struct sk_buff *skb) 1956 { 1957 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1958 int ip_hdr_len = 0; 1959 __be16 l3_proto = vlan_get_protocol(skb); 1960 u8 l4_proto; 1961 1962 if (l3_proto == htons(ETH_P_IP)) { 1963 struct iphdr *ip4h = ip_hdr(skb); 1964 1965 /* Calculate IPv4 checksum and L4 checksum */ 1966 ip_hdr_len = ip4h->ihl; 1967 l4_proto = ip4h->protocol; 1968 } else if (l3_proto == htons(ETH_P_IPV6)) { 1969 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1970 1971 /* Read l4_protocol from one of IPv6 extra headers */ 1972 if (skb_network_header_len(skb) > 0) 1973 ip_hdr_len = (skb_network_header_len(skb) >> 2); 1974 l4_proto = ip6h->nexthdr; 1975 } else 1976 return MVNETA_TX_L4_CSUM_NOT; 1977 1978 return mvneta_txq_desc_csum(skb_network_offset(skb), 1979 l3_proto, 
ip_hdr_len, l4_proto); 1980 } 1981 1982 return MVNETA_TX_L4_CSUM_NOT; 1983 } 1984 1985 /* Drop packets received by the RXQ and free buffers */ 1986 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, 1987 struct mvneta_rx_queue *rxq) 1988 { 1989 int rx_done, i; 1990 1991 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1992 if (rx_done) 1993 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1994 1995 if (pp->bm_priv) { 1996 for (i = 0; i < rx_done; i++) { 1997 struct mvneta_rx_desc *rx_desc = 1998 mvneta_rxq_next_desc_get(rxq); 1999 u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 2000 struct mvneta_bm_pool *bm_pool; 2001 2002 bm_pool = &pp->bm_priv->bm_pools[pool_id]; 2003 /* Return dropped buffer to the pool */ 2004 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2005 rx_desc->buf_phys_addr); 2006 } 2007 return; 2008 } 2009 2010 for (i = 0; i < rxq->size; i++) { 2011 struct mvneta_rx_desc *rx_desc = rxq->descs + i; 2012 void *data = rxq->buf_virt_addr[i]; 2013 if (!data || !(rx_desc->buf_phys_addr)) 2014 continue; 2015 2016 page_pool_put_full_page(rxq->page_pool, data, false); 2017 } 2018 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 2019 xdp_rxq_info_unreg(&rxq->xdp_rxq); 2020 page_pool_destroy(rxq->page_pool); 2021 rxq->page_pool = NULL; 2022 } 2023 2024 static void 2025 mvneta_update_stats(struct mvneta_port *pp, 2026 struct mvneta_stats *ps) 2027 { 2028 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2029 2030 u64_stats_update_begin(&stats->syncp); 2031 stats->es.ps.rx_packets += ps->rx_packets; 2032 stats->es.ps.rx_bytes += ps->rx_bytes; 2033 /* xdp */ 2034 stats->es.ps.xdp_redirect += ps->xdp_redirect; 2035 stats->es.ps.xdp_pass += ps->xdp_pass; 2036 stats->es.ps.xdp_drop += ps->xdp_drop; 2037 u64_stats_update_end(&stats->syncp); 2038 } 2039 2040 static inline 2041 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) 2042 { 2043 struct mvneta_rx_desc *rx_desc; 2044 int curr_desc = rxq->first_to_refill; 2045 int i; 2046 2047 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { 2048 rx_desc = rxq->descs + curr_desc; 2049 if (!(rx_desc->buf_phys_addr)) { 2050 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { 2051 struct mvneta_pcpu_stats *stats; 2052 2053 pr_err("Can't refill queue %d. 
Done %d from %d\n", 2054 rxq->id, i, rxq->refill_num); 2055 2056 stats = this_cpu_ptr(pp->stats); 2057 u64_stats_update_begin(&stats->syncp); 2058 stats->es.refill_error++; 2059 u64_stats_update_end(&stats->syncp); 2060 break; 2061 } 2062 } 2063 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); 2064 } 2065 rxq->refill_num -= i; 2066 rxq->first_to_refill = curr_desc; 2067 2068 return i; 2069 } 2070 2071 static void 2072 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2073 struct xdp_buff *xdp, int sync_len) 2074 { 2075 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2076 int i; 2077 2078 if (likely(!xdp_buff_has_frags(xdp))) 2079 goto out; 2080 2081 for (i = 0; i < sinfo->nr_frags; i++) 2082 page_pool_put_full_page(rxq->page_pool, 2083 skb_frag_page(&sinfo->frags[i]), true); 2084 2085 out: 2086 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), 2087 sync_len, true); 2088 } 2089 2090 static int 2091 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, 2092 struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map) 2093 { 2094 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); 2095 struct device *dev = pp->dev->dev.parent; 2096 struct mvneta_tx_desc *tx_desc; 2097 int i, num_frames = 1; 2098 struct page *page; 2099 2100 if (unlikely(xdp_frame_has_frags(xdpf))) 2101 num_frames += sinfo->nr_frags; 2102 2103 if (txq->count + num_frames >= txq->size) 2104 return MVNETA_XDP_DROPPED; 2105 2106 for (i = 0; i < num_frames; i++) { 2107 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2108 skb_frag_t *frag = NULL; 2109 int len = xdpf->len; 2110 dma_addr_t dma_addr; 2111 2112 if (unlikely(i)) { /* paged area */ 2113 frag = &sinfo->frags[i - 1]; 2114 len = skb_frag_size(frag); 2115 } 2116 2117 tx_desc = mvneta_txq_next_desc_get(txq); 2118 if (dma_map) { 2119 /* ndo_xdp_xmit */ 2120 void *data; 2121 2122 data = unlikely(frag) ? skb_frag_address(frag) 2123 : xdpf->data; 2124 dma_addr = dma_map_single(dev, data, len, 2125 DMA_TO_DEVICE); 2126 if (dma_mapping_error(dev, dma_addr)) { 2127 mvneta_txq_desc_put(txq); 2128 goto unmap; 2129 } 2130 2131 buf->type = MVNETA_TYPE_XDP_NDO; 2132 } else { 2133 page = unlikely(frag) ? skb_frag_page(frag) 2134 : virt_to_page(xdpf->data); 2135 dma_addr = page_pool_get_dma_addr(page); 2136 if (unlikely(frag)) 2137 dma_addr += skb_frag_off(frag); 2138 else 2139 dma_addr += sizeof(*xdpf) + xdpf->headroom; 2140 dma_sync_single_for_device(dev, dma_addr, len, 2141 DMA_BIDIRECTIONAL); 2142 buf->type = MVNETA_TYPE_XDP_TX; 2143 } 2144 buf->xdpf = unlikely(i) ? NULL : xdpf; 2145 2146 tx_desc->command = unlikely(i) ? 
0 : MVNETA_TXD_F_DESC; 2147 tx_desc->buf_phys_addr = dma_addr; 2148 tx_desc->data_size = len; 2149 *nxmit_byte += len; 2150 2151 mvneta_txq_inc_put(txq); 2152 } 2153 /*last descriptor */ 2154 tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 2155 2156 txq->pending += num_frames; 2157 txq->count += num_frames; 2158 2159 return MVNETA_XDP_TX; 2160 2161 unmap: 2162 for (i--; i >= 0; i--) { 2163 mvneta_txq_desc_put(txq); 2164 tx_desc = txq->descs + txq->next_desc_to_proc; 2165 dma_unmap_single(dev, tx_desc->buf_phys_addr, 2166 tx_desc->data_size, 2167 DMA_TO_DEVICE); 2168 } 2169 2170 return MVNETA_XDP_DROPPED; 2171 } 2172 2173 static int 2174 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) 2175 { 2176 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2177 struct mvneta_tx_queue *txq; 2178 struct netdev_queue *nq; 2179 int cpu, nxmit_byte = 0; 2180 struct xdp_frame *xdpf; 2181 u32 ret; 2182 2183 xdpf = xdp_convert_buff_to_frame(xdp); 2184 if (unlikely(!xdpf)) 2185 return MVNETA_XDP_DROPPED; 2186 2187 cpu = smp_processor_id(); 2188 txq = &pp->txqs[cpu % txq_number]; 2189 nq = netdev_get_tx_queue(pp->dev, txq->id); 2190 2191 __netif_tx_lock(nq, cpu); 2192 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false); 2193 if (ret == MVNETA_XDP_TX) { 2194 u64_stats_update_begin(&stats->syncp); 2195 stats->es.ps.tx_bytes += nxmit_byte; 2196 stats->es.ps.tx_packets++; 2197 stats->es.ps.xdp_tx++; 2198 u64_stats_update_end(&stats->syncp); 2199 2200 mvneta_txq_pend_desc_add(pp, txq, 0); 2201 } else { 2202 u64_stats_update_begin(&stats->syncp); 2203 stats->es.ps.xdp_tx_err++; 2204 u64_stats_update_end(&stats->syncp); 2205 } 2206 __netif_tx_unlock(nq); 2207 2208 return ret; 2209 } 2210 2211 static int 2212 mvneta_xdp_xmit(struct net_device *dev, int num_frame, 2213 struct xdp_frame **frames, u32 flags) 2214 { 2215 struct mvneta_port *pp = netdev_priv(dev); 2216 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2217 int i, nxmit_byte = 0, nxmit = 0; 2218 int cpu = smp_processor_id(); 2219 struct mvneta_tx_queue *txq; 2220 struct netdev_queue *nq; 2221 u32 ret; 2222 2223 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) 2224 return -ENETDOWN; 2225 2226 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2227 return -EINVAL; 2228 2229 txq = &pp->txqs[cpu % txq_number]; 2230 nq = netdev_get_tx_queue(pp->dev, txq->id); 2231 2232 __netif_tx_lock(nq, cpu); 2233 for (i = 0; i < num_frame; i++) { 2234 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte, 2235 true); 2236 if (ret != MVNETA_XDP_TX) 2237 break; 2238 2239 nxmit++; 2240 } 2241 2242 if (unlikely(flags & XDP_XMIT_FLUSH)) 2243 mvneta_txq_pend_desc_add(pp, txq, 0); 2244 __netif_tx_unlock(nq); 2245 2246 u64_stats_update_begin(&stats->syncp); 2247 stats->es.ps.tx_bytes += nxmit_byte; 2248 stats->es.ps.tx_packets += nxmit; 2249 stats->es.ps.xdp_xmit += nxmit; 2250 stats->es.ps.xdp_xmit_err += num_frame - nxmit; 2251 u64_stats_update_end(&stats->syncp); 2252 2253 return nxmit; 2254 } 2255 2256 static int 2257 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2258 struct bpf_prog *prog, struct xdp_buff *xdp, 2259 u32 frame_sz, struct mvneta_stats *stats) 2260 { 2261 unsigned int len, data_len, sync; 2262 u32 ret, act; 2263 2264 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 2265 data_len = xdp->data_end - xdp->data; 2266 act = bpf_prog_run_xdp(prog, xdp); 2267 2268 /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ 2269 sync = xdp->data_end - 
xdp->data_hard_start - pp->rx_offset_correction; 2270 sync = max(sync, len); 2271 2272 switch (act) { 2273 case XDP_PASS: 2274 stats->xdp_pass++; 2275 return MVNETA_XDP_PASS; 2276 case XDP_REDIRECT: { 2277 int err; 2278 2279 err = xdp_do_redirect(pp->dev, xdp, prog); 2280 if (unlikely(err)) { 2281 mvneta_xdp_put_buff(pp, rxq, xdp, sync); 2282 ret = MVNETA_XDP_DROPPED; 2283 } else { 2284 ret = MVNETA_XDP_REDIR; 2285 stats->xdp_redirect++; 2286 } 2287 break; 2288 } 2289 case XDP_TX: 2290 ret = mvneta_xdp_xmit_back(pp, xdp); 2291 if (ret != MVNETA_XDP_TX) 2292 mvneta_xdp_put_buff(pp, rxq, xdp, sync); 2293 break; 2294 default: 2295 bpf_warn_invalid_xdp_action(pp->dev, prog, act); 2296 fallthrough; 2297 case XDP_ABORTED: 2298 trace_xdp_exception(pp->dev, prog, act); 2299 fallthrough; 2300 case XDP_DROP: 2301 mvneta_xdp_put_buff(pp, rxq, xdp, sync); 2302 ret = MVNETA_XDP_DROPPED; 2303 stats->xdp_drop++; 2304 break; 2305 } 2306 2307 stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; 2308 stats->rx_packets++; 2309 2310 return ret; 2311 } 2312 2313 static void 2314 mvneta_swbm_rx_frame(struct mvneta_port *pp, 2315 struct mvneta_rx_desc *rx_desc, 2316 struct mvneta_rx_queue *rxq, 2317 struct xdp_buff *xdp, int *size, 2318 struct page *page) 2319 { 2320 unsigned char *data = page_address(page); 2321 int data_len = -MVNETA_MH_SIZE, len; 2322 struct net_device *dev = pp->dev; 2323 enum dma_data_direction dma_dir; 2324 2325 if (*size > MVNETA_MAX_RX_BUF_SIZE) { 2326 len = MVNETA_MAX_RX_BUF_SIZE; 2327 data_len += len; 2328 } else { 2329 len = *size; 2330 data_len += len - ETH_FCS_LEN; 2331 } 2332 *size = *size - len; 2333 2334 dma_dir = page_pool_get_dma_dir(rxq->page_pool); 2335 dma_sync_single_for_cpu(dev->dev.parent, 2336 rx_desc->buf_phys_addr, 2337 len, dma_dir); 2338 2339 rx_desc->buf_phys_addr = 0; 2340 2341 /* Prefetch header */ 2342 prefetch(data); 2343 xdp_buff_clear_frags_flag(xdp); 2344 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, 2345 data_len, true); 2346 } 2347 2348 static void 2349 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, 2350 struct mvneta_rx_desc *rx_desc, 2351 struct mvneta_rx_queue *rxq, 2352 struct xdp_buff *xdp, int *size, 2353 struct page *page) 2354 { 2355 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2356 struct net_device *dev = pp->dev; 2357 enum dma_data_direction dma_dir; 2358 int data_len, len; 2359 2360 if (*size > MVNETA_MAX_RX_BUF_SIZE) { 2361 len = MVNETA_MAX_RX_BUF_SIZE; 2362 data_len = len; 2363 } else { 2364 len = *size; 2365 data_len = len - ETH_FCS_LEN; 2366 } 2367 dma_dir = page_pool_get_dma_dir(rxq->page_pool); 2368 dma_sync_single_for_cpu(dev->dev.parent, 2369 rx_desc->buf_phys_addr, 2370 len, dma_dir); 2371 rx_desc->buf_phys_addr = 0; 2372 2373 if (!xdp_buff_has_frags(xdp)) 2374 sinfo->nr_frags = 0; 2375 2376 if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { 2377 skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++]; 2378 2379 skb_frag_fill_page_desc(frag, page, 2380 pp->rx_offset_correction, data_len); 2381 2382 if (!xdp_buff_has_frags(xdp)) { 2383 sinfo->xdp_frags_size = *size; 2384 xdp_buff_set_frags_flag(xdp); 2385 } 2386 if (page_is_pfmemalloc(page)) 2387 xdp_buff_set_frag_pfmemalloc(xdp); 2388 } else { 2389 page_pool_put_full_page(rxq->page_pool, page, true); 2390 } 2391 *size -= len; 2392 } 2393 2394 static struct sk_buff * 2395 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, 2396 struct xdp_buff *xdp, u32 desc_status) 2397 { 2398 struct skb_shared_info 
*sinfo = xdp_get_shared_info_from_buff(xdp); 2399 u32 metasize = xdp->data - xdp->data_meta; 2400 struct sk_buff *skb; 2401 u8 num_frags; 2402 2403 if (unlikely(xdp_buff_has_frags(xdp))) 2404 num_frags = sinfo->nr_frags; 2405 2406 skb = build_skb(xdp->data_hard_start, PAGE_SIZE); 2407 if (!skb) 2408 return ERR_PTR(-ENOMEM); 2409 2410 skb_mark_for_recycle(skb); 2411 2412 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2413 skb_put(skb, xdp->data_end - xdp->data); 2414 if (metasize) 2415 skb_metadata_set(skb, metasize); 2416 skb->ip_summed = mvneta_rx_csum(pp, desc_status); 2417 2418 if (unlikely(xdp_buff_has_frags(xdp))) 2419 xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size, 2420 num_frags * xdp->frame_sz, 2421 xdp_buff_get_skb_flags(xdp)); 2422 2423 return skb; 2424 } 2425 2426 /* Main rx processing when using software buffer management */ 2427 static int mvneta_rx_swbm(struct napi_struct *napi, 2428 struct mvneta_port *pp, int budget, 2429 struct mvneta_rx_queue *rxq) 2430 { 2431 int rx_proc = 0, rx_todo, refill, size = 0; 2432 struct net_device *dev = pp->dev; 2433 struct mvneta_stats ps = {}; 2434 struct bpf_prog *xdp_prog; 2435 u32 desc_status, frame_sz; 2436 struct xdp_buff xdp_buf; 2437 2438 xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); 2439 xdp_buf.data_hard_start = NULL; 2440 2441 /* Get number of received packets */ 2442 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); 2443 2444 xdp_prog = READ_ONCE(pp->xdp_prog); 2445 2446 /* Fairness NAPI loop */ 2447 while (rx_proc < budget && rx_proc < rx_todo) { 2448 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 2449 u32 rx_status, index; 2450 struct sk_buff *skb; 2451 struct page *page; 2452 2453 index = rx_desc - rxq->descs; 2454 page = (struct page *)rxq->buf_virt_addr[index]; 2455 2456 rx_status = rx_desc->status; 2457 rx_proc++; 2458 rxq->refill_num++; 2459 2460 if (rx_status & MVNETA_RXD_FIRST_DESC) { 2461 /* Check errors only for FIRST descriptor */ 2462 if (rx_status & MVNETA_RXD_ERR_SUMMARY) { 2463 mvneta_rx_error(pp, rx_desc); 2464 goto next; 2465 } 2466 2467 size = rx_desc->data_size; 2468 frame_sz = size - ETH_FCS_LEN; 2469 desc_status = rx_status; 2470 2471 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, 2472 &size, page); 2473 } else { 2474 if (unlikely(!xdp_buf.data_hard_start)) { 2475 rx_desc->buf_phys_addr = 0; 2476 page_pool_put_full_page(rxq->page_pool, page, 2477 true); 2478 goto next; 2479 } 2480 2481 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, 2482 &size, page); 2483 } /* Middle or Last descriptor */ 2484 2485 if (!(rx_status & MVNETA_RXD_LAST_DESC)) 2486 /* no last descriptor this time */ 2487 continue; 2488 2489 if (size) { 2490 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); 2491 goto next; 2492 } 2493 2494 if (xdp_prog && 2495 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) 2496 goto next; 2497 2498 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); 2499 if (IS_ERR(skb)) { 2500 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2501 2502 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); 2503 2504 u64_stats_update_begin(&stats->syncp); 2505 stats->es.skb_alloc_error++; 2506 stats->rx_dropped++; 2507 u64_stats_update_end(&stats->syncp); 2508 2509 goto next; 2510 } 2511 2512 ps.rx_bytes += skb->len; 2513 ps.rx_packets++; 2514 2515 skb->protocol = eth_type_trans(skb, dev); 2516 napi_gro_receive(napi, skb); 2517 next: 2518 xdp_buf.data_hard_start = NULL; 2519 } 2520 2521 if (xdp_buf.data_hard_start) 2522 mvneta_xdp_put_buff(pp, rxq, 
&xdp_buf, -1); 2523 2524 if (ps.xdp_redirect) 2525 xdp_do_flush(); 2526 2527 if (ps.rx_packets) 2528 mvneta_update_stats(pp, &ps); 2529 2530 /* return some buffers to hardware queue, one at a time is too slow */ 2531 refill = mvneta_rx_refill_queue(pp, rxq); 2532 2533 /* Update rxq management counters */ 2534 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); 2535 2536 return ps.rx_packets; 2537 } 2538 2539 /* Main rx processing when using hardware buffer management */ 2540 static int mvneta_rx_hwbm(struct napi_struct *napi, 2541 struct mvneta_port *pp, int rx_todo, 2542 struct mvneta_rx_queue *rxq) 2543 { 2544 struct net_device *dev = pp->dev; 2545 int rx_done; 2546 u32 rcvd_pkts = 0; 2547 u32 rcvd_bytes = 0; 2548 2549 /* Get number of received packets */ 2550 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 2551 2552 if (rx_todo > rx_done) 2553 rx_todo = rx_done; 2554 2555 rx_done = 0; 2556 2557 /* Fairness NAPI loop */ 2558 while (rx_done < rx_todo) { 2559 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 2560 struct mvneta_bm_pool *bm_pool = NULL; 2561 struct sk_buff *skb; 2562 unsigned char *data; 2563 dma_addr_t phys_addr; 2564 u32 rx_status, frag_size; 2565 int rx_bytes, err; 2566 u8 pool_id; 2567 2568 rx_done++; 2569 rx_status = rx_desc->status; 2570 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 2571 data = (u8 *)(uintptr_t)rx_desc->buf_cookie; 2572 phys_addr = rx_desc->buf_phys_addr; 2573 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 2574 bm_pool = &pp->bm_priv->bm_pools[pool_id]; 2575 2576 if (!mvneta_rxq_desc_is_first_last(rx_status) || 2577 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 2578 err_drop_frame_ret_pool: 2579 /* Return the buffer to the pool */ 2580 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2581 rx_desc->buf_phys_addr); 2582 err_drop_frame: 2583 mvneta_rx_error(pp, rx_desc); 2584 /* leave the descriptor untouched */ 2585 continue; 2586 } 2587 2588 if (rx_bytes <= rx_copybreak) { 2589 /* better copy a small frame and not unmap the DMA region */ 2590 skb = netdev_alloc_skb_ip_align(dev, rx_bytes); 2591 if (unlikely(!skb)) 2592 goto err_drop_frame_ret_pool; 2593 2594 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, 2595 rx_desc->buf_phys_addr, 2596 MVNETA_MH_SIZE + NET_SKB_PAD, 2597 rx_bytes, 2598 DMA_FROM_DEVICE); 2599 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD, 2600 rx_bytes); 2601 2602 skb->protocol = eth_type_trans(skb, dev); 2603 skb->ip_summed = mvneta_rx_csum(pp, rx_status); 2604 napi_gro_receive(napi, skb); 2605 2606 rcvd_pkts++; 2607 rcvd_bytes += rx_bytes; 2608 2609 /* Return the buffer to the pool */ 2610 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2611 rx_desc->buf_phys_addr); 2612 2613 /* leave the descriptor and buffer untouched */ 2614 continue; 2615 } 2616 2617 /* Refill processing */ 2618 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); 2619 if (err) { 2620 struct mvneta_pcpu_stats *stats; 2621 2622 netdev_err(dev, "Linux processing - Can't refill\n"); 2623 2624 stats = this_cpu_ptr(pp->stats); 2625 u64_stats_update_begin(&stats->syncp); 2626 stats->es.refill_error++; 2627 u64_stats_update_end(&stats->syncp); 2628 2629 goto err_drop_frame_ret_pool; 2630 } 2631 2632 frag_size = bm_pool->hwbm_pool.frag_size; 2633 2634 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); 2635 2636 /* After refill old buffer has to be unmapped regardless 2637 * the skb is successfully built or not. 
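 * Failing to do so would leak the streaming DMA mapping of the old buffer.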
2638 */ 2639 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, 2640 bm_pool->buf_size, DMA_FROM_DEVICE); 2641 if (!skb) 2642 goto err_drop_frame; 2643 2644 rcvd_pkts++; 2645 rcvd_bytes += rx_bytes; 2646 2647 /* Linux processing */ 2648 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); 2649 skb_put(skb, rx_bytes); 2650 2651 skb->protocol = eth_type_trans(skb, dev); 2652 skb->ip_summed = mvneta_rx_csum(pp, rx_status); 2653 2654 napi_gro_receive(napi, skb); 2655 } 2656 2657 if (rcvd_pkts) { 2658 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2659 2660 u64_stats_update_begin(&stats->syncp); 2661 stats->es.ps.rx_packets += rcvd_pkts; 2662 stats->es.ps.rx_bytes += rcvd_bytes; 2663 u64_stats_update_end(&stats->syncp); 2664 } 2665 2666 /* Update rxq management counters */ 2667 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 2668 2669 return rx_done; 2670 } 2671 2672 static void mvneta_free_tso_hdrs(struct mvneta_port *pp, 2673 struct mvneta_tx_queue *txq) 2674 { 2675 struct device *dev = pp->dev->dev.parent; 2676 int i; 2677 2678 for (i = 0; i < MVNETA_MAX_TSO_PAGES; i++) { 2679 if (txq->tso_hdrs[i]) { 2680 dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE, 2681 txq->tso_hdrs[i], 2682 txq->tso_hdrs_phys[i]); 2683 txq->tso_hdrs[i] = NULL; 2684 } 2685 } 2686 } 2687 2688 static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp, 2689 struct mvneta_tx_queue *txq) 2690 { 2691 struct device *dev = pp->dev->dev.parent; 2692 int i, num; 2693 2694 num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE); 2695 for (i = 0; i < num; i++) { 2696 txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE, 2697 &txq->tso_hdrs_phys[i], 2698 GFP_KERNEL); 2699 if (!txq->tso_hdrs[i]) { 2700 mvneta_free_tso_hdrs(pp, txq); 2701 return -ENOMEM; 2702 } 2703 } 2704 2705 return 0; 2706 } 2707 2708 static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma) 2709 { 2710 int index, offset; 2711 2712 index = txq->txq_put_index / MVNETA_TSO_PER_PAGE; 2713 offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE; 2714 2715 *dma = txq->tso_hdrs_phys[index] + offset; 2716 2717 return txq->tso_hdrs[index] + offset; 2718 } 2719 2720 static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq, 2721 struct tso_t *tso, int size, bool is_last) 2722 { 2723 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2724 int hdr_len = skb_tcp_all_headers(skb); 2725 struct mvneta_tx_desc *tx_desc; 2726 dma_addr_t hdr_phys; 2727 char *hdr; 2728 2729 hdr = mvneta_get_tso_hdr(txq, &hdr_phys); 2730 tso_build_hdr(skb, hdr, tso, size, is_last); 2731 2732 tx_desc = mvneta_txq_next_desc_get(txq); 2733 tx_desc->data_size = hdr_len; 2734 tx_desc->command = mvneta_skb_tx_csum(skb); 2735 tx_desc->command |= MVNETA_TXD_F_DESC; 2736 tx_desc->buf_phys_addr = hdr_phys; 2737 buf->type = MVNETA_TYPE_TSO; 2738 buf->skb = NULL; 2739 2740 mvneta_txq_inc_put(txq); 2741 } 2742 2743 static inline int 2744 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, 2745 struct sk_buff *skb, char *data, int size, 2746 bool last_tcp, bool is_last) 2747 { 2748 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2749 struct mvneta_tx_desc *tx_desc; 2750 2751 tx_desc = mvneta_txq_next_desc_get(txq); 2752 tx_desc->data_size = size; 2753 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, 2754 size, DMA_TO_DEVICE); 2755 if (unlikely(dma_mapping_error(dev->dev.parent, 2756 tx_desc->buf_phys_addr))) { 2757 mvneta_txq_desc_put(txq); 2758 return -ENOMEM; 2759 } 2760 2761 
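/* Data descriptors carry no checksum command; it was already set on the
 * header descriptor by mvneta_tso_put_hdr().
 */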
tx_desc->command = 0; 2762 buf->type = MVNETA_TYPE_SKB; 2763 buf->skb = NULL; 2764 2765 if (last_tcp) { 2766 /* last descriptor in the TCP packet */ 2767 tx_desc->command = MVNETA_TXD_L_DESC; 2768 2769 /* last descriptor in SKB */ 2770 if (is_last) 2771 buf->skb = skb; 2772 } 2773 mvneta_txq_inc_put(txq); 2774 return 0; 2775 } 2776 2777 static void mvneta_release_descs(struct mvneta_port *pp, 2778 struct mvneta_tx_queue *txq, 2779 int first, int num) 2780 { 2781 int desc_idx, i; 2782 2783 desc_idx = first + num; 2784 if (desc_idx >= txq->size) 2785 desc_idx -= txq->size; 2786 2787 for (i = num; i >= 0; i--) { 2788 struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx; 2789 struct mvneta_tx_buf *buf = &txq->buf[desc_idx]; 2790 2791 if (buf->type == MVNETA_TYPE_SKB) 2792 dma_unmap_single(pp->dev->dev.parent, 2793 tx_desc->buf_phys_addr, 2794 tx_desc->data_size, 2795 DMA_TO_DEVICE); 2796 2797 mvneta_txq_desc_put(txq); 2798 2799 if (desc_idx == 0) 2800 desc_idx = txq->size; 2801 desc_idx -= 1; 2802 } 2803 } 2804 2805 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, 2806 struct mvneta_tx_queue *txq) 2807 { 2808 int hdr_len, total_len, data_left; 2809 int first_desc, desc_count = 0; 2810 struct mvneta_port *pp = netdev_priv(dev); 2811 struct tso_t tso; 2812 2813 /* Count needed descriptors */ 2814 if ((txq->count + tso_count_descs(skb)) >= txq->size) 2815 return 0; 2816 2817 if (skb_headlen(skb) < skb_tcp_all_headers(skb)) { 2818 pr_info("*** Is this even possible?\n"); 2819 return 0; 2820 } 2821 2822 first_desc = txq->txq_put_index; 2823 2824 /* Initialize the TSO handler, and prepare the first payload */ 2825 hdr_len = tso_start(skb, &tso); 2826 2827 total_len = skb->len - hdr_len; 2828 while (total_len > 0) { 2829 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 2830 total_len -= data_left; 2831 desc_count++; 2832 2833 /* prepare packet headers: MAC + IP + TCP */ 2834 mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0); 2835 2836 while (data_left > 0) { 2837 int size; 2838 desc_count++; 2839 2840 size = min_t(int, tso.size, data_left); 2841 2842 if (mvneta_tso_put_data(dev, txq, skb, 2843 tso.data, size, 2844 size == data_left, 2845 total_len == 0)) 2846 goto err_release; 2847 data_left -= size; 2848 2849 tso_build_data(skb, &tso, size); 2850 } 2851 } 2852 2853 return desc_count; 2854 2855 err_release: 2856 /* Release all used data descriptors; header descriptors must not 2857 * be DMA-unmapped. 
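 * (TSO headers live in the per-queue coherent pages allocated by
 * mvneta_alloc_tso_hdrs(), so there is no streaming mapping to release.)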
2858 */ 2859 mvneta_release_descs(pp, txq, first_desc, desc_count - 1); 2860 return 0; 2861 } 2862 2863 /* Handle tx fragmentation processing */ 2864 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 2865 struct mvneta_tx_queue *txq) 2866 { 2867 struct mvneta_tx_desc *tx_desc; 2868 int i, nr_frags = skb_shinfo(skb)->nr_frags; 2869 int first_desc = txq->txq_put_index; 2870 2871 for (i = 0; i < nr_frags; i++) { 2872 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2873 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2874 void *addr = skb_frag_address(frag); 2875 2876 tx_desc = mvneta_txq_next_desc_get(txq); 2877 tx_desc->data_size = skb_frag_size(frag); 2878 2879 tx_desc->buf_phys_addr = 2880 dma_map_single(pp->dev->dev.parent, addr, 2881 tx_desc->data_size, DMA_TO_DEVICE); 2882 2883 if (dma_mapping_error(pp->dev->dev.parent, 2884 tx_desc->buf_phys_addr)) { 2885 mvneta_txq_desc_put(txq); 2886 goto error; 2887 } 2888 2889 if (i == nr_frags - 1) { 2890 /* Last descriptor */ 2891 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 2892 buf->skb = skb; 2893 } else { 2894 /* Descriptor in the middle: Not First, Not Last */ 2895 tx_desc->command = 0; 2896 buf->skb = NULL; 2897 } 2898 buf->type = MVNETA_TYPE_SKB; 2899 mvneta_txq_inc_put(txq); 2900 } 2901 2902 return 0; 2903 2904 error: 2905 /* Release all descriptors that were used to map fragments of 2906 * this packet, as well as the corresponding DMA mappings 2907 */ 2908 mvneta_release_descs(pp, txq, first_desc, i - 1); 2909 return -ENOMEM; 2910 } 2911 2912 /* Main tx processing */ 2913 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) 2914 { 2915 struct mvneta_port *pp = netdev_priv(dev); 2916 u16 txq_id = skb_get_queue_mapping(skb); 2917 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 2918 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2919 struct mvneta_tx_desc *tx_desc; 2920 int len = skb->len; 2921 int frags = 0; 2922 u32 tx_cmd; 2923 2924 if (!netif_running(dev)) 2925 goto out; 2926 2927 if (skb_is_gso(skb)) { 2928 frags = mvneta_tx_tso(skb, dev, txq); 2929 goto out; 2930 } 2931 2932 frags = skb_shinfo(skb)->nr_frags + 1; 2933 2934 /* Get a descriptor for the first part of the packet */ 2935 tx_desc = mvneta_txq_next_desc_get(txq); 2936 2937 tx_cmd = mvneta_skb_tx_csum(skb); 2938 2939 tx_desc->data_size = skb_headlen(skb); 2940 2941 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 2942 tx_desc->data_size, 2943 DMA_TO_DEVICE); 2944 if (unlikely(dma_mapping_error(dev->dev.parent, 2945 tx_desc->buf_phys_addr))) { 2946 mvneta_txq_desc_put(txq); 2947 frags = 0; 2948 goto out; 2949 } 2950 2951 buf->type = MVNETA_TYPE_SKB; 2952 if (frags == 1) { 2953 /* First and Last descriptor */ 2954 tx_cmd |= MVNETA_TXD_FLZ_DESC; 2955 tx_desc->command = tx_cmd; 2956 buf->skb = skb; 2957 mvneta_txq_inc_put(txq); 2958 } else { 2959 /* First but not Last */ 2960 tx_cmd |= MVNETA_TXD_F_DESC; 2961 buf->skb = NULL; 2962 mvneta_txq_inc_put(txq); 2963 tx_desc->command = tx_cmd; 2964 /* Continue with other skb fragments */ 2965 if (mvneta_tx_frag_process(pp, skb, txq)) { 2966 dma_unmap_single(dev->dev.parent, 2967 tx_desc->buf_phys_addr, 2968 tx_desc->data_size, 2969 DMA_TO_DEVICE); 2970 mvneta_txq_desc_put(txq); 2971 frags = 0; 2972 goto out; 2973 } 2974 } 2975 2976 out: 2977 if (frags > 0) { 2978 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 2979 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2980 2981 netdev_tx_sent_queue(nq, len); 2982 2983 
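/* Stop the queue while there is still room for at least one worst-case skb
 * (see tx_stop_threshold in mvneta_txq_sw_init()).
 */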
txq->count += frags; 2984 if (txq->count >= txq->tx_stop_threshold) 2985 netif_tx_stop_queue(nq); 2986 2987 if (!netdev_xmit_more() || netif_xmit_stopped(nq) || 2988 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) 2989 mvneta_txq_pend_desc_add(pp, txq, frags); 2990 else 2991 txq->pending += frags; 2992 2993 u64_stats_update_begin(&stats->syncp); 2994 stats->es.ps.tx_bytes += len; 2995 stats->es.ps.tx_packets++; 2996 u64_stats_update_end(&stats->syncp); 2997 } else { 2998 dev->stats.tx_dropped++; 2999 dev_kfree_skb_any(skb); 3000 } 3001 3002 return NETDEV_TX_OK; 3003 } 3004 3005 3006 /* Free tx resources, when resetting a port */ 3007 static void mvneta_txq_done_force(struct mvneta_port *pp, 3008 struct mvneta_tx_queue *txq) 3009 3010 { 3011 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 3012 int tx_done = txq->count; 3013 3014 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); 3015 3016 /* reset txq */ 3017 txq->count = 0; 3018 txq->txq_put_index = 0; 3019 txq->txq_get_index = 0; 3020 } 3021 3022 /* Handle tx done - called in softirq context. The <cause_tx_done> argument 3023 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. 3024 */ 3025 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) 3026 { 3027 struct mvneta_tx_queue *txq; 3028 struct netdev_queue *nq; 3029 int cpu = smp_processor_id(); 3030 3031 while (cause_tx_done) { 3032 txq = mvneta_tx_done_policy(pp, cause_tx_done); 3033 3034 nq = netdev_get_tx_queue(pp->dev, txq->id); 3035 __netif_tx_lock(nq, cpu); 3036 3037 if (txq->count) 3038 mvneta_txq_done(pp, txq); 3039 3040 __netif_tx_unlock(nq); 3041 cause_tx_done &= ~((1 << txq->id)); 3042 } 3043 } 3044 3045 /* Compute crc8 of the specified address, using a unique algorithm , 3046 * according to hw spec, different than generic crc8 algorithm 3047 */ 3048 static int mvneta_addr_crc(unsigned char *addr) 3049 { 3050 int crc = 0; 3051 int i; 3052 3053 for (i = 0; i < ETH_ALEN; i++) { 3054 int j; 3055 3056 crc = (crc ^ addr[i]) << 8; 3057 for (j = 7; j >= 0; j--) { 3058 if (crc & (0x100 << j)) 3059 crc ^= 0x107 << j; 3060 } 3061 } 3062 3063 return crc; 3064 } 3065 3066 /* This method controls the net device special MAC multicast support. 3067 * The Special Multicast Table for MAC addresses supports MAC of the form 3068 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 3069 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 3070 * Table entries in the DA-Filter table. This method set the Special 3071 * Multicast Table appropriate entry. 3072 */ 3073 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, 3074 unsigned char last_byte, 3075 int queue) 3076 { 3077 unsigned int smc_table_reg; 3078 unsigned int tbl_offset; 3079 unsigned int reg_offset; 3080 3081 /* Register offset from SMC table base */ 3082 tbl_offset = (last_byte / 4); 3083 /* Entry offset within the above reg */ 3084 reg_offset = last_byte % 4; 3085 3086 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST 3087 + tbl_offset * 4)); 3088 3089 if (queue == -1) 3090 smc_table_reg &= ~(0xff << (8 * reg_offset)); 3091 else { 3092 smc_table_reg &= ~(0xff << (8 * reg_offset)); 3093 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 3094 } 3095 3096 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, 3097 smc_table_reg); 3098 } 3099 3100 /* This method controls the network device Other MAC multicast support. 3101 * The Other Multicast Table is used for multicast of another type. 
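 * (i.e. any multicast address outside the special 01:00:5e:00:00:xx range).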
3102 * A CRC-8 is used as an index to the Other Multicast Table entries
3103 * in the DA-Filter table.
3104 * The method gets the CRC-8 value from the calling routine and
3105 * sets the appropriate Other Multicast Table entry according to the
3106 * specified CRC-8.
3107 */
3108 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
3109 unsigned char crc8,
3110 int queue)
3111 {
3112 unsigned int omc_table_reg;
3113 unsigned int tbl_offset;
3114 unsigned int reg_offset;
3115
3116 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
3117 reg_offset = crc8 % 4; /* Entry offset within the above reg */
3118
3119 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
3120
3121 if (queue == -1) {
3122 /* Clear accepts frame bit at specified Other DA table entry */
3123 omc_table_reg &= ~(0xff << (8 * reg_offset));
3124 } else {
3125 omc_table_reg &= ~(0xff << (8 * reg_offset));
3126 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
3127 }
3128
3129 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
3130 }
3131
3132 /* The network device supports multicast using two tables:
3133 * 1) Special Multicast Table for MAC addresses of the form
3134 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3135 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
3136 * Table entries in the DA-Filter table.
3137 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
3138 * is used as an index to the Other Multicast Table entries in the
3139 * DA-Filter table.
3140 */
3141 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
3142 int queue)
3143 {
3144 unsigned char crc_result = 0;
3145
3146 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
3147 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3148 return 0;
3149 }
3150
3151 crc_result = mvneta_addr_crc(p_addr);
3152 if (queue == -1) {
3153 if (pp->mcast_count[crc_result] == 0) {
3154 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3155 crc_result);
3156 return -EINVAL;
3157 }
3158
3159 pp->mcast_count[crc_result]--;
3160 if (pp->mcast_count[crc_result] != 0) {
3161 netdev_info(pp->dev,
3162 "After delete there are %d valid Mcast for crc8=0x%02x\n",
3163 pp->mcast_count[crc_result], crc_result);
3164 return -EINVAL;
3165 }
3166 } else
3167 pp->mcast_count[crc_result]++;
3168
3169 mvneta_set_other_mcast_addr(pp, crc_result, queue);
3170
3171 return 0;
3172 }
3173
3174 /* Configure Filtering mode of Ethernet port */
3175 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
3176 int is_promisc)
3177 {
3178 u32 port_cfg_reg, val;
3179
3180 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
3181
3182 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
3183
3184 /* Set / Clear UPM bit in port configuration register */
3185 if (is_promisc) {
3186 /* Accept all Unicast addresses */
3187 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
3188 val |= MVNETA_FORCE_UNI;
3189 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
3190 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3191 } else {
3192 /* Reject all Unicast addresses */
3193 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
3194 val &= ~MVNETA_FORCE_UNI;
3195 }
3196
3197 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3198 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3199 }
3200
3201 /* register unicast and multicast addresses */
3202 static void mvneta_set_rx_mode(struct net_device *dev)
3203 {
3204 struct mvneta_port *pp = netdev_priv(dev);
3205 struct netdev_hw_addr *ha;
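/* Promiscuous mode opens all three filter tables; otherwise accept only
 * our own unicast address and whatever multicast filtering is set below.
 */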
3206
3207 if (dev->flags & IFF_PROMISC) {
3208 /* Accept all: Multicast + Unicast */
3209 mvneta_rx_unicast_promisc_set(pp, 1);
3210 mvneta_set_ucast_table(pp, pp->rxq_def);
3211 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3212 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3213 } else {
3214 /* Accept single Unicast */
3215 mvneta_rx_unicast_promisc_set(pp, 0);
3216 mvneta_set_ucast_table(pp, -1);
3217 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3218
3219 if (dev->flags & IFF_ALLMULTI) {
3220 /* Accept all multicast */
3221 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3222 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3223 } else {
3224 /* Accept only initialized multicast */
3225 mvneta_set_special_mcast_table(pp, -1);
3226 mvneta_set_other_mcast_table(pp, -1);
3227
3228 if (!netdev_mc_empty(dev)) {
3229 netdev_for_each_mc_addr(ha, dev) {
3230 mvneta_mcast_addr_set(pp, ha->addr,
3231 pp->rxq_def);
3232 }
3233 }
3234 }
3235 }
3236 }
3237
3238 /* Interrupt handling - the callback for request_irq() */
3239 static irqreturn_t mvneta_isr(int irq, void *dev_id)
3240 {
3241 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3242
3243 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3244 napi_schedule(&pp->napi);
3245
3246 return IRQ_HANDLED;
3247 }
3248
3249 /* Interrupt handling - the callback for request_percpu_irq() */
3250 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
3251 {
3252 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
3253
3254 disable_percpu_irq(port->pp->dev->irq);
3255 napi_schedule(&port->napi);
3256
3257 return IRQ_HANDLED;
3258 }
3259
3260 static void mvneta_link_change(struct mvneta_port *pp)
3261 {
3262 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3263
3264 phylink_pcs_change(&pp->phylink_pcs,
3265 !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3266 }
3267
3268 /* NAPI handler
3269 * Bits 0 - 7 of the causeRxTx register indicate that packets were
3270 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
3271 * Bits 8 - 15 of the causeRxTx register indicate that packets were
3272 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
3273 * Each CPU has its own causeRxTx register.
3274 */
3275 static int mvneta_poll(struct napi_struct *napi, int budget)
3276 {
3277 int rx_done = 0;
3278 u32 cause_rx_tx;
3279 int rx_queue;
3280 struct mvneta_port *pp = netdev_priv(napi->dev);
3281 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3282
3283 if (!netif_running(pp->dev)) {
3284 napi_complete(napi);
3285 return rx_done;
3286 }
3287
3288 /* Read cause register */
3289 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3290 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
3291 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3292
3293 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3294
3295 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
3296 MVNETA_CAUSE_LINK_CHANGE))
3297 mvneta_link_change(pp);
3298 }
3299
3300 /* Release Tx descriptors */
3301 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
3302 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3303 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
3304 }
3305
3306 /* For the case where the last mvneta_poll did not process all
3307 * RX packets
3308 */
3309 cause_rx_tx |= pp->neta_armada3700 ?
pp->cause_rx_tx : 3310 port->cause_rx_tx; 3311 3312 rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); 3313 if (rx_queue) { 3314 rx_queue = rx_queue - 1; 3315 if (pp->bm_priv) 3316 rx_done = mvneta_rx_hwbm(napi, pp, budget, 3317 &pp->rxqs[rx_queue]); 3318 else 3319 rx_done = mvneta_rx_swbm(napi, pp, budget, 3320 &pp->rxqs[rx_queue]); 3321 } 3322 3323 if (rx_done < budget) { 3324 cause_rx_tx = 0; 3325 napi_complete_done(napi, rx_done); 3326 3327 if (pp->neta_armada3700) { 3328 unsigned long flags; 3329 3330 local_irq_save(flags); 3331 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 3332 MVNETA_RX_INTR_MASK(rxq_number) | 3333 MVNETA_TX_INTR_MASK(txq_number) | 3334 MVNETA_MISCINTR_INTR_MASK); 3335 local_irq_restore(flags); 3336 } else { 3337 enable_percpu_irq(pp->dev->irq, 0); 3338 } 3339 } 3340 3341 if (pp->neta_armada3700) 3342 pp->cause_rx_tx = cause_rx_tx; 3343 else 3344 port->cause_rx_tx = cause_rx_tx; 3345 3346 return rx_done; 3347 } 3348 3349 static int mvneta_create_page_pool(struct mvneta_port *pp, 3350 struct mvneta_rx_queue *rxq, int size) 3351 { 3352 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); 3353 struct page_pool_params pp_params = { 3354 .order = 0, 3355 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 3356 .pool_size = size, 3357 .nid = NUMA_NO_NODE, 3358 .dev = pp->dev->dev.parent, 3359 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 3360 .offset = pp->rx_offset_correction, 3361 .max_len = MVNETA_MAX_RX_BUF_SIZE, 3362 }; 3363 int err; 3364 3365 rxq->page_pool = page_pool_create(&pp_params); 3366 if (IS_ERR(rxq->page_pool)) { 3367 err = PTR_ERR(rxq->page_pool); 3368 rxq->page_pool = NULL; 3369 return err; 3370 } 3371 3372 err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0, 3373 PAGE_SIZE); 3374 if (err < 0) 3375 goto err_free_pp; 3376 3377 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 3378 rxq->page_pool); 3379 if (err) 3380 goto err_unregister_rxq; 3381 3382 return 0; 3383 3384 err_unregister_rxq: 3385 xdp_rxq_info_unreg(&rxq->xdp_rxq); 3386 err_free_pp: 3387 page_pool_destroy(rxq->page_pool); 3388 rxq->page_pool = NULL; 3389 return err; 3390 } 3391 3392 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ 3393 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 3394 int num) 3395 { 3396 int i, err; 3397 3398 err = mvneta_create_page_pool(pp, rxq, num); 3399 if (err < 0) 3400 return err; 3401 3402 for (i = 0; i < num; i++) { 3403 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); 3404 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, 3405 GFP_KERNEL) != 0) { 3406 netdev_err(pp->dev, 3407 "%s:rxq %d, %d of %d buffs filled\n", 3408 __func__, rxq->id, i, num); 3409 break; 3410 } 3411 } 3412 3413 /* Add this number of RX descriptors as non occupied (ready to 3414 * get packets) 3415 */ 3416 mvneta_rxq_non_occup_desc_add(pp, rxq, i); 3417 3418 return i; 3419 } 3420 3421 /* Free all packets pending transmit from all TXQs and reset TX port */ 3422 static void mvneta_tx_reset(struct mvneta_port *pp) 3423 { 3424 int queue; 3425 3426 /* free the skb's in the tx ring */ 3427 for (queue = 0; queue < txq_number; queue++) 3428 mvneta_txq_done_force(pp, &pp->txqs[queue]); 3429 3430 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 3431 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 3432 } 3433 3434 static void mvneta_rx_reset(struct mvneta_port *pp) 3435 { 3436 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 3437 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 3438 } 3439 3440 /* 
Rx/Tx queue initialization/cleanup methods */
3441
3442 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3443 struct mvneta_rx_queue *rxq)
3444 {
3445 rxq->size = pp->rx_ring_size;
3446
3447 /* Allocate memory for RX descriptors */
3448 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3449 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3450 &rxq->descs_phys, GFP_KERNEL);
3451 if (!rxq->descs)
3452 return -ENOMEM;
3453
3454 rxq->last_desc = rxq->size - 1;
3455
3456 return 0;
3457 }
3458
3459 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3460 struct mvneta_rx_queue *rxq)
3461 {
3462 /* Set Rx descriptors queue starting address */
3463 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3464 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3465
3466 /* Set coalescing pkts and time */
3467 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3468 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3469
3470 if (!pp->bm_priv) {
3471 /* Set Offset */
3472 mvneta_rxq_offset_set(pp, rxq, 0);
3473 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3474 MVNETA_MAX_RX_BUF_SIZE :
3475 MVNETA_RX_BUF_SIZE(pp->pkt_size));
3476 mvneta_rxq_bm_disable(pp, rxq);
3477 mvneta_rxq_fill(pp, rxq, rxq->size);
3478 } else {
3479 /* Set Offset */
3480 mvneta_rxq_offset_set(pp, rxq,
3481 NET_SKB_PAD - pp->rx_offset_correction);
3482
3483 mvneta_rxq_bm_enable(pp, rxq);
3484 /* Fill RXQ with buffers from RX pool */
3485 mvneta_rxq_long_pool_set(pp, rxq);
3486 mvneta_rxq_short_pool_set(pp, rxq);
3487 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3488 }
3489 }
3490
3491 /* Create a specified RX queue */
3492 static int mvneta_rxq_init(struct mvneta_port *pp,
3493 struct mvneta_rx_queue *rxq)
3494
3495 {
3496 int ret;
3497
3498 ret = mvneta_rxq_sw_init(pp, rxq);
3499 if (ret < 0)
3500 return ret;
3501
3502 mvneta_rxq_hw_init(pp, rxq);
3503
3504 return 0;
3505 }
3506
3507 /* Cleanup Rx queue */
3508 static void mvneta_rxq_deinit(struct mvneta_port *pp,
3509 struct mvneta_rx_queue *rxq)
3510 {
3511 mvneta_rxq_drop_pkts(pp, rxq);
3512
3513 if (rxq->descs)
3514 dma_free_coherent(pp->dev->dev.parent,
3515 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3516 rxq->descs,
3517 rxq->descs_phys);
3518
3519 rxq->descs = NULL;
3520 rxq->last_desc = 0;
3521 rxq->next_desc_to_proc = 0;
3522 rxq->descs_phys = 0;
3523 rxq->first_to_refill = 0;
3524 rxq->refill_num = 0;
3525 }
3526
3527 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3528 struct mvneta_tx_queue *txq)
3529 {
3530 int cpu, err;
3531
3532 txq->size = pp->tx_ring_size;
3533
3534 /* A queue must always have room for at least one skb.
3535 * Therefore, stop the queue when the number of free entries reaches
3536 * the maximum number of descriptors per skb.
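 * The wake threshold below is half of the stop threshold, which gives some
 * hysteresis before the queue is restarted.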
3537 */ 3538 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; 3539 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; 3540 3541 /* Allocate memory for TX descriptors */ 3542 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3543 txq->size * MVNETA_DESC_ALIGNED_SIZE, 3544 &txq->descs_phys, GFP_KERNEL); 3545 if (!txq->descs) 3546 return -ENOMEM; 3547 3548 txq->last_desc = txq->size - 1; 3549 3550 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); 3551 if (!txq->buf) 3552 return -ENOMEM; 3553 3554 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 3555 err = mvneta_alloc_tso_hdrs(pp, txq); 3556 if (err) 3557 return err; 3558 3559 /* Setup XPS mapping */ 3560 if (pp->neta_armada3700) 3561 cpu = 0; 3562 else if (txq_number > 1) 3563 cpu = txq->id % num_present_cpus(); 3564 else 3565 cpu = pp->rxq_def % num_present_cpus(); 3566 cpumask_set_cpu(cpu, &txq->affinity_mask); 3567 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); 3568 3569 return 0; 3570 } 3571 3572 static void mvneta_txq_hw_init(struct mvneta_port *pp, 3573 struct mvneta_tx_queue *txq) 3574 { 3575 /* Set maximum bandwidth for enabled TXQs */ 3576 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); 3577 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); 3578 3579 /* Set Tx descriptors queue starting address */ 3580 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); 3581 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); 3582 3583 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 3584 } 3585 3586 /* Create and initialize a tx queue */ 3587 static int mvneta_txq_init(struct mvneta_port *pp, 3588 struct mvneta_tx_queue *txq) 3589 { 3590 int ret; 3591 3592 ret = mvneta_txq_sw_init(pp, txq); 3593 if (ret < 0) 3594 return ret; 3595 3596 mvneta_txq_hw_init(pp, txq); 3597 3598 return 0; 3599 } 3600 3601 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ 3602 static void mvneta_txq_sw_deinit(struct mvneta_port *pp, 3603 struct mvneta_tx_queue *txq) 3604 { 3605 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 3606 3607 kfree(txq->buf); 3608 3609 mvneta_free_tso_hdrs(pp, txq); 3610 if (txq->descs) 3611 dma_free_coherent(pp->dev->dev.parent, 3612 txq->size * MVNETA_DESC_ALIGNED_SIZE, 3613 txq->descs, txq->descs_phys); 3614 3615 netdev_tx_reset_queue(nq); 3616 3617 txq->buf = NULL; 3618 txq->descs = NULL; 3619 txq->last_desc = 0; 3620 txq->next_desc_to_proc = 0; 3621 txq->descs_phys = 0; 3622 } 3623 3624 static void mvneta_txq_hw_deinit(struct mvneta_port *pp, 3625 struct mvneta_tx_queue *txq) 3626 { 3627 /* Set minimum bandwidth for disabled TXQs */ 3628 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); 3629 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); 3630 3631 /* Set Tx descriptors queue starting address and size */ 3632 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); 3633 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); 3634 } 3635 3636 static void mvneta_txq_deinit(struct mvneta_port *pp, 3637 struct mvneta_tx_queue *txq) 3638 { 3639 mvneta_txq_sw_deinit(pp, txq); 3640 mvneta_txq_hw_deinit(pp, txq); 3641 } 3642 3643 /* Cleanup all Tx queues */ 3644 static void mvneta_cleanup_txqs(struct mvneta_port *pp) 3645 { 3646 int queue; 3647 3648 for (queue = 0; queue < txq_number; queue++) 3649 mvneta_txq_deinit(pp, &pp->txqs[queue]); 3650 } 3651 3652 /* Cleanup all Rx queues */ 3653 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) 3654 { 3655 int queue; 3656 3657 for (queue = 
0; queue < rxq_number; queue++) 3658 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 3659 } 3660 3661 3662 /* Init all Rx queues */ 3663 static int mvneta_setup_rxqs(struct mvneta_port *pp) 3664 { 3665 int queue; 3666 3667 for (queue = 0; queue < rxq_number; queue++) { 3668 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); 3669 3670 if (err) { 3671 netdev_err(pp->dev, "%s: can't create rxq=%d\n", 3672 __func__, queue); 3673 mvneta_cleanup_rxqs(pp); 3674 return err; 3675 } 3676 } 3677 3678 return 0; 3679 } 3680 3681 /* Init all tx queues */ 3682 static int mvneta_setup_txqs(struct mvneta_port *pp) 3683 { 3684 int queue; 3685 3686 for (queue = 0; queue < txq_number; queue++) { 3687 int err = mvneta_txq_init(pp, &pp->txqs[queue]); 3688 if (err) { 3689 netdev_err(pp->dev, "%s: can't create txq=%d\n", 3690 __func__, queue); 3691 mvneta_cleanup_txqs(pp); 3692 return err; 3693 } 3694 } 3695 3696 return 0; 3697 } 3698 3699 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) 3700 { 3701 int ret; 3702 3703 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); 3704 if (ret) 3705 return ret; 3706 3707 return phy_power_on(pp->comphy); 3708 } 3709 3710 static int mvneta_config_interface(struct mvneta_port *pp, 3711 phy_interface_t interface) 3712 { 3713 int ret = 0; 3714 3715 if (pp->comphy) { 3716 if (interface == PHY_INTERFACE_MODE_SGMII || 3717 interface == PHY_INTERFACE_MODE_1000BASEX || 3718 interface == PHY_INTERFACE_MODE_2500BASEX) { 3719 ret = mvneta_comphy_init(pp, interface); 3720 } 3721 } else { 3722 switch (interface) { 3723 case PHY_INTERFACE_MODE_QSGMII: 3724 mvreg_write(pp, MVNETA_SERDES_CFG, 3725 MVNETA_QSGMII_SERDES_PROTO); 3726 break; 3727 3728 case PHY_INTERFACE_MODE_SGMII: 3729 case PHY_INTERFACE_MODE_1000BASEX: 3730 mvreg_write(pp, MVNETA_SERDES_CFG, 3731 MVNETA_SGMII_SERDES_PROTO); 3732 break; 3733 3734 case PHY_INTERFACE_MODE_2500BASEX: 3735 mvreg_write(pp, MVNETA_SERDES_CFG, 3736 MVNETA_HSGMII_SERDES_PROTO); 3737 break; 3738 default: 3739 break; 3740 } 3741 } 3742 3743 pp->phy_interface = interface; 3744 3745 return ret; 3746 } 3747 3748 static void mvneta_start_dev(struct mvneta_port *pp) 3749 { 3750 int cpu; 3751 3752 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); 3753 3754 mvneta_max_rx_size_set(pp, pp->pkt_size); 3755 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 3756 3757 /* start the Rx/Tx activity */ 3758 mvneta_port_enable(pp); 3759 3760 if (!pp->neta_armada3700) { 3761 /* Enable polling on the port */ 3762 for_each_online_cpu(cpu) { 3763 struct mvneta_pcpu_port *port = 3764 per_cpu_ptr(pp->ports, cpu); 3765 3766 napi_enable(&port->napi); 3767 } 3768 } else { 3769 napi_enable(&pp->napi); 3770 } 3771 3772 /* Unmask interrupts. 
It has to be done from each CPU */ 3773 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 3774 3775 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 3776 MVNETA_CAUSE_PHY_STATUS_CHANGE | 3777 MVNETA_CAUSE_LINK_CHANGE); 3778 3779 phylink_start(pp->phylink); 3780 3781 /* We may have called phylink_speed_down before */ 3782 phylink_speed_up(pp->phylink); 3783 3784 netif_tx_start_all_queues(pp->dev); 3785 3786 clear_bit(__MVNETA_DOWN, &pp->state); 3787 } 3788 3789 static void mvneta_stop_dev(struct mvneta_port *pp) 3790 { 3791 unsigned int cpu; 3792 3793 set_bit(__MVNETA_DOWN, &pp->state); 3794 3795 if (device_may_wakeup(&pp->dev->dev)) 3796 phylink_speed_down(pp->phylink, false); 3797 3798 phylink_stop(pp->phylink); 3799 3800 if (!pp->neta_armada3700) { 3801 for_each_online_cpu(cpu) { 3802 struct mvneta_pcpu_port *port = 3803 per_cpu_ptr(pp->ports, cpu); 3804 3805 napi_disable(&port->napi); 3806 } 3807 } else { 3808 napi_disable(&pp->napi); 3809 } 3810 3811 netif_carrier_off(pp->dev); 3812 3813 mvneta_port_down(pp); 3814 netif_tx_stop_all_queues(pp->dev); 3815 3816 /* Stop the port activity */ 3817 mvneta_port_disable(pp); 3818 3819 /* Clear all ethernet port interrupts */ 3820 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); 3821 3822 /* Mask all ethernet port interrupts */ 3823 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 3824 3825 mvneta_tx_reset(pp); 3826 mvneta_rx_reset(pp); 3827 3828 WARN_ON(phy_power_off(pp->comphy)); 3829 } 3830 3831 static void mvneta_percpu_enable(void *arg) 3832 { 3833 struct mvneta_port *pp = arg; 3834 3835 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); 3836 } 3837 3838 static void mvneta_percpu_disable(void *arg) 3839 { 3840 struct mvneta_port *pp = arg; 3841 3842 disable_percpu_irq(pp->dev->irq); 3843 } 3844 3845 /* Change the device mtu */ 3846 static int mvneta_change_mtu(struct net_device *dev, int mtu) 3847 { 3848 struct mvneta_port *pp = netdev_priv(dev); 3849 struct bpf_prog *prog = pp->xdp_prog; 3850 int ret; 3851 3852 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { 3853 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", 3854 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); 3855 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); 3856 } 3857 3858 if (prog && !prog->aux->xdp_has_frags && 3859 mtu > MVNETA_MAX_RX_BUF_SIZE) { 3860 netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n", 3861 mtu); 3862 3863 return -EINVAL; 3864 } 3865 3866 WRITE_ONCE(dev->mtu, mtu); 3867 3868 if (!netif_running(dev)) { 3869 if (pp->bm_priv) 3870 mvneta_bm_update_mtu(pp, mtu); 3871 3872 netdev_update_features(dev); 3873 return 0; 3874 } 3875 3876 /* The interface is running, so we have to force a 3877 * reallocation of the queues 3878 */ 3879 mvneta_stop_dev(pp); 3880 on_each_cpu(mvneta_percpu_disable, pp, true); 3881 3882 mvneta_cleanup_txqs(pp); 3883 mvneta_cleanup_rxqs(pp); 3884 3885 if (pp->bm_priv) 3886 mvneta_bm_update_mtu(pp, mtu); 3887 3888 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); 3889 3890 ret = mvneta_setup_rxqs(pp); 3891 if (ret) { 3892 netdev_err(dev, "unable to setup rxqs after MTU change\n"); 3893 return ret; 3894 } 3895 3896 ret = mvneta_setup_txqs(pp); 3897 if (ret) { 3898 netdev_err(dev, "unable to setup txqs after MTU change\n"); 3899 return ret; 3900 } 3901 3902 on_each_cpu(mvneta_percpu_enable, pp, true); 3903 mvneta_start_dev(pp); 3904 3905 netdev_update_features(dev); 3906 3907 return 0; 3908 } 3909 3910 static netdev_features_t mvneta_fix_features(struct net_device *dev, 3911 netdev_features_t features) 3912 { 3913 struct mvneta_port *pp = 
netdev_priv(dev); 3914 3915 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { 3916 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 3917 netdev_info(dev, 3918 "Disable IP checksum for MTU greater than %dB\n", 3919 pp->tx_csum_limit); 3920 } 3921 3922 return features; 3923 } 3924 3925 /* Get mac address */ 3926 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) 3927 { 3928 u32 mac_addr_l, mac_addr_h; 3929 3930 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); 3931 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); 3932 addr[0] = (mac_addr_h >> 24) & 0xFF; 3933 addr[1] = (mac_addr_h >> 16) & 0xFF; 3934 addr[2] = (mac_addr_h >> 8) & 0xFF; 3935 addr[3] = mac_addr_h & 0xFF; 3936 addr[4] = (mac_addr_l >> 8) & 0xFF; 3937 addr[5] = mac_addr_l & 0xFF; 3938 } 3939 3940 /* Handle setting mac address */ 3941 static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 3942 { 3943 struct mvneta_port *pp = netdev_priv(dev); 3944 struct sockaddr *sockaddr = addr; 3945 int ret; 3946 3947 ret = eth_prepare_mac_addr_change(dev, addr); 3948 if (ret < 0) 3949 return ret; 3950 /* Remove previous address table entry */ 3951 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 3952 3953 /* Set new addr in hw */ 3954 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); 3955 3956 eth_commit_mac_addr_change(dev, addr); 3957 return 0; 3958 } 3959 3960 static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) 3961 { 3962 return container_of(pcs, struct mvneta_port, phylink_pcs); 3963 } 3964 3965 static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs, 3966 phy_interface_t interface) 3967 { 3968 /* When operating in an 802.3z mode, we must have AN enabled: 3969 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... 3970 * When <PortType> = 1 (1000BASE-X) this field must be set to 1." 3971 * Therefore, inband is "required". 3972 */ 3973 if (phy_interface_mode_is_8023z(interface)) 3974 return LINK_INBAND_ENABLE; 3975 3976 /* QSGMII, SGMII and RGMII can be configured to use inband 3977 * signalling of the AN result. Indicate these as "possible". 3978 */ 3979 if (interface == PHY_INTERFACE_MODE_SGMII || 3980 interface == PHY_INTERFACE_MODE_QSGMII || 3981 phy_interface_mode_is_rgmii(interface)) 3982 return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; 3983 3984 /* For any other modes, indicate that inband is not supported. */ 3985 return LINK_INBAND_DISABLE; 3986 } 3987 3988 static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, 3989 struct phylink_link_state *state) 3990 { 3991 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 3992 u32 gmac_stat; 3993 3994 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3995 3996 if (gmac_stat & MVNETA_GMAC_SPEED_1000) 3997 state->speed = 3998 state->interface == PHY_INTERFACE_MODE_2500BASEX ? 
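/* 2.5G is reported with the same SPEED_1000 status bit; the configured interface mode is what tells the two apart */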
3999 SPEED_2500 : SPEED_1000; 4000 else if (gmac_stat & MVNETA_GMAC_SPEED_100) 4001 state->speed = SPEED_100; 4002 else 4003 state->speed = SPEED_10; 4004 4005 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); 4006 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); 4007 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); 4008 4009 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) 4010 state->pause |= MLO_PAUSE_RX; 4011 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) 4012 state->pause |= MLO_PAUSE_TX; 4013 } 4014 4015 static int mvneta_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 4016 phy_interface_t interface, 4017 const unsigned long *advertising, 4018 bool permit_pause_to_mac) 4019 { 4020 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 4021 u32 mask, val, an, old_an, changed; 4022 4023 mask = MVNETA_GMAC_INBAND_AN_ENABLE | 4024 MVNETA_GMAC_INBAND_RESTART_AN | 4025 MVNETA_GMAC_AN_SPEED_EN | 4026 MVNETA_GMAC_AN_FLOW_CTRL_EN | 4027 MVNETA_GMAC_AN_DUPLEX_EN; 4028 4029 if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { 4030 mask |= MVNETA_GMAC_CONFIG_MII_SPEED | 4031 MVNETA_GMAC_CONFIG_GMII_SPEED | 4032 MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4033 val = MVNETA_GMAC_INBAND_AN_ENABLE; 4034 4035 if (interface == PHY_INTERFACE_MODE_SGMII) { 4036 /* SGMII mode receives the speed and duplex from PHY */ 4037 val |= MVNETA_GMAC_AN_SPEED_EN | 4038 MVNETA_GMAC_AN_DUPLEX_EN; 4039 } else { 4040 /* 802.3z mode has fixed speed and duplex */ 4041 val |= MVNETA_GMAC_CONFIG_GMII_SPEED | 4042 MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4043 4044 /* The FLOW_CTRL_EN bit selects either the hardware 4045 * automatically or the CONFIG_FLOW_CTRL manually 4046 * controls the GMAC pause mode. 4047 */ 4048 if (permit_pause_to_mac) 4049 val |= MVNETA_GMAC_AN_FLOW_CTRL_EN; 4050 4051 /* Update the advertisement bits */ 4052 mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; 4053 if (phylink_test(advertising, Pause)) 4054 val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; 4055 } 4056 } else { 4057 /* Phy or fixed speed - disable in-band AN modes */ 4058 val = 0; 4059 } 4060 4061 old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4062 an = (an & ~mask) | val; 4063 changed = old_an ^ an; 4064 if (changed) 4065 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an); 4066 4067 /* We are only interested in the advertisement bits changing */ 4068 return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL); 4069 } 4070 4071 static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) 4072 { 4073 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 4074 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4075 4076 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 4077 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); 4078 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 4079 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); 4080 } 4081 4082 static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { 4083 .pcs_inband_caps = mvneta_pcs_inband_caps, 4084 .pcs_get_state = mvneta_pcs_get_state, 4085 .pcs_config = mvneta_pcs_config, 4086 .pcs_an_restart = mvneta_pcs_an_restart, 4087 }; 4088 4089 static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config, 4090 phy_interface_t interface) 4091 { 4092 struct net_device *ndev = to_net_dev(config->dev); 4093 struct mvneta_port *pp = netdev_priv(ndev); 4094 4095 return &pp->phylink_pcs; 4096 } 4097 4098 static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode, 4099 phy_interface_t interface) 4100 { 4101 struct net_device *ndev = to_net_dev(config->dev); 4102 struct mvneta_port *pp = 
netdev_priv(ndev); 4103 u32 val; 4104 4105 if (pp->phy_interface != interface || 4106 phylink_autoneg_inband(mode)) { 4107 /* Force the link down when changing the interface or if in 4108 * in-band mode. According to Armada 370 documentation, we 4109 * can only change the port mode and in-band enable when the 4110 * link is down. 4111 */ 4112 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4113 val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 4114 val |= MVNETA_GMAC_FORCE_LINK_DOWN; 4115 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4116 } 4117 4118 if (pp->phy_interface != interface) 4119 WARN_ON(phy_power_off(pp->comphy)); 4120 4121 /* Enable the 1ms clock */ 4122 if (phylink_autoneg_inband(mode)) { 4123 unsigned long rate = clk_get_rate(pp->clk); 4124 4125 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, 4126 MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000)); 4127 } 4128 4129 return 0; 4130 } 4131 4132 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, 4133 const struct phylink_link_state *state) 4134 { 4135 struct net_device *ndev = to_net_dev(config->dev); 4136 struct mvneta_port *pp = netdev_priv(ndev); 4137 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 4138 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 4139 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); 4140 4141 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; 4142 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | 4143 MVNETA_GMAC2_PORT_RESET); 4144 new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); 4145 4146 /* Even though it might look weird, when we're configured in 4147 * SGMII or QSGMII mode, the RGMII bit needs to be set. 4148 */ 4149 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; 4150 4151 if (state->interface == PHY_INTERFACE_MODE_QSGMII || 4152 state->interface == PHY_INTERFACE_MODE_SGMII || 4153 phy_interface_mode_is_8023z(state->interface)) 4154 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; 4155 4156 if (!phylink_autoneg_inband(mode)) { 4157 /* Phy or fixed speed - nothing to do, leave the 4158 * configured speed, duplex and flow control as-is. 4159 */ 4160 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 4161 /* SGMII mode receives the state from the PHY */ 4162 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; 4163 } else { 4164 /* 802.3z negotiation - only 1000base-X */ 4165 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; 4166 } 4167 4168 /* When at 2.5G, the link partner can send frames with shortened 4169 * preambles. 
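This is presumably because 2500BASE-X is the 1000BASE-X protocol clocked 2.5 times faster, so MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE is set below for that interface mode.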
4170 */ 4171 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) 4172 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; 4173 4174 if (new_ctrl0 != gmac_ctrl0) 4175 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); 4176 if (new_ctrl2 != gmac_ctrl2) 4177 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); 4178 if (new_ctrl4 != gmac_ctrl4) 4179 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); 4180 4181 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { 4182 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & 4183 MVNETA_GMAC2_PORT_RESET) != 0) 4184 continue; 4185 } 4186 } 4187 4188 static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, 4189 phy_interface_t interface) 4190 { 4191 struct net_device *ndev = to_net_dev(config->dev); 4192 struct mvneta_port *pp = netdev_priv(ndev); 4193 u32 val, clk; 4194 4195 /* Disable 1ms clock if not in in-band mode */ 4196 if (!phylink_autoneg_inband(mode)) { 4197 clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); 4198 clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; 4199 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk); 4200 } 4201 4202 if (pp->phy_interface != interface) 4203 /* Enable the Serdes PHY */ 4204 WARN_ON(mvneta_config_interface(pp, interface)); 4205 4206 /* Allow the link to come up if in in-band mode, otherwise the 4207 * link is forced via mac_link_down()/mac_link_up() 4208 */ 4209 if (phylink_autoneg_inband(mode)) { 4210 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4211 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; 4212 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4213 } 4214 4215 return 0; 4216 } 4217 4218 static void mvneta_mac_link_down(struct phylink_config *config, 4219 unsigned int mode, phy_interface_t interface) 4220 { 4221 struct net_device *ndev = to_net_dev(config->dev); 4222 struct mvneta_port *pp = netdev_priv(ndev); 4223 u32 val; 4224 4225 mvneta_port_down(pp); 4226 4227 if (!phylink_autoneg_inband(mode)) { 4228 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4229 val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 4230 val |= MVNETA_GMAC_FORCE_LINK_DOWN; 4231 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4232 } 4233 } 4234 4235 static void mvneta_mac_link_up(struct phylink_config *config, 4236 struct phy_device *phy, 4237 unsigned int mode, phy_interface_t interface, 4238 int speed, int duplex, 4239 bool tx_pause, bool rx_pause) 4240 { 4241 struct net_device *ndev = to_net_dev(config->dev); 4242 struct mvneta_port *pp = netdev_priv(ndev); 4243 u32 val; 4244 4245 if (!phylink_autoneg_inband(mode)) { 4246 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4247 val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN | 4248 MVNETA_GMAC_CONFIG_MII_SPEED | 4249 MVNETA_GMAC_CONFIG_GMII_SPEED | 4250 MVNETA_GMAC_CONFIG_FLOW_CTRL | 4251 MVNETA_GMAC_CONFIG_FULL_DUPLEX); 4252 val |= MVNETA_GMAC_FORCE_LINK_PASS; 4253 4254 if (speed == SPEED_1000 || speed == SPEED_2500) 4255 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 4256 else if (speed == SPEED_100) 4257 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 4258 4259 if (duplex == DUPLEX_FULL) 4260 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4261 4262 if (tx_pause || rx_pause) 4263 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4264 4265 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4266 } else { 4267 /* When inband doesn't cover flow control or flow control is 4268 * disabled, we need to manually configure it. This bit will 4269 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset. 
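mvneta_pcs_config() only sets MVNETA_GMAC_AN_FLOW_CTRL_EN for 802.3z links when pause may be handled by the MAC, so in the remaining in-band cases this manual setting is what actually takes effect.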
4270 */ 4271 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4272 val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL; 4273 4274 if (tx_pause || rx_pause) 4275 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4276 4277 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4278 } 4279 4280 mvneta_port_up(pp); 4281 } 4282 4283 static void mvneta_mac_disable_tx_lpi(struct phylink_config *config) 4284 { 4285 struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev)); 4286 u32 lpi1; 4287 4288 lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); 4289 lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE | 4290 MVNETA_LPI_CTRL_1_REQUEST_FORCE | 4291 MVNETA_LPI_CTRL_1_MANUAL_MODE); 4292 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1); 4293 } 4294 4295 static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, 4296 bool tx_clk_stop) 4297 { 4298 struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev)); 4299 u32 ts, tw, lpi0, lpi1, status; 4300 4301 status = mvreg_read(pp, MVNETA_GMAC_STATUS); 4302 if (status & MVNETA_GMAC_SPEED_1000) { 4303 /* At 1G speeds, the timer resolution are 1us, and 4304 * 802.3 says tw is 16.5us. Round up to 17us. 4305 */ 4306 tw = 17; 4307 ts = timer; 4308 } else { 4309 /* At 100M speeds, the timer resolutions are 10us, and 4310 * 802.3 says tw is 30us. 4311 */ 4312 tw = 3; 4313 ts = DIV_ROUND_UP(timer, 10); 4314 } 4315 4316 if (ts > 255) 4317 ts = 255; 4318 4319 /* Configure ts */ 4320 lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 4321 lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS); 4322 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0); 4323 4324 /* Configure tw and enable LPI generation */ 4325 lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); 4326 lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW); 4327 lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE; 4328 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1); 4329 4330 return 0; 4331 } 4332 4333 static const struct phylink_mac_ops mvneta_phylink_ops = { 4334 .mac_select_pcs = mvneta_mac_select_pcs, 4335 .mac_prepare = mvneta_mac_prepare, 4336 .mac_config = mvneta_mac_config, 4337 .mac_finish = mvneta_mac_finish, 4338 .mac_link_down = mvneta_mac_link_down, 4339 .mac_link_up = mvneta_mac_link_up, 4340 .mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi, 4341 .mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi, 4342 }; 4343 4344 static int mvneta_mdio_probe(struct mvneta_port *pp) 4345 { 4346 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 4347 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); 4348 4349 if (err) 4350 netdev_err(pp->dev, "could not attach PHY: %d\n", err); 4351 4352 phylink_ethtool_get_wol(pp->phylink, &wol); 4353 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); 4354 4355 /* PHY WoL may be enabled but device wakeup disabled */ 4356 if (wol.supported) 4357 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); 4358 4359 return err; 4360 } 4361 4362 static void mvneta_mdio_remove(struct mvneta_port *pp) 4363 { 4364 phylink_disconnect_phy(pp->phylink); 4365 } 4366 4367 /* Electing a CPU must be done in an atomic way: it should be done 4368 * after or before the removal/insertion of a CPU and this function is 4369 * not reentrant. 4370 */ 4371 static void mvneta_percpu_elect(struct mvneta_port *pp) 4372 { 4373 int elected_cpu = 0, max_cpu, cpu; 4374 4375 /* Use the cpu associated to the rxq when it is online, in all 4376 * the other cases, use the cpu 0 which can't be offline. 
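Note that rxq_def doubles as a CPU number in this check, which is why it is validated against nr_cpu_ids before cpu_online() is consulted.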
4377 */ 4378 if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) 4379 elected_cpu = pp->rxq_def; 4380 4381 max_cpu = num_present_cpus(); 4382 4383 for_each_online_cpu(cpu) { 4384 int rxq_map = 0, txq_map = 0; 4385 int rxq; 4386 4387 for (rxq = 0; rxq < rxq_number; rxq++) 4388 if ((rxq % max_cpu) == cpu) 4389 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 4390 4391 if (cpu == elected_cpu) 4392 /* Map the default receive queue to the elected CPU */ 4393 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); 4394 4395 /* We update the TX queue map only if we have one 4396 * queue. In this case we associate the TX queue to 4397 * the CPU bound to the default RX queue 4398 */ 4399 if (txq_number == 1) 4400 txq_map = (cpu == elected_cpu) ? 4401 MVNETA_CPU_TXQ_ACCESS(0) : 0; 4402 else 4403 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 4404 MVNETA_CPU_TXQ_ACCESS_ALL_MASK; 4405 4406 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); 4407 4408 /* Update the interrupt mask on each CPU according to the 4409 * new mapping 4410 */ 4411 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 4412 pp, true); 4413 } 4414 } 4415 4416 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) 4417 { 4418 int other_cpu; 4419 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4420 node_online); 4421 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4422 4423 /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts 4424 * are routed to CPU 0, so we don't need all the cpu-hotplug support 4425 */ 4426 if (pp->neta_armada3700) 4427 return 0; 4428 4429 netdev_lock(port->napi.dev); 4430 spin_lock(&pp->lock); 4431 /* 4432 * Configuring the driver for a new CPU while the driver is 4433 * stopping is racy, so just avoid it. 4434 */ 4435 if (pp->is_stopped) { 4436 spin_unlock(&pp->lock); 4437 netdev_unlock(port->napi.dev); 4438 return 0; 4439 } 4440 netif_tx_stop_all_queues(pp->dev); 4441 4442 /* 4443 * We have to synchronise on the napi of each CPU except the one 4444 * just being woken up 4445 */ 4446 for_each_online_cpu(other_cpu) { 4447 if (other_cpu != cpu) { 4448 struct mvneta_pcpu_port *other_port = 4449 per_cpu_ptr(pp->ports, other_cpu); 4450 4451 napi_synchronize(&other_port->napi); 4452 } 4453 } 4454 4455 /* Mask all ethernet port interrupts */ 4456 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4457 napi_enable_locked(&port->napi); 4458 4459 /* 4460 * Enable per-CPU interrupts on the CPU that is 4461 * brought up. 4462 */ 4463 mvneta_percpu_enable(pp); 4464 4465 /* 4466 * Elect the CPU that owns the default RX queue and refresh 4467 * the CPU to queue mapping. 4468 */ 4469 mvneta_percpu_elect(pp); 4470 4471 /* Unmask all ethernet port interrupts */ 4472 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 4473 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 4474 MVNETA_CAUSE_PHY_STATUS_CHANGE | 4475 MVNETA_CAUSE_LINK_CHANGE); 4476 netif_tx_start_all_queues(pp->dev); 4477 spin_unlock(&pp->lock); 4478 netdev_unlock(port->napi.dev); 4479 4480 return 0; 4481 } 4482 4483 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node) 4484 { 4485 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4486 node_online); 4487 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4488 4489 /* 4490 * Thanks to this lock we are sure that any pending cpu election is 4491 * done.
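mvneta_percpu_elect() always runs under pp->lock, so the lock/unlock pair below acts as a barrier against an election still in flight.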
4492 */ 4493 spin_lock(&pp->lock); 4494 /* Mask all ethernet port interrupts */ 4495 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4496 spin_unlock(&pp->lock); 4497 4498 napi_synchronize(&port->napi); 4499 napi_disable(&port->napi); 4500 /* Disable per-CPU interrupts on the CPU that is brought down. */ 4501 mvneta_percpu_disable(pp); 4502 return 0; 4503 } 4504 4505 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) 4506 { 4507 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4508 node_dead); 4509 4510 /* Check if a new CPU must be elected now this on is down */ 4511 spin_lock(&pp->lock); 4512 mvneta_percpu_elect(pp); 4513 spin_unlock(&pp->lock); 4514 /* Unmask all ethernet port interrupts */ 4515 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 4516 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 4517 MVNETA_CAUSE_PHY_STATUS_CHANGE | 4518 MVNETA_CAUSE_LINK_CHANGE); 4519 netif_tx_start_all_queues(pp->dev); 4520 return 0; 4521 } 4522 4523 static int mvneta_open(struct net_device *dev) 4524 { 4525 struct mvneta_port *pp = netdev_priv(dev); 4526 int ret; 4527 4528 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 4529 4530 ret = mvneta_setup_rxqs(pp); 4531 if (ret) 4532 return ret; 4533 4534 ret = mvneta_setup_txqs(pp); 4535 if (ret) 4536 goto err_cleanup_rxqs; 4537 4538 /* Connect to port interrupt line */ 4539 if (pp->neta_armada3700) 4540 ret = request_irq(pp->dev->irq, mvneta_isr, 0, 4541 dev->name, pp); 4542 else 4543 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, 4544 dev->name, pp->ports); 4545 if (ret) { 4546 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); 4547 goto err_cleanup_txqs; 4548 } 4549 4550 if (!pp->neta_armada3700) { 4551 /* Enable per-CPU interrupt on all the CPU to handle our RX 4552 * queue interrupts 4553 */ 4554 on_each_cpu(mvneta_percpu_enable, pp, true); 4555 4556 pp->is_stopped = false; 4557 /* Register a CPU notifier to handle the case where our CPU 4558 * might be taken offline. 4559 */ 4560 ret = cpuhp_state_add_instance_nocalls(online_hpstate, 4561 &pp->node_online); 4562 if (ret) 4563 goto err_free_irq; 4564 4565 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4566 &pp->node_dead); 4567 if (ret) 4568 goto err_free_online_hp; 4569 } 4570 4571 ret = mvneta_mdio_probe(pp); 4572 if (ret < 0) { 4573 netdev_err(dev, "cannot probe MDIO bus\n"); 4574 goto err_free_dead_hp; 4575 } 4576 4577 mvneta_start_dev(pp); 4578 4579 return 0; 4580 4581 err_free_dead_hp: 4582 if (!pp->neta_armada3700) 4583 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4584 &pp->node_dead); 4585 err_free_online_hp: 4586 if (!pp->neta_armada3700) 4587 cpuhp_state_remove_instance_nocalls(online_hpstate, 4588 &pp->node_online); 4589 err_free_irq: 4590 if (pp->neta_armada3700) { 4591 free_irq(pp->dev->irq, pp); 4592 } else { 4593 on_each_cpu(mvneta_percpu_disable, pp, true); 4594 free_percpu_irq(pp->dev->irq, pp->ports); 4595 } 4596 err_cleanup_txqs: 4597 mvneta_cleanup_txqs(pp); 4598 err_cleanup_rxqs: 4599 mvneta_cleanup_rxqs(pp); 4600 return ret; 4601 } 4602 4603 /* Stop the port, free port interrupt line */ 4604 static int mvneta_stop(struct net_device *dev) 4605 { 4606 struct mvneta_port *pp = netdev_priv(dev); 4607 4608 if (!pp->neta_armada3700) { 4609 /* Inform that we are stopping so we don't want to setup the 4610 * driver for new CPUs in the notifiers. The code of the 4611 * notifier for CPU online is protected by the same spinlock, 4612 * so when we get the lock, the notifier work is done. 
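Once is_stopped is set under the lock, mvneta_cpu_online() bails out early instead of re-enabling NAPI and interrupts for a port that is being shut down.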
4613 */ 4614 spin_lock(&pp->lock); 4615 pp->is_stopped = true; 4616 spin_unlock(&pp->lock); 4617 4618 mvneta_stop_dev(pp); 4619 mvneta_mdio_remove(pp); 4620 4621 cpuhp_state_remove_instance_nocalls(online_hpstate, 4622 &pp->node_online); 4623 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4624 &pp->node_dead); 4625 on_each_cpu(mvneta_percpu_disable, pp, true); 4626 free_percpu_irq(dev->irq, pp->ports); 4627 } else { 4628 mvneta_stop_dev(pp); 4629 mvneta_mdio_remove(pp); 4630 free_irq(dev->irq, pp); 4631 } 4632 4633 mvneta_cleanup_rxqs(pp); 4634 mvneta_cleanup_txqs(pp); 4635 4636 return 0; 4637 } 4638 4639 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4640 { 4641 struct mvneta_port *pp = netdev_priv(dev); 4642 4643 return phylink_mii_ioctl(pp->phylink, ifr, cmd); 4644 } 4645 4646 static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, 4647 struct netlink_ext_ack *extack) 4648 { 4649 bool need_update, running = netif_running(dev); 4650 struct mvneta_port *pp = netdev_priv(dev); 4651 struct bpf_prog *old_prog; 4652 4653 if (prog && !prog->aux->xdp_has_frags && 4654 dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { 4655 NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags"); 4656 return -EOPNOTSUPP; 4657 } 4658 4659 if (pp->bm_priv) { 4660 NL_SET_ERR_MSG_MOD(extack, 4661 "Hardware Buffer Management not supported on XDP"); 4662 return -EOPNOTSUPP; 4663 } 4664 4665 need_update = !!pp->xdp_prog != !!prog; 4666 if (running && need_update) 4667 mvneta_stop(dev); 4668 4669 old_prog = xchg(&pp->xdp_prog, prog); 4670 if (old_prog) 4671 bpf_prog_put(old_prog); 4672 4673 if (running && need_update) 4674 return mvneta_open(dev); 4675 4676 return 0; 4677 } 4678 4679 static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) 4680 { 4681 switch (xdp->command) { 4682 case XDP_SETUP_PROG: 4683 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); 4684 default: 4685 return -EINVAL; 4686 } 4687 } 4688 4689 /* Ethtool methods */ 4690 4691 /* Set link ksettings (phy address, speed) for ethtools */ 4692 static int 4693 mvneta_ethtool_set_link_ksettings(struct net_device *ndev, 4694 const struct ethtool_link_ksettings *cmd) 4695 { 4696 struct mvneta_port *pp = netdev_priv(ndev); 4697 4698 return phylink_ethtool_ksettings_set(pp->phylink, cmd); 4699 } 4700 4701 /* Get link ksettings for ethtools */ 4702 static int 4703 mvneta_ethtool_get_link_ksettings(struct net_device *ndev, 4704 struct ethtool_link_ksettings *cmd) 4705 { 4706 struct mvneta_port *pp = netdev_priv(ndev); 4707 4708 return phylink_ethtool_ksettings_get(pp->phylink, cmd); 4709 } 4710 4711 static int mvneta_ethtool_nway_reset(struct net_device *dev) 4712 { 4713 struct mvneta_port *pp = netdev_priv(dev); 4714 4715 return phylink_ethtool_nway_reset(pp->phylink); 4716 } 4717 4718 /* Set interrupt coalescing for ethtools */ 4719 static int 4720 mvneta_ethtool_set_coalesce(struct net_device *dev, 4721 struct ethtool_coalesce *c, 4722 struct kernel_ethtool_coalesce *kernel_coal, 4723 struct netlink_ext_ack *extack) 4724 { 4725 struct mvneta_port *pp = netdev_priv(dev); 4726 int queue; 4727 4728 for (queue = 0; queue < rxq_number; queue++) { 4729 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 4730 rxq->time_coal = c->rx_coalesce_usecs; 4731 rxq->pkts_coal = c->rx_max_coalesced_frames; 4732 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 4733 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 4734 } 4735 4736 for (queue = 0; queue < txq_number; queue++) { 4737 struct mvneta_tx_queue *txq = 
&pp->txqs[queue]; 4738 txq->done_pkts_coal = c->tx_max_coalesced_frames; 4739 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 4740 } 4741 4742 return 0; 4743 } 4744 4745 /* get coalescing for ethtools */ 4746 static int 4747 mvneta_ethtool_get_coalesce(struct net_device *dev, 4748 struct ethtool_coalesce *c, 4749 struct kernel_ethtool_coalesce *kernel_coal, 4750 struct netlink_ext_ack *extack) 4751 { 4752 struct mvneta_port *pp = netdev_priv(dev); 4753 4754 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; 4755 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; 4756 4757 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; 4758 return 0; 4759 } 4760 4761 4762 static void mvneta_ethtool_get_drvinfo(struct net_device *dev, 4763 struct ethtool_drvinfo *drvinfo) 4764 { 4765 strscpy(drvinfo->driver, MVNETA_DRIVER_NAME, 4766 sizeof(drvinfo->driver)); 4767 strscpy(drvinfo->version, MVNETA_DRIVER_VERSION, 4768 sizeof(drvinfo->version)); 4769 strscpy(drvinfo->bus_info, dev_name(&dev->dev), 4770 sizeof(drvinfo->bus_info)); 4771 } 4772 4773 4774 static void 4775 mvneta_ethtool_get_ringparam(struct net_device *netdev, 4776 struct ethtool_ringparam *ring, 4777 struct kernel_ethtool_ringparam *kernel_ring, 4778 struct netlink_ext_ack *extack) 4779 { 4780 struct mvneta_port *pp = netdev_priv(netdev); 4781 4782 ring->rx_max_pending = MVNETA_MAX_RXD; 4783 ring->tx_max_pending = MVNETA_MAX_TXD; 4784 ring->rx_pending = pp->rx_ring_size; 4785 ring->tx_pending = pp->tx_ring_size; 4786 } 4787 4788 static int 4789 mvneta_ethtool_set_ringparam(struct net_device *dev, 4790 struct ethtool_ringparam *ring, 4791 struct kernel_ethtool_ringparam *kernel_ring, 4792 struct netlink_ext_ack *extack) 4793 { 4794 struct mvneta_port *pp = netdev_priv(dev); 4795 4796 if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) 4797 return -EINVAL; 4798 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 
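/* silently cap RX at the hardware maximum; the TX size below is clamped and a warning is logged instead */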
4799 ring->rx_pending : MVNETA_MAX_RXD; 4800 4801 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, 4802 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); 4803 if (pp->tx_ring_size != ring->tx_pending) 4804 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 4805 pp->tx_ring_size, ring->tx_pending); 4806 4807 if (netif_running(dev)) { 4808 mvneta_stop(dev); 4809 if (mvneta_open(dev)) { 4810 netdev_err(dev, 4811 "error on opening device after ring param change\n"); 4812 return -ENOMEM; 4813 } 4814 } 4815 4816 return 0; 4817 } 4818 4819 static void mvneta_ethtool_get_pauseparam(struct net_device *dev, 4820 struct ethtool_pauseparam *pause) 4821 { 4822 struct mvneta_port *pp = netdev_priv(dev); 4823 4824 phylink_ethtool_get_pauseparam(pp->phylink, pause); 4825 } 4826 4827 static int mvneta_ethtool_set_pauseparam(struct net_device *dev, 4828 struct ethtool_pauseparam *pause) 4829 { 4830 struct mvneta_port *pp = netdev_priv(dev); 4831 4832 return phylink_ethtool_set_pauseparam(pp->phylink, pause); 4833 } 4834 4835 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, 4836 u8 *data) 4837 { 4838 if (sset == ETH_SS_STATS) { 4839 struct mvneta_port *pp = netdev_priv(netdev); 4840 int i; 4841 4842 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 4843 ethtool_puts(&data, mvneta_statistics[i].name); 4844 4845 if (!pp->bm_priv) { 4846 page_pool_ethtool_stats_get_strings(data); 4847 } 4848 } 4849 } 4850 4851 static void 4852 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, 4853 struct mvneta_ethtool_stats *es) 4854 { 4855 unsigned int start; 4856 int cpu; 4857 4858 for_each_possible_cpu(cpu) { 4859 struct mvneta_pcpu_stats *stats; 4860 u64 skb_alloc_error; 4861 u64 refill_error; 4862 u64 xdp_redirect; 4863 u64 xdp_xmit_err; 4864 u64 xdp_tx_err; 4865 u64 xdp_pass; 4866 u64 xdp_drop; 4867 u64 xdp_xmit; 4868 u64 xdp_tx; 4869 4870 stats = per_cpu_ptr(pp->stats, cpu); 4871 do { 4872 start = u64_stats_fetch_begin(&stats->syncp); 4873 skb_alloc_error = stats->es.skb_alloc_error; 4874 refill_error = stats->es.refill_error; 4875 xdp_redirect = stats->es.ps.xdp_redirect; 4876 xdp_pass = stats->es.ps.xdp_pass; 4877 xdp_drop = stats->es.ps.xdp_drop; 4878 xdp_xmit = stats->es.ps.xdp_xmit; 4879 xdp_xmit_err = stats->es.ps.xdp_xmit_err; 4880 xdp_tx = stats->es.ps.xdp_tx; 4881 xdp_tx_err = stats->es.ps.xdp_tx_err; 4882 } while (u64_stats_fetch_retry(&stats->syncp, start)); 4883 4884 es->skb_alloc_error += skb_alloc_error; 4885 es->refill_error += refill_error; 4886 es->ps.xdp_redirect += xdp_redirect; 4887 es->ps.xdp_pass += xdp_pass; 4888 es->ps.xdp_drop += xdp_drop; 4889 es->ps.xdp_xmit += xdp_xmit; 4890 es->ps.xdp_xmit_err += xdp_xmit_err; 4891 es->ps.xdp_tx += xdp_tx; 4892 es->ps.xdp_tx_err += xdp_tx_err; 4893 } 4894 } 4895 4896 static void mvneta_ethtool_update_stats(struct mvneta_port *pp) 4897 { 4898 struct mvneta_ethtool_stats stats = {}; 4899 const struct mvneta_statistic *s; 4900 void __iomem *base = pp->base; 4901 u32 high, low; 4902 u64 val; 4903 int i; 4904 4905 mvneta_ethtool_update_pcpu_stats(pp, &stats); 4906 for (i = 0, s = mvneta_statistics; 4907 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); 4908 s++, i++) { 4909 switch (s->type) { 4910 case T_REG_32: 4911 val = readl_relaxed(base + s->offset); 4912 pp->ethtool_stats[i] += val; 4913 break; 4914 case T_REG_64: 4915 /* Docs say to read low 32-bit then high */ 4916 low = readl_relaxed(base + s->offset); 4917 high = readl_relaxed(base + s->offset + 4); 4918 val = (u64)high << 32 | low; 4919 pp->ethtool_stats[i] 
+= val; 4920 break; 4921 case T_SW: 4922 switch (s->offset) { 4923 case ETHTOOL_STAT_EEE_WAKEUP: 4924 val = phylink_get_eee_err(pp->phylink); 4925 pp->ethtool_stats[i] += val; 4926 break; 4927 case ETHTOOL_STAT_SKB_ALLOC_ERR: 4928 pp->ethtool_stats[i] = stats.skb_alloc_error; 4929 break; 4930 case ETHTOOL_STAT_REFILL_ERR: 4931 pp->ethtool_stats[i] = stats.refill_error; 4932 break; 4933 case ETHTOOL_XDP_REDIRECT: 4934 pp->ethtool_stats[i] = stats.ps.xdp_redirect; 4935 break; 4936 case ETHTOOL_XDP_PASS: 4937 pp->ethtool_stats[i] = stats.ps.xdp_pass; 4938 break; 4939 case ETHTOOL_XDP_DROP: 4940 pp->ethtool_stats[i] = stats.ps.xdp_drop; 4941 break; 4942 case ETHTOOL_XDP_TX: 4943 pp->ethtool_stats[i] = stats.ps.xdp_tx; 4944 break; 4945 case ETHTOOL_XDP_TX_ERR: 4946 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; 4947 break; 4948 case ETHTOOL_XDP_XMIT: 4949 pp->ethtool_stats[i] = stats.ps.xdp_xmit; 4950 break; 4951 case ETHTOOL_XDP_XMIT_ERR: 4952 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; 4953 break; 4954 } 4955 break; 4956 } 4957 } 4958 } 4959 4960 static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) 4961 { 4962 struct page_pool_stats stats = {}; 4963 int i; 4964 4965 for (i = 0; i < rxq_number; i++) { 4966 if (pp->rxqs[i].page_pool) 4967 page_pool_get_stats(pp->rxqs[i].page_pool, &stats); 4968 } 4969 4970 page_pool_ethtool_stats_get(data, &stats); 4971 } 4972 4973 static void mvneta_ethtool_get_stats(struct net_device *dev, 4974 struct ethtool_stats *stats, u64 *data) 4975 { 4976 struct mvneta_port *pp = netdev_priv(dev); 4977 int i; 4978 4979 mvneta_ethtool_update_stats(pp); 4980 4981 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 4982 *data++ = pp->ethtool_stats[i]; 4983 4984 if (!pp->bm_priv) 4985 mvneta_ethtool_pp_stats(pp, data); 4986 } 4987 4988 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) 4989 { 4990 if (sset == ETH_SS_STATS) { 4991 int count = ARRAY_SIZE(mvneta_statistics); 4992 struct mvneta_port *pp = netdev_priv(dev); 4993 4994 if (!pp->bm_priv) 4995 count += page_pool_ethtool_stats_get_count(); 4996 4997 return count; 4998 } 4999 5000 return -EOPNOTSUPP; 5001 } 5002 5003 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) 5004 { 5005 return MVNETA_RSS_LU_TABLE_SIZE; 5006 } 5007 5008 static int mvneta_ethtool_get_rxnfc(struct net_device *dev, 5009 struct ethtool_rxnfc *info, 5010 u32 *rules __always_unused) 5011 { 5012 switch (info->cmd) { 5013 case ETHTOOL_GRXRINGS: 5014 info->data = rxq_number; 5015 return 0; 5016 default: 5017 return -EOPNOTSUPP; 5018 } 5019 } 5020 5021 static int mvneta_config_rss(struct mvneta_port *pp) 5022 { 5023 int cpu; 5024 u32 val; 5025 5026 netif_tx_stop_all_queues(pp->dev); 5027 5028 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 5029 5030 if (!pp->neta_armada3700) { 5031 /* We have to synchronise on the napi of each CPU */ 5032 for_each_online_cpu(cpu) { 5033 struct mvneta_pcpu_port *pcpu_port = 5034 per_cpu_ptr(pp->ports, cpu); 5035 5036 napi_synchronize(&pcpu_port->napi); 5037 napi_disable(&pcpu_port->napi); 5038 } 5039 } else { 5040 napi_synchronize(&pp->napi); 5041 napi_disable(&pp->napi); 5042 } 5043 5044 pp->rxq_def = pp->indir[0]; 5045 5046 /* Update unicast mapping */ 5047 mvneta_set_rx_mode(pp->dev); 5048 5049 /* Update val of portCfg register accordingly with all RxQueue types */ 5050 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); 5051 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 5052 5053 /* Update the elected CPU matching the new rxq_def */ 5054 
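/* pp->lock serialises this election against the CPU hotplug callbacks, which also call mvneta_percpu_elect() under the same lock. */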
spin_lock(&pp->lock); 5055 mvneta_percpu_elect(pp); 5056 spin_unlock(&pp->lock); 5057 5058 if (!pp->neta_armada3700) { 5059 /* We have to synchronise on the napi of each CPU */ 5060 for_each_online_cpu(cpu) { 5061 struct mvneta_pcpu_port *pcpu_port = 5062 per_cpu_ptr(pp->ports, cpu); 5063 5064 napi_enable(&pcpu_port->napi); 5065 } 5066 } else { 5067 napi_enable(&pp->napi); 5068 } 5069 5070 netif_tx_start_all_queues(pp->dev); 5071 5072 return 0; 5073 } 5074 5075 static int mvneta_ethtool_set_rxfh(struct net_device *dev, 5076 struct ethtool_rxfh_param *rxfh, 5077 struct netlink_ext_ack *extack) 5078 { 5079 struct mvneta_port *pp = netdev_priv(dev); 5080 5081 /* Current code for Armada 3700 doesn't support RSS features yet */ 5082 if (pp->neta_armada3700) 5083 return -EOPNOTSUPP; 5084 5085 /* We require at least one supported parameter to be changed 5086 * and no change in any of the unsupported parameters 5087 */ 5088 if (rxfh->key || 5089 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 5090 rxfh->hfunc != ETH_RSS_HASH_TOP)) 5091 return -EOPNOTSUPP; 5092 5093 if (!rxfh->indir) 5094 return 0; 5095 5096 memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE); 5097 5098 return mvneta_config_rss(pp); 5099 } 5100 5101 static int mvneta_ethtool_get_rxfh(struct net_device *dev, 5102 struct ethtool_rxfh_param *rxfh) 5103 { 5104 struct mvneta_port *pp = netdev_priv(dev); 5105 5106 /* Current code for Armada 3700 doesn't support RSS features yet */ 5107 if (pp->neta_armada3700) 5108 return -EOPNOTSUPP; 5109 5110 rxfh->hfunc = ETH_RSS_HASH_TOP; 5111 5112 if (!rxfh->indir) 5113 return 0; 5114 5115 memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); 5116 5117 return 0; 5118 } 5119 5120 static void mvneta_ethtool_get_wol(struct net_device *dev, 5121 struct ethtool_wolinfo *wol) 5122 { 5123 struct mvneta_port *pp = netdev_priv(dev); 5124 5125 phylink_ethtool_get_wol(pp->phylink, wol); 5126 } 5127 5128 static int mvneta_ethtool_set_wol(struct net_device *dev, 5129 struct ethtool_wolinfo *wol) 5130 { 5131 struct mvneta_port *pp = netdev_priv(dev); 5132 int ret; 5133 5134 ret = phylink_ethtool_set_wol(pp->phylink, wol); 5135 if (!ret) 5136 device_set_wakeup_enable(&dev->dev, !!wol->wolopts); 5137 5138 return ret; 5139 } 5140 5141 static int mvneta_ethtool_get_eee(struct net_device *dev, 5142 struct ethtool_keee *eee) 5143 { 5144 struct mvneta_port *pp = netdev_priv(dev); 5145 5146 return phylink_ethtool_get_eee(pp->phylink, eee); 5147 } 5148 5149 static int mvneta_ethtool_set_eee(struct net_device *dev, 5150 struct ethtool_keee *eee) 5151 { 5152 struct mvneta_port *pp = netdev_priv(dev); 5153 5154 /* The Armada 37x documents do not give limits for this other than 5155 * it being an 8-bit register. 
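The only bound we can enforce on tx_lpi_timer is therefore the 0..255 range of an 8-bit field; e.g. a requested timer of 300 with LPI enabled is rejected with -EINVAL below.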
5156 */ 5157 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) 5158 return -EINVAL; 5159 5160 return phylink_ethtool_set_eee(pp->phylink, eee); 5161 } 5162 5163 static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) 5164 { 5165 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); 5166 } 5167 5168 static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq) 5169 { 5170 u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ); 5171 5172 val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7); 5173 val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq); 5174 5175 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); 5176 } 5177 5178 static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp) 5179 { 5180 unsigned long core_clk_rate; 5181 u32 refill_cycles; 5182 u32 val; 5183 5184 core_clk_rate = clk_get_rate(pp->clk); 5185 if (!core_clk_rate) 5186 return -EINVAL; 5187 5188 refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS / 5189 (NSEC_PER_SEC / core_clk_rate); 5190 5191 if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK) 5192 return -EINVAL; 5193 5194 /* Enable bw limit algorithm version 3 */ 5195 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); 5196 val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); 5197 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); 5198 5199 /* Set the base refill rate */ 5200 mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles); 5201 5202 return 0; 5203 } 5204 5205 static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp) 5206 { 5207 u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); 5208 5209 val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); 5210 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); 5211 } 5212 5213 static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue, 5214 u64 min_rate, u64 max_rate) 5215 { 5216 u32 refill_val, rem; 5217 u32 val = 0; 5218 5219 /* Convert to from Bps to bps */ 5220 max_rate *= 8; 5221 5222 if (min_rate) 5223 return -EINVAL; 5224 5225 refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION, 5226 &rem); 5227 5228 if (rem || !refill_val || 5229 refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX) 5230 return -EINVAL; 5231 5232 val = refill_val; 5233 val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD << 5234 MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT); 5235 5236 mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val); 5237 5238 return 0; 5239 } 5240 5241 static int mvneta_setup_mqprio(struct net_device *dev, 5242 struct tc_mqprio_qopt_offload *mqprio) 5243 { 5244 struct mvneta_port *pp = netdev_priv(dev); 5245 int rxq, txq, tc, ret; 5246 u8 num_tc; 5247 5248 if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) 5249 return 0; 5250 5251 num_tc = mqprio->qopt.num_tc; 5252 5253 if (num_tc > rxq_number) 5254 return -EINVAL; 5255 5256 mvneta_clear_rx_prio_map(pp); 5257 5258 if (!num_tc) { 5259 mvneta_disable_per_queue_rate_limit(pp); 5260 netdev_reset_tc(dev); 5261 return 0; 5262 } 5263 5264 netdev_set_num_tc(dev, mqprio->qopt.num_tc); 5265 5266 for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { 5267 netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc], 5268 mqprio->qopt.offset[tc]); 5269 5270 for (rxq = mqprio->qopt.offset[tc]; 5271 rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; 5272 rxq++) { 5273 if (rxq >= rxq_number) 5274 return -EINVAL; 5275 5276 mvneta_map_vlan_prio_to_rxq(pp, tc, rxq); 5277 } 5278 } 5279 5280 if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { 5281 mvneta_disable_per_queue_rate_limit(pp); 5282 return 0; 5283 } 5284 5285 if (mqprio->qopt.num_tc > txq_number) 5286 return -EINVAL; 5287 5288 ret = 
mvneta_enable_per_queue_rate_limit(pp); 5289 if (ret) 5290 return ret; 5291 5292 for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { 5293 for (txq = mqprio->qopt.offset[tc]; 5294 txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; 5295 txq++) { 5296 if (txq >= txq_number) 5297 return -EINVAL; 5298 5299 ret = mvneta_setup_queue_rates(pp, txq, 5300 mqprio->min_rate[tc], 5301 mqprio->max_rate[tc]); 5302 if (ret) 5303 return ret; 5304 } 5305 } 5306 5307 return 0; 5308 } 5309 5310 static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type, 5311 void *type_data) 5312 { 5313 switch (type) { 5314 case TC_SETUP_QDISC_MQPRIO: 5315 return mvneta_setup_mqprio(dev, type_data); 5316 default: 5317 return -EOPNOTSUPP; 5318 } 5319 } 5320 5321 static const struct net_device_ops mvneta_netdev_ops = { 5322 .ndo_open = mvneta_open, 5323 .ndo_stop = mvneta_stop, 5324 .ndo_start_xmit = mvneta_tx, 5325 .ndo_set_rx_mode = mvneta_set_rx_mode, 5326 .ndo_set_mac_address = mvneta_set_mac_addr, 5327 .ndo_change_mtu = mvneta_change_mtu, 5328 .ndo_fix_features = mvneta_fix_features, 5329 .ndo_get_stats64 = mvneta_get_stats64, 5330 .ndo_eth_ioctl = mvneta_ioctl, 5331 .ndo_bpf = mvneta_xdp, 5332 .ndo_xdp_xmit = mvneta_xdp_xmit, 5333 .ndo_setup_tc = mvneta_setup_tc, 5334 }; 5335 5336 static const struct ethtool_ops mvneta_eth_tool_ops = { 5337 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | 5338 ETHTOOL_COALESCE_MAX_FRAMES, 5339 .nway_reset = mvneta_ethtool_nway_reset, 5340 .get_link = ethtool_op_get_link, 5341 .set_coalesce = mvneta_ethtool_set_coalesce, 5342 .get_coalesce = mvneta_ethtool_get_coalesce, 5343 .get_drvinfo = mvneta_ethtool_get_drvinfo, 5344 .get_ringparam = mvneta_ethtool_get_ringparam, 5345 .set_ringparam = mvneta_ethtool_set_ringparam, 5346 .get_pauseparam = mvneta_ethtool_get_pauseparam, 5347 .set_pauseparam = mvneta_ethtool_set_pauseparam, 5348 .get_strings = mvneta_ethtool_get_strings, 5349 .get_ethtool_stats = mvneta_ethtool_get_stats, 5350 .get_sset_count = mvneta_ethtool_get_sset_count, 5351 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, 5352 .get_rxnfc = mvneta_ethtool_get_rxnfc, 5353 .get_rxfh = mvneta_ethtool_get_rxfh, 5354 .set_rxfh = mvneta_ethtool_set_rxfh, 5355 .get_link_ksettings = mvneta_ethtool_get_link_ksettings, 5356 .set_link_ksettings = mvneta_ethtool_set_link_ksettings, 5357 .get_wol = mvneta_ethtool_get_wol, 5358 .set_wol = mvneta_ethtool_set_wol, 5359 .get_eee = mvneta_ethtool_get_eee, 5360 .set_eee = mvneta_ethtool_set_eee, 5361 }; 5362 5363 /* Initialize hw */ 5364 static int mvneta_init(struct device *dev, struct mvneta_port *pp) 5365 { 5366 int queue; 5367 5368 /* Disable port */ 5369 mvneta_port_disable(pp); 5370 5371 /* Set port default values */ 5372 mvneta_defaults_set(pp); 5373 5374 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); 5375 if (!pp->txqs) 5376 return -ENOMEM; 5377 5378 /* Initialize TX descriptor rings */ 5379 for (queue = 0; queue < txq_number; queue++) { 5380 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5381 txq->id = queue; 5382 txq->size = pp->tx_ring_size; 5383 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 5384 } 5385 5386 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); 5387 if (!pp->rxqs) 5388 return -ENOMEM; 5389 5390 /* Create Rx descriptor rings */ 5391 for (queue = 0; queue < rxq_number; queue++) { 5392 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5393 rxq->id = queue; 5394 rxq->size = pp->rx_ring_size; 5395 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; 5396 
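/* Per-queue coalescing defaults; ethtool -C updates these later through mvneta_ethtool_set_coalesce(). */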
rxq->time_coal = MVNETA_RX_COAL_USEC; 5397 rxq->buf_virt_addr 5398 = devm_kmalloc_array(pp->dev->dev.parent, 5399 rxq->size, 5400 sizeof(*rxq->buf_virt_addr), 5401 GFP_KERNEL); 5402 if (!rxq->buf_virt_addr) 5403 return -ENOMEM; 5404 } 5405 5406 return 0; 5407 } 5408 5409 /* platform glue : initialize decoding windows */ 5410 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 5411 const struct mbus_dram_target_info *dram) 5412 { 5413 u32 win_enable; 5414 u32 win_protect; 5415 int i; 5416 5417 for (i = 0; i < 6; i++) { 5418 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); 5419 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); 5420 5421 if (i < 4) 5422 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); 5423 } 5424 5425 win_enable = 0x3f; 5426 win_protect = 0; 5427 5428 if (dram) { 5429 for (i = 0; i < dram->num_cs; i++) { 5430 const struct mbus_dram_window *cs = dram->cs + i; 5431 5432 mvreg_write(pp, MVNETA_WIN_BASE(i), 5433 (cs->base & 0xffff0000) | 5434 (cs->mbus_attr << 8) | 5435 dram->mbus_dram_target_id); 5436 5437 mvreg_write(pp, MVNETA_WIN_SIZE(i), 5438 (cs->size - 1) & 0xffff0000); 5439 5440 win_enable &= ~(1 << i); 5441 win_protect |= 3 << (2 * i); 5442 } 5443 } else { 5444 if (pp->neta_ac5) 5445 mvreg_write(pp, MVNETA_WIN_BASE(0), 5446 (MVNETA_AC5_CNM_DDR_ATTR << 8) | 5447 MVNETA_AC5_CNM_DDR_TARGET); 5448 /* For Armada3700 open default 4GB Mbus window, leaving 5449 * arbitration of target/attribute to a different layer 5450 * of configuration. 5451 */ 5452 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); 5453 win_enable &= ~BIT(0); 5454 win_protect = 3; 5455 } 5456 5457 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 5458 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); 5459 } 5460 5461 /* Power up the port */ 5462 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) 5463 { 5464 /* MAC Cause register should be cleared */ 5465 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 5466 5467 if (phy_mode != PHY_INTERFACE_MODE_QSGMII && 5468 phy_mode != PHY_INTERFACE_MODE_SGMII && 5469 !phy_interface_mode_is_8023z(phy_mode) && 5470 !phy_interface_mode_is_rgmii(phy_mode)) 5471 return -EINVAL; 5472 5473 /* Ensure LPI is disabled */ 5474 mvneta_mac_disable_tx_lpi(&pp->phylink_config); 5475 5476 return 0; 5477 } 5478 5479 /* Device initialization routine */ 5480 static int mvneta_probe(struct platform_device *pdev) 5481 { 5482 struct device_node *dn = pdev->dev.of_node; 5483 struct device_node *bm_node; 5484 struct mvneta_port *pp; 5485 struct net_device *dev; 5486 struct phylink *phylink; 5487 struct phy *comphy; 5488 char hw_mac_addr[ETH_ALEN]; 5489 phy_interface_t phy_mode; 5490 const char *mac_from; 5491 int tx_csum_limit; 5492 int err; 5493 int cpu; 5494 5495 dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), 5496 txq_number, rxq_number); 5497 if (!dev) 5498 return -ENOMEM; 5499 5500 dev->tx_queue_len = MVNETA_MAX_TXD; 5501 dev->watchdog_timeo = 5 * HZ; 5502 dev->netdev_ops = &mvneta_netdev_ops; 5503 dev->ethtool_ops = &mvneta_eth_tool_ops; 5504 5505 pp = netdev_priv(dev); 5506 spin_lock_init(&pp->lock); 5507 pp->dn = dn; 5508 5509 pp->rxq_def = rxq_def; 5510 pp->indir[0] = rxq_def; 5511 5512 err = of_get_phy_mode(dn, &phy_mode); 5513 if (err) { 5514 dev_err(&pdev->dev, "incorrect phy-mode\n"); 5515 return err; 5516 } 5517 5518 pp->phy_interface = phy_mode; 5519 5520 comphy = devm_of_phy_get(&pdev->dev, dn, NULL); 5521 if (comphy == ERR_PTR(-EPROBE_DEFER)) 5522 return -EPROBE_DEFER; 5523 5524 if (IS_ERR(comphy)) 5525 comphy = NULL; 5526 5527 pp->comphy = 
comphy; 5528 5529 pp->base = devm_platform_ioremap_resource(pdev, 0); 5530 if (IS_ERR(pp->base)) 5531 return PTR_ERR(pp->base); 5532 5533 /* Get special SoC configurations */ 5534 if (of_device_is_compatible(dn, "marvell,armada-3700-neta")) 5535 pp->neta_armada3700 = true; 5536 if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) { 5537 pp->neta_armada3700 = true; 5538 pp->neta_ac5 = true; 5539 } 5540 5541 dev->irq = irq_of_parse_and_map(dn, 0); 5542 if (dev->irq == 0) 5543 return -EINVAL; 5544 5545 pp->clk = devm_clk_get(&pdev->dev, "core"); 5546 if (IS_ERR(pp->clk)) 5547 pp->clk = devm_clk_get(&pdev->dev, NULL); 5548 if (IS_ERR(pp->clk)) { 5549 err = PTR_ERR(pp->clk); 5550 goto err_free_irq; 5551 } 5552 5553 clk_prepare_enable(pp->clk); 5554 5555 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); 5556 if (!IS_ERR(pp->clk_bus)) 5557 clk_prepare_enable(pp->clk_bus); 5558 5559 pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; 5560 5561 pp->phylink_config.dev = &dev->dev; 5562 pp->phylink_config.type = PHYLINK_NETDEV; 5563 pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | 5564 MAC_100 | MAC_1000FD | MAC_2500FD; 5565 5566 /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */ 5567 __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces); 5568 __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces); 5569 pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD; 5570 pp->phylink_config.lpi_timer_default = 250; 5571 pp->phylink_config.eee_enabled_default = true; 5572 5573 phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); 5574 __set_bit(PHY_INTERFACE_MODE_QSGMII, 5575 pp->phylink_config.supported_interfaces); 5576 if (comphy) { 5577 /* If a COMPHY is present, we can support any of the serdes 5578 * modes and switch between them. 
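SGMII, 1000BASE-X and 2500BASE-X are therefore all marked as supported just below, and mvneta_mac_finish() reprograms the COMPHY through mvneta_config_interface() when the interface changes.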
5579 */ 5580 __set_bit(PHY_INTERFACE_MODE_SGMII, 5581 pp->phylink_config.supported_interfaces); 5582 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 5583 pp->phylink_config.supported_interfaces); 5584 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 5585 pp->phylink_config.supported_interfaces); 5586 } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { 5587 /* No COMPHY, with only 2500BASE-X mode supported */ 5588 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 5589 pp->phylink_config.supported_interfaces); 5590 } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || 5591 phy_mode == PHY_INTERFACE_MODE_SGMII) { 5592 /* No COMPHY, we can switch between 1000BASE-X and SGMII */ 5593 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 5594 pp->phylink_config.supported_interfaces); 5595 __set_bit(PHY_INTERFACE_MODE_SGMII, 5596 pp->phylink_config.supported_interfaces); 5597 } 5598 5599 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, 5600 phy_mode, &mvneta_phylink_ops); 5601 if (IS_ERR(phylink)) { 5602 err = PTR_ERR(phylink); 5603 goto err_clk; 5604 } 5605 5606 pp->phylink = phylink; 5607 5608 /* Alloc per-cpu port structure */ 5609 pp->ports = alloc_percpu(struct mvneta_pcpu_port); 5610 if (!pp->ports) { 5611 err = -ENOMEM; 5612 goto err_free_phylink; 5613 } 5614 5615 /* Alloc per-cpu stats */ 5616 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); 5617 if (!pp->stats) { 5618 err = -ENOMEM; 5619 goto err_free_ports; 5620 } 5621 5622 err = of_get_ethdev_address(dn, dev); 5623 if (!err) { 5624 mac_from = "device tree"; 5625 } else { 5626 mvneta_get_mac_addr(pp, hw_mac_addr); 5627 if (is_valid_ether_addr(hw_mac_addr)) { 5628 mac_from = "hardware"; 5629 eth_hw_addr_set(dev, hw_mac_addr); 5630 } else { 5631 mac_from = "random"; 5632 eth_hw_addr_random(dev); 5633 } 5634 } 5635 5636 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { 5637 if (tx_csum_limit < 0 || 5638 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { 5639 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; 5640 dev_info(&pdev->dev, 5641 "Wrong TX csum limit in DT, set to %dB\n", 5642 MVNETA_TX_CSUM_DEF_SIZE); 5643 } 5644 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { 5645 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; 5646 } else { 5647 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; 5648 } 5649 5650 pp->tx_csum_limit = tx_csum_limit; 5651 5652 pp->dram_target_info = mv_mbus_dram_info(); 5653 /* Armada3700 requires setting default configuration of Mbus 5654 * windows, however without using filled mbus_dram_target_info 5655 * structure. 5656 */ 5657 if (pp->dram_target_info || pp->neta_armada3700) 5658 mvneta_conf_mbus_windows(pp, pp->dram_target_info); 5659 5660 pp->tx_ring_size = MVNETA_MAX_TXD; 5661 pp->rx_ring_size = MVNETA_MAX_RXD; 5662 5663 pp->dev = dev; 5664 SET_NETDEV_DEV(dev, &pdev->dev); 5665 5666 pp->id = global_port_id++; 5667 5668 /* Obtain access to BM resources if enabled and already initialized */ 5669 bm_node = of_parse_phandle(dn, "buffer-manager", 0); 5670 if (bm_node) { 5671 pp->bm_priv = mvneta_bm_get(bm_node); 5672 if (pp->bm_priv) { 5673 err = mvneta_bm_port_init(pdev, pp); 5674 if (err < 0) { 5675 dev_info(&pdev->dev, 5676 "use SW buffer management\n"); 5677 mvneta_bm_put(pp->bm_priv); 5678 pp->bm_priv = NULL; 5679 } 5680 } 5681 /* Set RX packet offset correction for platforms, whose 5682 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit 5683 * platforms and 0B for 32-bit ones. 
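The max(0, ...) below keeps the correction from going negative when NET_SKB_PAD is smaller than MVNETA_RX_PKT_OFFSET_CORRECTION.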
	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting the default configuration of Mbus
	 * windows, however without using a filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms whose
		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);

	/* sw buffer management */
	if (!pp->bm_priv)
		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	if (!pp->bm_priv)
		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				    NETDEV_XDP_ACT_REDIRECT |
				    NETDEV_XDP_ACT_NDO_XMIT |
				    NETDEV_XDP_ACT_RX_SG |
				    NETDEV_XDP_ACT_NDO_XMIT_SG;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_irq:
	irq_dispose_mapping(dev->irq);
	return err;
}

/* Device removal routine */
static void mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
}

#ifdef CONFIG_PM_SLEEP
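/* System sleep support: suspend stops the port and tears down the RX/TX queue
 * hardware state; resume reprograms the mbus windows, port defaults and queue
 * hardware before restarting the port.
 */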
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	rtnl_lock();
	mvneta_stop_dev(pp);
	rtnl_unlock();

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}

static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	rtnl_lock();
	mvneta_start_dev(pp);
	rtnl_unlock();
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ .compatible = "marvell,armada-ac5-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};
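
/* The CPU hotplug states are registered before the platform driver so that
 * they already exist by the time a probed port adds its per-CPU instances.
 */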
static int __init mvneta_driver_init(void)
{
	int ret;

	BUILD_BUG_ON_NOT_POWER_OF_2(MVNETA_TSO_PER_PAGE);

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);