// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cm3_base + offset);
}

static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cm3_base + offset);
}

static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}
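
/* Illustrative usage sketch (not part of the driver): creating a standalone
 * receive page pool with the helper above. The sizes here are arbitrary; the
 * driver itself derives them from mvpp2_pools[] in mvpp2_bm_init().
 *
 *	struct page_pool *pp;
 *
 *	pp = mvpp2_create_page_pool(dev, 2048, MVPP2_BM_LONG_PKT_SIZE,
 *				    DMA_FROM_DEVICE);
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 *
 * page_pool_create() returns an ERR_PTR() on failure, so the result must be
 * checked with IS_ERR() rather than compared against NULL.
 */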

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG   (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG     (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG     (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG    (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG      (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG   (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
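
/* A sketch of the thread-window pattern the comment above describes: the
 * per-thread MVPP2_TXQ_NUM_REG selects a TX queue, and the related global
 * MVPP2_TXQ_PENDING_REG is then read through the same thread window.
 * Locking and error handling are elided; this mirrors how the TX path
 * below uses these accessors.
 *
 *	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
 *	u32 pending;
 *
 *	mvpp2_thread_write(priv, thread, MVPP2_TXQ_NUM_REG, txq_id);
 *	pending = mvpp2_thread_read(priv, thread, MVPP2_TXQ_PENDING_REG);
 *	put_cpu();
 */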

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;

	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		      mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get the maximum number of RXQs */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}
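
/* Worked example of the rounding above: with 6 possible CPUs,
 * (6 + 3) & ~0x3 = 8, so eight RX queues are allocated; with 4 CPUs the
 * result stays 4. The value is then capped at MVPP2_PORT_MAX_RXQ.
 */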

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;

	val &= ~MVPP2_BM_LOW_THRESH_MASK;
	val &= ~MVPP2_BM_HIGH_THRESH_MASK;

	/* Set 8-pool BPPI thresholds for MVPP23 */
	if (priv->hw_version == MVPP23) {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
	} else {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
	}

	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
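
/* Sizing sketch for mvpp2_bm_pool_create(): each buffer pointer takes
 * 2 * sizeof(u64) = 16 bytes on PPv2.2/2.3 (8 bytes on PPv2.1), so a
 * 2048-pointer pool allocates 32 KiB (respectively 16 KiB) of DMA-coherent
 * memory, and 2048 satisfies the multiple-of-16 hardware constraint.
 */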

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version >= MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}
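
/* High-bits reassembly example for mvpp2_bm_bufs_get_addrs(), assuming a
 * 64-bit dma_addr_t: if the low register returns 0x12340000 and the
 * physical-address field of MVPP22_BM_ADDR_HIGH_ALLOC holds 0x9, the
 * reconstructed address is (0x9ULL << 32) | 0x12340000 = 0x912340000.
 */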

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		   MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		   MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

/* Enable the PPv23 8-pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
	int val;

	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
	val |= MVPP23_BM_8POOL_MODE;
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}

/* Cleanup pool before actual initialization in the OS */
static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
	u32 val;
	int i;

	/* Drain the BM from all possible residues left by firmware */
	for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
		mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));

	put_cpu();

	/* Stop the BM pool */
	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Clean up the pool state in case it contains stale state */
	for (i = 0; i < poolnum; i++)
		mvpp2_bm_pool_cleanup(priv, i);

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	if (priv->hw_version == MVPP23)
		mvpp23_bm_set_8pool_mode(priv);

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *data;

	data = mvpp2_frag_alloc(bm_pool, page_pool);
	if (!data)
		return NULL;

	if (page_pool) {
		page = (struct page *)data;
		dma_addr = page_pool_get_dma_addr(page);
		data = page_to_virt(page);
	} else {
		dma_addr = dma_map_single(port->dev->dev.parent, data,
					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			mvpp2_frag_free(bm_pool, NULL, data);
			return NULL;
		}
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Enable flow control for RXQs */
static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, host_id, q;
	int fq = port->first_rxq;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Set same Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set stop and start Flow control RXQ thresholds */
		val = MSS_THRESHOLD_START;
		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
		/* Set RXQ port ID */
		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		/* Calculate RXQ host ID:
		 * In Single queue mode: Host ID equal to Host ID used for
		 * shared RX interrupt
		 * In Multi queue mode: Host ID equal to RXQ ID / number of
		 * CoS queues
		 * In Single resource mode: Host ID always equal to 0
		 */
		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
			host_id = port->nqvecs;
		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
			host_id = q;
		else
			host_id = 0;

		/* Set RXQ host ID */
		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Disable flow control for RXQs */
static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, q;
	unsigned long flags;
	int fq = port->first_rxq;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Disable Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set threshold 0 to disable Flow control */
		val = 0;
		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));

		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));

		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
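
/* Threshold packing sketch for MSS_RXQ_TRESH_REG as programmed above: the
 * start threshold sits in the low bits and the stop threshold is shifted by
 * MSS_RXQ_TRESH_STOP_OFFS:
 *
 *	val = MSS_THRESHOLD_START |
 *	      (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
 *
 * Writing zero to both fields, as mvpp2_rxq_disable_fc() does, disables flow
 * control for the queue.
 */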

/* Enable/disable flow control for a BM pool */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{
	int val, cm3_state;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Check if the BM pool should be enabled or disabled */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
		val &= ~MSS_BUF_POOL_START_MASK;
		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
		val &= ~MSS_BUF_POOL_STOP_MASK;
		val |= MSS_THRESHOLD_STOP;
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	} else {
		/* Remove BM pool from the port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);

		/* Zero BM pool start and stop thresholds to disable pool
		 * flow control if pool empty (not used by any port)
		 */
		if (!pool->buf_num) {
			val &= ~MSS_BUF_POOL_START_MASK;
			val &= ~MSS_BUF_POOL_STOP_MASK;
		}

		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
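
/* Packing sketch for MSS_BUF_POOL_REG when enabling pool flow control above:
 * the port's bit is added and both thresholds are rewritten in one value,
 *
 *	val |= MSS_BUF_POOL_PORT_OFFS(port->id);
 *	val &= ~(MSS_BUF_POOL_START_MASK | MSS_BUF_POOL_STOP_MASK);
 *	val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS) |
 *	       MSS_THRESHOLD_STOP;
 *
 * Disabling clears the port bit and, if no port still uses the pool, zeroes
 * both thresholds.
 */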

/* Enable/disable flow control for BM pools on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->priv->percpu_pools) {
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}

static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{
	int val, timeout = 0;

	/* Enable global flow control. At this stage global flow control is
	 * enabled, but still disabled per port.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if the firmware is running and disable FC if not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	while (timeout < MSS_FC_MAX_TIMEOUT) {
		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);

		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
			return 0;
		usleep_range(10, 20);
		timeout++;
	}

	priv->global_tx_fc = false;
	return -EOPNOTSUPP;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version >= MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
			       MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
			       MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX descriptor.
	 * Instead of storing the virtual address, we store the
	 * physical address.
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
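
/* Release/refill pairing sketch: a buffer obtained from mvpp2_buf_alloc() is
 * handed back to the BM with mvpp2_bm_pool_put(); this is exactly the loop
 * mvpp2_bm_bufs_add() below performs:
 *
 *	buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr, &phys_addr,
 *			      GFP_KERNEL);
 *	if (buf)
 *		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, phys_addr);
 */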

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools\n");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}
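
/* Note that mvpp2_bm_bufs_add() returns the number of buffers actually
 * added, which can be less than requested if an allocation fails mid-loop;
 * callers such as mvpp2_bm_pool_use() compare the result against the
 * requested count and WARN on a shortfall.
 */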

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
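
/* Layout sketch for the frag_size set in the two helpers above: each RX
 * buffer is SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) bytes of packet area
 * plus MVPP2_SKB_SHINFO_SIZE reserved for the skb_shared_info, so a single
 * netdev_alloc_frag() allocation covers both whenever frag_size fits in a
 * page (see mvpp2_frag_alloc() above).
 */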

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 127.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port,
							port->pool_short,
							false);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							false);
		}

		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);

		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							true);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_short,
							true);
		}

		/* Update L4 checksum when jumbo enable/disable on port */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

out_set:
	WRITE_ONCE(dev->mtu, mtu);
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}
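
/* MTU-to-pool mapping sketch, following the rule in
 * mvpp2_swf_bm_pool_init_shared() and mvpp2_bm_update_mtu() above: a port
 * whose packet size exceeds MVPP2_BM_LONG_PKT_SIZE (1518B per the comments)
 * uses the SW jumbo pool as its HW long pool and the SW long pool as its HW
 * short pool; smaller packet sizes map long to long and short to short.
 */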

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}
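
/* Mask-building sketch for the helpers above: the per-port ISR enable
 * register takes an OR of the queue vectors' sw_thread_mask bits; e.g. two
 * vectors with masks 0x1 and 0x2 enable with
 * MVPP2_ISR_ENABLE_INTERRUPT(0x3), and the same combined mask is used with
 * MVPP2_ISR_DISABLE_INTERRUPT() on teardown.
 */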

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 val, thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	val = MVPP2_CAUSE_MISC_SUM_MASK |
	      MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
			   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version == MVPP21)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
				   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
	}
}

/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
	return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
	return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_5GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 old, val;

	old = val = readl(ptr);
	val &= ~mask;
	val |= set;
	if (old != val)
		writel(val, ptr);
}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2) {
		val |= GENCONF_CTRL0_PORT2_RGMII;
	} else if (port->gop_id == 3) {
		val |= GENCONF_CTRL0_PORT3_RGMII_MII;

		/* According to the specification, GENCONF_CTRL0_PORT3_RGMII
		 * should be set to 1 for RGMII and 0 for MII. However, tests
		 * show that it is the other way around. This is also what
		 * U-Boot does for mvpp2, so it is assumed to be correct.
		 */
		if (port->phy_interface == PHY_INTERFACE_MODE_MII)
			val |= GENCONF_CTRL0_PORT3_RGMII;
		else
			val &= ~GENCONF_CTRL0_PORT3_RGMII;
	}
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT2_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 val;

	val = readl(fca + MVPP22_FCA_CONTROL_REG);
	val &= ~MVPP22_FCA_ENABLE_PERIODIC;
	if (en)
		val |= MVPP22_FCA_ENABLE_PERIODIC;
	writel(val, fca + MVPP22_FCA_CONTROL_REG);
}

static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 lsb, msb;

	lsb = timer & MVPP22_FCA_REG_MASK;
	msb = timer >> MVPP22_FCA_REG_SIZE;

	writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
	writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
}

/* Set Flow Control timer x100 faster than pause quanta to ensure that link
 * partner won't send traffic if port is in XOFF mode.
 */
static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
{
	u32 timer;

	timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
		* FC_QUANTA;

	mvpp22_gop_fca_enable_periodic(port, false);

	mvpp22_gop_fca_set_timer(port, timer);

	mvpp22_gop_fca_enable_periodic(port, true);
}
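
/* Timer sketch for mvpp22_gop_fca_set_periodic_timer(): the counter value
 * timer = (tclk / (USEC_PER_SEC * FC_CLK_DIVIDER)) * FC_QUANTA is split by
 * mvpp22_gop_fca_set_timer() into MVPP22_FCA_REG_SIZE-bit halves:
 *
 *	lsb = timer & MVPP22_FCA_REG_MASK;
 *	msb = timer >> MVPP22_FCA_REG_SIZE;
 *
 * keeping the periodic XOFF refresh well inside one pause quantum (the
 * comment above calls for a 100x margin).
 */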

static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

	mvpp22_gop_fca_set_periodic_timer(port);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}
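
/* Bring-up ordering sketch (illustrative only; the actual call sites live in
 * the port start path outside this excerpt): the GOP is configured for the
 * selected mode before the link IRQs are unmasked, e.g.
 *
 *	err = mvpp22_gop_init(port, interface);
 *	if (!err)
 *		mvpp22_gop_setup_irq(port);
 */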

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
		     MVPP22_GMAC_INT_SUM_MASK_PTP,
		     MVPP22_GMAC_INT_SUM_MASK_PTP);

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);

		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
			     MVPP22_XLG_EXT_INT_MASK_PTP,
			     MVPP22_XLG_EXT_INT_MASK_PTP);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port,
			      phy_interface_t interface)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
	      ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(state->interface) ||
	    state->interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
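
/* Loopback sketch: for a 1000 Mb/s SGMII link state, mvpp2_port_loopback_set()
 * above ends up setting both loopback bits,
 *
 *	val |= MVPP2_GMAC_GMII_LB_EN_MASK | MVPP2_GMAC_PCS_LB_EN_MASK;
 *
 * while e.g. an RGMII state at another speed clears them both.
 */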

enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}
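
/* Indirect-counter sketch, matching the use in mvpp2_read_stats() below:
 *
 *	u32 enq = mvpp2_read_index(port->priv,
 *				   MVPP22_CTRS_TX_CTR(port->id, q),
 *				   MVPP2_TX_DESC_ENQ_CTR);
 *
 * The write to MVPP2_CTRS_IDX selects which TXQ the following register read
 * refers to.
 */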
1913  */
1914 static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
1915 	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
1916 	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
1917 	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
1918 	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
1919 	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
1920 	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
1921 	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
1922 	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
1923 	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
1924 	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
1925 	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
1926 	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
1927 	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
1928 	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
1929 	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
1930 	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
1931 	{ MVPP2_MIB_FC_SENT, "fc_sent" },
1932 	{ MVPP2_MIB_FC_RCVD, "fc_received" },
1933 	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
1934 	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
1935 	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
1936 	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
1937 	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
1938 	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
1939 	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
1940 	{ MVPP2_MIB_COLLISION, "collision" },
1941 	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
1942 };
1943 
1944 static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
1945 	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
1946 	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
1947 };
1948 
1949 static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
1950 	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
1951 	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
1952 	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
1953 	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
1954 	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
1955 	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
1956 	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
1957 	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
1958 	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
1959 };
1960 
1961 static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
1962 	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
1963 	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
1964 	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
1965 	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
1966 };
1967 
1968 static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
1969 	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
1970 	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
1971 	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
1972 	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
1973 	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
1974 	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
1975 	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
1976 };
1977 
1978 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
1979 						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
1980 						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) *
(ntxqs)) + \ 1981 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \ 1982 ARRAY_SIZE(mvpp2_ethtool_xdp)) 1983 1984 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, 1985 u8 *data) 1986 { 1987 struct mvpp2_port *port = netdev_priv(netdev); 1988 const char *str; 1989 int i, q; 1990 1991 if (sset != ETH_SS_STATS) 1992 return; 1993 1994 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) 1995 ethtool_puts(&data, mvpp2_ethtool_mib_regs[i].string); 1996 1997 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) 1998 ethtool_puts(&data, mvpp2_ethtool_port_regs[i].string); 1999 2000 for (q = 0; q < port->ntxqs; q++) 2001 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { 2002 str = mvpp2_ethtool_txq_regs[i].string; 2003 ethtool_sprintf(&data, str, q); 2004 } 2005 2006 for (q = 0; q < port->nrxqs; q++) 2007 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { 2008 str = mvpp2_ethtool_rxq_regs[i].string; 2009 ethtool_sprintf(&data, str, q); 2010 } 2011 2012 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) 2013 ethtool_puts(&data, mvpp2_ethtool_xdp[i].string); 2014 } 2015 2016 static void 2017 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) 2018 { 2019 unsigned int start; 2020 unsigned int cpu; 2021 2022 /* Gather XDP Statistics */ 2023 for_each_possible_cpu(cpu) { 2024 struct mvpp2_pcpu_stats *cpu_stats; 2025 u64 xdp_redirect; 2026 u64 xdp_pass; 2027 u64 xdp_drop; 2028 u64 xdp_xmit; 2029 u64 xdp_xmit_err; 2030 u64 xdp_tx; 2031 u64 xdp_tx_err; 2032 2033 cpu_stats = per_cpu_ptr(port->stats, cpu); 2034 do { 2035 start = u64_stats_fetch_begin(&cpu_stats->syncp); 2036 xdp_redirect = cpu_stats->xdp_redirect; 2037 xdp_pass = cpu_stats->xdp_pass; 2038 xdp_drop = cpu_stats->xdp_drop; 2039 xdp_xmit = cpu_stats->xdp_xmit; 2040 xdp_xmit_err = cpu_stats->xdp_xmit_err; 2041 xdp_tx = cpu_stats->xdp_tx; 2042 xdp_tx_err = cpu_stats->xdp_tx_err; 2043 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); 2044 2045 xdp_stats->xdp_redirect += xdp_redirect; 2046 xdp_stats->xdp_pass += xdp_pass; 2047 xdp_stats->xdp_drop += xdp_drop; 2048 xdp_stats->xdp_xmit += xdp_xmit; 2049 xdp_stats->xdp_xmit_err += xdp_xmit_err; 2050 xdp_stats->xdp_tx += xdp_tx; 2051 xdp_stats->xdp_tx_err += xdp_tx_err; 2052 } 2053 } 2054 2055 static void mvpp2_read_stats(struct mvpp2_port *port) 2056 { 2057 struct mvpp2_pcpu_stats xdp_stats = {}; 2058 const struct mvpp2_ethtool_counter *s; 2059 u64 *pstats; 2060 int i, q; 2061 2062 pstats = port->ethtool_stats; 2063 2064 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) 2065 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]); 2066 2067 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) 2068 *pstats++ += mvpp2_read(port->priv, 2069 mvpp2_ethtool_port_regs[i].offset + 2070 4 * port->id); 2071 2072 for (q = 0; q < port->ntxqs; q++) 2073 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) 2074 *pstats++ += mvpp2_read_index(port->priv, 2075 MVPP22_CTRS_TX_CTR(port->id, q), 2076 mvpp2_ethtool_txq_regs[i].offset); 2077 2078 /* Rxqs are numbered from 0 from the user standpoint, but not from the 2079 * driver's. We need to add the port->first_rxq offset. 
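	 * For example (hypothetical value): with port->first_rxq == 32, the
	 * hardware counters for user-visible rxq 0 are read at index 32.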
2080 */ 2081 for (q = 0; q < port->nrxqs; q++) 2082 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) 2083 *pstats++ += mvpp2_read_index(port->priv, 2084 port->first_rxq + q, 2085 mvpp2_ethtool_rxq_regs[i].offset); 2086 2087 /* Gather XDP Statistics */ 2088 mvpp2_get_xdp_stats(port, &xdp_stats); 2089 2090 for (i = 0, s = mvpp2_ethtool_xdp; 2091 s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp); 2092 s++, i++) { 2093 switch (s->offset) { 2094 case ETHTOOL_XDP_REDIRECT: 2095 *pstats++ = xdp_stats.xdp_redirect; 2096 break; 2097 case ETHTOOL_XDP_PASS: 2098 *pstats++ = xdp_stats.xdp_pass; 2099 break; 2100 case ETHTOOL_XDP_DROP: 2101 *pstats++ = xdp_stats.xdp_drop; 2102 break; 2103 case ETHTOOL_XDP_TX: 2104 *pstats++ = xdp_stats.xdp_tx; 2105 break; 2106 case ETHTOOL_XDP_TX_ERR: 2107 *pstats++ = xdp_stats.xdp_tx_err; 2108 break; 2109 case ETHTOOL_XDP_XMIT: 2110 *pstats++ = xdp_stats.xdp_xmit; 2111 break; 2112 case ETHTOOL_XDP_XMIT_ERR: 2113 *pstats++ = xdp_stats.xdp_xmit_err; 2114 break; 2115 } 2116 } 2117 } 2118 2119 static void mvpp2_gather_hw_statistics(struct work_struct *work) 2120 { 2121 struct delayed_work *del_work = to_delayed_work(work); 2122 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, 2123 stats_work); 2124 2125 mutex_lock(&port->gather_stats_lock); 2126 2127 mvpp2_read_stats(port); 2128 2129 /* No need to read again the counters right after this function if it 2130 * was called asynchronously by the user (ie. use of ethtool). 2131 */ 2132 cancel_delayed_work(&port->stats_work); 2133 queue_delayed_work(port->priv->stats_queue, &port->stats_work, 2134 MVPP2_MIB_COUNTERS_STATS_DELAY); 2135 2136 mutex_unlock(&port->gather_stats_lock); 2137 } 2138 2139 static void mvpp2_ethtool_get_stats(struct net_device *dev, 2140 struct ethtool_stats *stats, u64 *data) 2141 { 2142 struct mvpp2_port *port = netdev_priv(dev); 2143 2144 /* Update statistics for the given port, then take the lock to avoid 2145 * concurrent accesses on the ethtool_stats structure during its copy. 
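	 * mvpp2_gather_hw_statistics() takes the same gather_stats_lock, so a
	 * periodic refresh cannot run while the snapshot is copied below.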
2146 */ 2147 mvpp2_gather_hw_statistics(&port->stats_work.work); 2148 2149 mutex_lock(&port->gather_stats_lock); 2150 memcpy(data, port->ethtool_stats, 2151 sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); 2152 mutex_unlock(&port->gather_stats_lock); 2153 } 2154 2155 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) 2156 { 2157 struct mvpp2_port *port = netdev_priv(dev); 2158 2159 if (sset == ETH_SS_STATS) 2160 return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); 2161 2162 return -EOPNOTSUPP; 2163 } 2164 2165 static void mvpp2_mac_reset_assert(struct mvpp2_port *port) 2166 { 2167 u32 val; 2168 2169 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | 2170 MVPP2_GMAC_PORT_RESET_MASK; 2171 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 2172 2173 if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) { 2174 val = readl(port->base + MVPP22_XLG_CTRL0_REG) & 2175 ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; 2176 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 2177 } 2178 } 2179 2180 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) 2181 { 2182 struct mvpp2 *priv = port->priv; 2183 void __iomem *mpcs, *xpcs; 2184 u32 val; 2185 2186 if (port->priv->hw_version == MVPP21 || port->gop_id != 0) 2187 return; 2188 2189 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); 2190 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); 2191 2192 val = readl(mpcs + MVPP22_MPCS_CLK_RESET); 2193 val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); 2194 val |= MVPP22_MPCS_CLK_RESET_DIV_SET; 2195 writel(val, mpcs + MVPP22_MPCS_CLK_RESET); 2196 2197 val = readl(xpcs + MVPP22_XPCS_CFG0); 2198 writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); 2199 } 2200 2201 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port, 2202 phy_interface_t interface) 2203 { 2204 struct mvpp2 *priv = port->priv; 2205 void __iomem *mpcs, *xpcs; 2206 u32 val; 2207 2208 if (port->priv->hw_version == MVPP21 || port->gop_id != 0) 2209 return; 2210 2211 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); 2212 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); 2213 2214 switch (interface) { 2215 case PHY_INTERFACE_MODE_5GBASER: 2216 case PHY_INTERFACE_MODE_10GBASER: 2217 val = readl(mpcs + MVPP22_MPCS_CLK_RESET); 2218 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | 2219 MAC_CLK_RESET_SD_TX; 2220 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; 2221 writel(val, mpcs + MVPP22_MPCS_CLK_RESET); 2222 break; 2223 case PHY_INTERFACE_MODE_XAUI: 2224 case PHY_INTERFACE_MODE_RXAUI: 2225 val = readl(xpcs + MVPP22_XPCS_CFG0); 2226 writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); 2227 break; 2228 default: 2229 break; 2230 } 2231 } 2232 2233 /* Change maximum receive size of the port */ 2234 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) 2235 { 2236 u32 val; 2237 2238 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 2239 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 2240 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << 2241 MVPP2_GMAC_MAX_RX_SIZE_OFFS); 2242 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 2243 } 2244 2245 /* Change maximum receive size of the port */ 2246 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) 2247 { 2248 u32 val; 2249 2250 val = readl(port->base + MVPP22_XLG_CTRL1_REG); 2251 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; 2252 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << 2253 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; 2254 writel(val, port->base + MVPP22_XLG_CTRL1_REG); 2255 } 2256 
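/* Note on the two helpers above: the MAC frame-size-limit fields hold the
 * limit in units of two bytes, hence the (port->pkt_size - MVPP2_MH_SIZE) / 2
 * term. As a worked example (hypothetical value): pkt_size == 1518 with the
 * 2-byte Marvell header gives (1518 - 2) / 2 = 758 written to the field.
 */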
2257 /* Set defaults for the MVPP2 port */
2258 static void mvpp2_defaults_set(struct mvpp2_port *port)
2259 {
2260 	int tx_port_num, val, queue, lrxq;
2261 
2262 	if (port->priv->hw_version == MVPP21) {
2263 		/* Update TX FIFO MIN Threshold */
2264 		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2265 		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2266 		/* Min. TX threshold must be less than minimal packet length */
2267 		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2268 		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2269 	}
2270 
2271 	/* Disable Legacy WRR, Disable EJP, Release from reset */
2272 	tx_port_num = mvpp2_egress_port(port);
2273 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2274 		    tx_port_num);
2275 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2276 
2277 	/* Set TXQ scheduling to Round-Robin */
2278 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
2279 
2280 	/* Close bandwidth for all queues */
2281 	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
2282 		mvpp2_write(port->priv,
2283 			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
2284 
2285 	/* Set refill period to 1 usec, refill tokens
2286 	 * and bucket size to maximum
2287 	 */
2288 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
2289 		    port->priv->tclk / USEC_PER_SEC);
2290 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2291 	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2292 	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2293 	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2294 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2295 	val = MVPP2_TXP_TOKEN_SIZE_MAX;
2296 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2297 
2298 	/* Set MaximumLowLatencyPacketSize value to 256 */
2299 	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2300 		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2301 		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2302 
2303 	/* Enable Rx cache snoop */
2304 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2305 		queue = port->rxqs[lrxq]->id;
2306 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2307 		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2308 			   MVPP2_SNOOP_BUF_HDR_MASK;
2309 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2310 	}
2311 
2312 	/* By default, mask all interrupts on all present CPUs */
2313 	mvpp2_interrupts_disable(port);
2314 }
2315 
2316 /* Enable/disable receiving packets */
2317 static void mvpp2_ingress_enable(struct mvpp2_port *port)
2318 {
2319 	u32 val;
2320 	int lrxq, queue;
2321 
2322 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2323 		queue = port->rxqs[lrxq]->id;
2324 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2325 		val &= ~MVPP2_RXQ_DISABLE_MASK;
2326 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2327 	}
2328 }
2329 
2330 static void mvpp2_ingress_disable(struct mvpp2_port *port)
2331 {
2332 	u32 val;
2333 	int lrxq, queue;
2334 
2335 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2336 		queue = port->rxqs[lrxq]->id;
2337 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2338 		val |= MVPP2_RXQ_DISABLE_MASK;
2339 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2340 	}
2341 }
2342 
2343 /* Enable transmit via physical egress queue
2344  * - HW starts taking descriptors from DRAM
2345  */
2346 static void mvpp2_egress_enable(struct mvpp2_port *port)
2347 {
2348 	u32 qmap;
2349 	int queue;
2350 	int tx_port_num = mvpp2_egress_port(port);
2351 
2352 	/* Enable all initialized TXQs.
 */
2353 	qmap = 0;
2354 	for (queue = 0; queue < port->ntxqs; queue++) {
2355 		struct mvpp2_tx_queue *txq = port->txqs[queue];
2356 
2357 		if (txq->descs)
2358 			qmap |= (1 << queue);
2359 	}
2360 
2361 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2362 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2363 }
2364 
2365 /* Disable transmit via physical egress queue
2366  * - HW doesn't take descriptors from DRAM
2367  */
2368 static void mvpp2_egress_disable(struct mvpp2_port *port)
2369 {
2370 	u32 reg_data;
2371 	int delay;
2372 	int tx_port_num = mvpp2_egress_port(port);
2373 
2374 	/* Issue stop command for active channels only */
2375 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2376 	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2377 		    MVPP2_TXP_SCHED_ENQ_MASK;
2378 	if (reg_data != 0)
2379 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2380 			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2381 
2382 	/* Wait for all Tx activity to terminate. */
2383 	delay = 0;
2384 	do {
2385 		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2386 			netdev_warn(port->dev,
2387 				    "Tx stop timed out, status=0x%08x\n",
2388 				    reg_data);
2389 			break;
2390 		}
2391 		mdelay(1);
2392 		delay++;
2393 
2394 		/* Check the port TX Command register to verify that all
2395 		 * Tx queues have stopped
2396 		 */
2397 		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2398 	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2399 }
2400 
2401 /* Rx descriptors helper methods */
2402 
2403 /* Get number of Rx descriptors occupied by received packets */
2404 static inline int
2405 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2406 {
2407 	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2408 
2409 	return val & MVPP2_RXQ_OCCUPIED_MASK;
2410 }
2411 
2412 /* Update Rx queue status with the number of occupied and available
2413  * Rx descriptor slots.
2414  */
2415 static inline void
2416 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2417 			int used_count, int free_count)
2418 {
2419 	/* Decrement the number of used descriptors and increment the
2420 	 * number of free descriptors.
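	 * mvpp2_rxq_drop_pkts() below, for instance, calls this with
	 * used_count == free_count to return every processed slot to the
	 * hardware.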
2421 	 */
2422 	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2423 
2424 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2425 }
2426 
2427 /* Get pointer to next RX descriptor to be processed by SW */
2428 static inline struct mvpp2_rx_desc *
2429 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2430 {
2431 	int rx_desc = rxq->next_desc_to_proc;
2432 
2433 	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2434 	prefetch(rxq->descs + rxq->next_desc_to_proc);
2435 	return rxq->descs + rx_desc;
2436 }
2437 
2438 /* Set rx queue offset */
2439 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2440 				 int prxq, int offset)
2441 {
2442 	u32 val;
2443 
2444 	/* Convert offset from bytes to units of 32 bytes */
2445 	offset = offset >> 5;
2446 
2447 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2448 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2449 
2450 	/* Offset is in units of 32 bytes */
2451 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2452 		MVPP2_RXQ_PACKET_OFFSET_MASK);
2453 
2454 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2455 }
2456 
2457 /* Tx descriptors helper methods */
2458 
2459 /* Get pointer to next Tx descriptor to be processed (send) by HW */
2460 static struct mvpp2_tx_desc *
2461 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2462 {
2463 	int tx_desc = txq->next_desc_to_proc;
2464 
2465 	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2466 	return txq->descs + tx_desc;
2467 }
2468 
2469 /* Update HW with number of aggregated Tx descriptors to be sent
2470  *
2471  * Called only from mvpp2_tx(), so migration is disabled, using
2472  * smp_processor_id() is OK.
2473  */
2474 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2475 {
2476 	/* aggregated access - relevant TXQ number is written in TX desc */
2477 	mvpp2_thread_write(port->priv,
2478 			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2479 			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2480 }
2481 
2482 /* Check if there are enough free descriptors in aggregated txq.
2483  * If not, update the number of occupied descriptors and repeat the check.
2484  *
2485  * Called only from mvpp2_tx(), so migration is disabled, using
2486  * smp_processor_id() is OK.
2487  */
2488 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2489 				     struct mvpp2_tx_queue *aggr_txq, int num)
2490 {
2491 	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2492 		/* Update number of occupied aggregated Tx descriptors */
2493 		unsigned int thread =
2494 			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2495 		u32 val = mvpp2_read_relaxed(port->priv,
2496 					     MVPP2_AGGR_TXQ_STATUS_REG(thread));
2497 
2498 		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2499 
2500 		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2501 			return -ENOMEM;
2502 	}
2503 	return 0;
2504 }
2505 
2506 /* Reserved Tx descriptors allocation request
2507  *
2508  * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
2509  * only by mvpp2_tx(), so migration is disabled, using
2510  * smp_processor_id() is OK.
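 *
 * The value read back from MVPP2_TXQ_RSVD_RSLT_REG is the number of
 * descriptors the hardware actually reserved, which may be fewer than
 * requested; the caller is expected to re-check the total afterwards.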
2511 */ 2512 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, 2513 struct mvpp2_tx_queue *txq, int num) 2514 { 2515 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 2516 struct mvpp2 *priv = port->priv; 2517 u32 val; 2518 2519 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; 2520 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); 2521 2522 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); 2523 2524 return val & MVPP2_TXQ_RSVD_RSLT_MASK; 2525 } 2526 2527 /* Check if there are enough reserved descriptors for transmission. 2528 * If not, request chunk of reserved descriptors and check again. 2529 */ 2530 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, 2531 struct mvpp2_tx_queue *txq, 2532 struct mvpp2_txq_pcpu *txq_pcpu, 2533 int num) 2534 { 2535 int req, desc_count; 2536 unsigned int thread; 2537 2538 if (txq_pcpu->reserved_num >= num) 2539 return 0; 2540 2541 /* Not enough descriptors reserved! Update the reserved descriptor 2542 * count and check again. 2543 */ 2544 2545 desc_count = 0; 2546 /* Compute total of used descriptors */ 2547 for (thread = 0; thread < port->priv->nthreads; thread++) { 2548 struct mvpp2_txq_pcpu *txq_pcpu_aux; 2549 2550 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); 2551 desc_count += txq_pcpu_aux->count; 2552 desc_count += txq_pcpu_aux->reserved_num; 2553 } 2554 2555 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); 2556 desc_count += req; 2557 2558 if (desc_count > 2559 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) 2560 return -ENOMEM; 2561 2562 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); 2563 2564 /* OK, the descriptor could have been updated: check again. */ 2565 if (txq_pcpu->reserved_num < num) 2566 return -ENOMEM; 2567 return 0; 2568 } 2569 2570 /* Release the last allocated Tx descriptor. Useful to handle DMA 2571 * mapping failures in the Tx path. 2572 */ 2573 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) 2574 { 2575 if (txq->next_desc_to_proc == 0) 2576 txq->next_desc_to_proc = txq->last_desc - 1; 2577 else 2578 txq->next_desc_to_proc--; 2579 } 2580 2581 /* Set Tx descriptors fields relevant for CSUM calculation */ 2582 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, 2583 int ip_hdr_len, int l4_proto) 2584 { 2585 u32 command; 2586 2587 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 2588 * G_L4_chk, L4_type required only for checksum calculation 2589 */ 2590 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); 2591 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); 2592 command |= MVPP2_TXD_IP_CSUM_DISABLE; 2593 2594 if (l3_proto == htons(ETH_P_IP)) { 2595 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ 2596 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ 2597 } else { 2598 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ 2599 } 2600 2601 if (l4_proto == IPPROTO_TCP) { 2602 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ 2603 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 2604 } else if (l4_proto == IPPROTO_UDP) { 2605 command |= MVPP2_TXD_L4_UDP; /* enable UDP */ 2606 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 2607 } else { 2608 command |= MVPP2_TXD_L4_CSUM_NOT; 2609 } 2610 2611 return command; 2612 } 2613 2614 /* Get number of sent descriptors and decrement counter. 2615 * The number of sent descriptors is returned. 
2616  * Per-thread access
2617  *
2618  * Called only from mvpp2_txq_done(), which itself is called from
2619  * mvpp2_tx() (migration disabled) and from the Tx-done hrtimer callback
2620  * (migration disabled), so using smp_processor_id() is OK.
2621  */
2622 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2623 					   struct mvpp2_tx_queue *txq)
2624 {
2625 	u32 val;
2626 
2627 	/* Reading status reg resets transmitted descriptor counter */
2628 	val = mvpp2_thread_read_relaxed(port->priv,
2629 					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2630 					MVPP2_TXQ_SENT_REG(txq->id));
2631 
2632 	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2633 		MVPP2_TRANSMITTED_COUNT_OFFSET;
2634 }
2635 
2636 /* Called through on_each_cpu(), so runs on all CPUs, with migration
2637  * disabled, therefore using smp_processor_id() is OK.
2638  */
2639 static void mvpp2_txq_sent_counter_clear(void *arg)
2640 {
2641 	struct mvpp2_port *port = arg;
2642 	int queue;
2643 
2644 	/* If the thread isn't used, don't do anything */
2645 	if (smp_processor_id() >= port->priv->nthreads)
2646 		return;
2647 
2648 	for (queue = 0; queue < port->ntxqs; queue++) {
2649 		int id = port->txqs[queue]->id;
2650 
2651 		mvpp2_thread_read(port->priv,
2652 				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2653 				  MVPP2_TXQ_SENT_REG(id));
2654 	}
2655 }
2656 
2657 /* Set max sizes for Tx queues */
2658 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2659 {
2660 	u32 val, size, mtu;
2661 	int txq, tx_port_num;
2662 
2663 	mtu = port->pkt_size * 8;
2664 	if (mtu > MVPP2_TXP_MTU_MAX)
2665 		mtu = MVPP2_TXP_MTU_MAX;
2666 
2667 	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2668 	mtu = 3 * mtu;
2669 
2670 	/* Indirect access to registers */
2671 	tx_port_num = mvpp2_egress_port(port);
2672 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2673 
2674 	/* Set MTU */
2675 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2676 	val &= ~MVPP2_TXP_MTU_MAX;
2677 	val |= mtu;
2678 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2679 
2680 	/* TXP token size and all TXQs token size must be larger than the MTU */
2681 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2682 	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2683 	if (size < mtu) {
2684 		size = mtu;
2685 		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2686 		val |= size;
2687 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2688 	}
2689 
2690 	for (txq = 0; txq < port->ntxqs; txq++) {
2691 		val = mvpp2_read(port->priv,
2692 				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2693 		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2694 
2695 		if (size < mtu) {
2696 			size = mtu;
2697 			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2698 			val |= size;
2699 			mvpp2_write(port->priv,
2700 				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2701 				    val);
2702 		}
2703 	}
2704 }
2705 
2706 /* Set the number of non-occupied descriptors threshold */
2707 static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
2708 				     struct mvpp2_rx_queue *rxq)
2709 {
2710 	u32 val;
2711 
2712 	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
2713 
2714 	val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
2715 	val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
2716 	val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
2717 	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
2718 }
2719 
2720 /* Set the number of packets that will be received before an Rx interrupt
2721  * is generated by the HW.
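 * For example (hypothetical value): with pkts_coal == 32, the interrupt
 * fires only once 32 descriptors are occupied; values above
 * MVPP2_OCCUPIED_THRESH_MASK are clamped by the function below.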
2722 */ 2723 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 2724 struct mvpp2_rx_queue *rxq) 2725 { 2726 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2727 2728 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 2729 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; 2730 2731 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2732 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, 2733 rxq->pkts_coal); 2734 2735 put_cpu(); 2736 } 2737 2738 /* For some reason in the LSP this is done on each CPU. Why ? */ 2739 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, 2740 struct mvpp2_tx_queue *txq) 2741 { 2742 unsigned int thread; 2743 u32 val; 2744 2745 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) 2746 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; 2747 2748 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); 2749 /* PKT-coalescing registers are per-queue + per-thread */ 2750 for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { 2751 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 2752 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); 2753 } 2754 } 2755 2756 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) 2757 { 2758 u64 tmp = (u64)clk_hz * usec; 2759 2760 do_div(tmp, USEC_PER_SEC); 2761 2762 return tmp > U32_MAX ? U32_MAX : tmp; 2763 } 2764 2765 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) 2766 { 2767 u64 tmp = (u64)cycles * USEC_PER_SEC; 2768 2769 do_div(tmp, clk_hz); 2770 2771 return tmp > U32_MAX ? U32_MAX : tmp; 2772 } 2773 2774 /* Set the time delay in usec before Rx interrupt */ 2775 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, 2776 struct mvpp2_rx_queue *rxq) 2777 { 2778 unsigned long freq = port->priv->tclk; 2779 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 2780 2781 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { 2782 rxq->time_coal = 2783 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); 2784 2785 /* re-evaluate to get actual register value */ 2786 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 2787 } 2788 2789 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); 2790 } 2791 2792 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) 2793 { 2794 unsigned long freq = port->priv->tclk; 2795 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 2796 2797 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { 2798 port->tx_time_coal = 2799 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); 2800 2801 /* re-evaluate to get actual register value */ 2802 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 2803 } 2804 2805 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); 2806 } 2807 2808 /* Free Tx queue skbuffs */ 2809 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 2810 struct mvpp2_tx_queue *txq, 2811 struct mvpp2_txq_pcpu *txq_pcpu, int num) 2812 { 2813 struct xdp_frame_bulk bq; 2814 int i; 2815 2816 xdp_frame_bulk_init(&bq); 2817 2818 rcu_read_lock(); /* need for xdp_return_frame_bulk */ 2819 2820 for (i = 0; i < num; i++) { 2821 struct mvpp2_txq_pcpu_buf *tx_buf = 2822 txq_pcpu->buffs + txq_pcpu->txq_get_index; 2823 2824 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && 2825 tx_buf->type != MVPP2_TYPE_XDP_TX) 2826 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 2827 tx_buf->size, DMA_TO_DEVICE); 2828 if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) 2829 dev_kfree_skb_any(tx_buf->skb); 2830 else if (tx_buf->type == MVPP2_TYPE_XDP_TX || 2831 tx_buf->type == MVPP2_TYPE_XDP_NDO) 
2832 xdp_return_frame_bulk(tx_buf->xdpf, &bq); 2833 2834 mvpp2_txq_inc_get(txq_pcpu); 2835 } 2836 xdp_flush_frame_bulk(&bq); 2837 2838 rcu_read_unlock(); 2839 } 2840 2841 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 2842 u32 cause) 2843 { 2844 int queue = fls(cause) - 1; 2845 2846 return port->rxqs[queue]; 2847 } 2848 2849 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 2850 u32 cause) 2851 { 2852 int queue = fls(cause) - 1; 2853 2854 return port->txqs[queue]; 2855 } 2856 2857 /* Handle end of transmission */ 2858 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 2859 struct mvpp2_txq_pcpu *txq_pcpu) 2860 { 2861 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); 2862 int tx_done; 2863 2864 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) 2865 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); 2866 2867 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 2868 if (!tx_done) 2869 return; 2870 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); 2871 2872 txq_pcpu->count -= tx_done; 2873 2874 if (netif_tx_queue_stopped(nq)) 2875 if (txq_pcpu->count <= txq_pcpu->wake_threshold) 2876 netif_tx_wake_queue(nq); 2877 } 2878 2879 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, 2880 unsigned int thread) 2881 { 2882 struct mvpp2_tx_queue *txq; 2883 struct mvpp2_txq_pcpu *txq_pcpu; 2884 unsigned int tx_todo = 0; 2885 2886 while (cause) { 2887 txq = mvpp2_get_tx_queue(port, cause); 2888 if (!txq) 2889 break; 2890 2891 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 2892 2893 if (txq_pcpu->count) { 2894 mvpp2_txq_done(port, txq, txq_pcpu); 2895 tx_todo += txq_pcpu->count; 2896 } 2897 2898 cause &= ~(1 << txq->log_id); 2899 } 2900 return tx_todo; 2901 } 2902 2903 /* Rx/Tx queue initialization/cleanup methods */ 2904 2905 /* Allocate and initialize descriptors for aggr TXQ */ 2906 static int mvpp2_aggr_txq_init(struct platform_device *pdev, 2907 struct mvpp2_tx_queue *aggr_txq, 2908 unsigned int thread, struct mvpp2 *priv) 2909 { 2910 u32 txq_dma; 2911 2912 /* Allocate memory for TX descriptors */ 2913 aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 2914 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2915 &aggr_txq->descs_dma, GFP_KERNEL); 2916 if (!aggr_txq->descs) 2917 return -ENOMEM; 2918 2919 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; 2920 2921 /* Aggr TXQ no reset WA */ 2922 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 2923 MVPP2_AGGR_TXQ_INDEX_REG(thread)); 2924 2925 /* Set Tx descriptors queue starting address indirect 2926 * access 2927 */ 2928 if (priv->hw_version == MVPP21) 2929 txq_dma = aggr_txq->descs_dma; 2930 else 2931 txq_dma = aggr_txq->descs_dma >> 2932 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 2933 2934 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); 2935 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), 2936 MVPP2_AGGR_TXQ_SIZE); 2937 2938 return 0; 2939 } 2940 2941 /* Create a specified Rx queue */ 2942 static int mvpp2_rxq_init(struct mvpp2_port *port, 2943 struct mvpp2_rx_queue *rxq) 2944 { 2945 struct mvpp2 *priv = port->priv; 2946 unsigned int thread; 2947 u32 rxq_dma; 2948 int err; 2949 2950 rxq->size = port->rx_ring_size; 2951 2952 /* Allocate memory for RX descriptors */ 2953 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, 2954 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 2955 &rxq->descs_dma, GFP_KERNEL); 2956 if (!rxq->descs) 2957 return -ENOMEM; 2958 2959 rxq->last_desc = rxq->size - 1; 
2960 2961 /* Zero occupied and non-occupied counters - direct access */ 2962 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 2963 2964 /* Set Rx descriptors queue starting address - indirect access */ 2965 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2966 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2967 if (port->priv->hw_version == MVPP21) 2968 rxq_dma = rxq->descs_dma; 2969 else 2970 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 2971 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 2972 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 2973 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); 2974 put_cpu(); 2975 2976 /* Set Offset */ 2977 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); 2978 2979 /* Set coalescing pkts and time */ 2980 mvpp2_rx_pkts_coal_set(port, rxq); 2981 mvpp2_rx_time_coal_set(port, rxq); 2982 2983 /* Set the number of non occupied descriptors threshold */ 2984 mvpp2_set_rxq_free_tresh(port, rxq); 2985 2986 /* Add number of descriptors ready for receiving packets */ 2987 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 2988 2989 if (priv->percpu_pools) { 2990 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0); 2991 if (err < 0) 2992 goto err_free_dma; 2993 2994 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0); 2995 if (err < 0) 2996 goto err_unregister_rxq_short; 2997 2998 /* Every RXQ has a pool for short and another for long packets */ 2999 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short, 3000 MEM_TYPE_PAGE_POOL, 3001 priv->page_pool[rxq->logic_rxq]); 3002 if (err < 0) 3003 goto err_unregister_rxq_long; 3004 3005 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long, 3006 MEM_TYPE_PAGE_POOL, 3007 priv->page_pool[rxq->logic_rxq + 3008 port->nrxqs]); 3009 if (err < 0) 3010 goto err_unregister_mem_rxq_short; 3011 } 3012 3013 return 0; 3014 3015 err_unregister_mem_rxq_short: 3016 xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short); 3017 err_unregister_rxq_long: 3018 xdp_rxq_info_unreg(&rxq->xdp_rxq_long); 3019 err_unregister_rxq_short: 3020 xdp_rxq_info_unreg(&rxq->xdp_rxq_short); 3021 err_free_dma: 3022 dma_free_coherent(port->dev->dev.parent, 3023 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 3024 rxq->descs, rxq->descs_dma); 3025 return err; 3026 } 3027 3028 /* Push packets received by the RXQ to BM pool */ 3029 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 3030 struct mvpp2_rx_queue *rxq) 3031 { 3032 int rx_received, i; 3033 3034 rx_received = mvpp2_rxq_received(port, rxq->id); 3035 if (!rx_received) 3036 return; 3037 3038 for (i = 0; i < rx_received; i++) { 3039 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 3040 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 3041 int pool; 3042 3043 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> 3044 MVPP2_RXD_BM_POOL_ID_OFFS; 3045 3046 mvpp2_bm_pool_put(port, pool, 3047 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 3048 mvpp2_rxdesc_cookie_get(port, rx_desc)); 3049 } 3050 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 3051 } 3052 3053 /* Cleanup Rx queue */ 3054 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 3055 struct mvpp2_rx_queue *rxq) 3056 { 3057 unsigned int thread; 3058 3059 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short)) 3060 xdp_rxq_info_unreg(&rxq->xdp_rxq_short); 3061 3062 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long)) 3063 xdp_rxq_info_unreg(&rxq->xdp_rxq_long); 3064 3065 mvpp2_rxq_drop_pkts(port, rxq); 
3066 3067 if (rxq->descs) 3068 dma_free_coherent(port->dev->dev.parent, 3069 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 3070 rxq->descs, 3071 rxq->descs_dma); 3072 3073 rxq->descs = NULL; 3074 rxq->last_desc = 0; 3075 rxq->next_desc_to_proc = 0; 3076 rxq->descs_dma = 0; 3077 3078 /* Clear Rx descriptors queue starting address and size; 3079 * free descriptor number 3080 */ 3081 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 3082 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3083 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 3084 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); 3085 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); 3086 put_cpu(); 3087 } 3088 3089 /* Create and initialize a Tx queue */ 3090 static int mvpp2_txq_init(struct mvpp2_port *port, 3091 struct mvpp2_tx_queue *txq) 3092 { 3093 u32 val; 3094 unsigned int thread; 3095 int desc, desc_per_txq, tx_port_num; 3096 struct mvpp2_txq_pcpu *txq_pcpu; 3097 3098 txq->size = port->tx_ring_size; 3099 3100 /* Allocate memory for Tx descriptors */ 3101 txq->descs = dma_alloc_coherent(port->dev->dev.parent, 3102 txq->size * MVPP2_DESC_ALIGNED_SIZE, 3103 &txq->descs_dma, GFP_KERNEL); 3104 if (!txq->descs) 3105 return -ENOMEM; 3106 3107 txq->last_desc = txq->size - 1; 3108 3109 /* Set Tx descriptors queue starting address - indirect access */ 3110 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3111 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 3112 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 3113 txq->descs_dma); 3114 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 3115 txq->size & MVPP2_TXQ_DESC_SIZE_MASK); 3116 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); 3117 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, 3118 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 3119 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); 3120 val &= ~MVPP2_TXQ_PENDING_MASK; 3121 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); 3122 3123 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 3124 * for each existing TXQ. 
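	 * (Worked example, assuming MVPP2_MAX_TXQ == 8: port 1, txq log_id 2
	 * is assigned base 1 * 8 * 16 + 2 * 16 = 160.)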
3125 	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
3126 	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
3127 	 */
3128 	desc_per_txq = 16;
3129 	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3130 	       (txq->log_id * desc_per_txq);
3131 
3132 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
3133 			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3134 			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
3135 	put_cpu();
3136 
3137 	/* WRR / EJP configuration - indirect access */
3138 	tx_port_num = mvpp2_egress_port(port);
3139 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3140 
3141 	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3142 	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3143 	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3144 	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3145 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3146 
3147 	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3148 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3149 		    val);
3150 
3151 	for (thread = 0; thread < port->priv->nthreads; thread++) {
3152 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3153 		txq_pcpu->size = txq->size;
3154 		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
3155 						sizeof(*txq_pcpu->buffs),
3156 						GFP_KERNEL);
3157 		if (!txq_pcpu->buffs)
3158 			return -ENOMEM;
3159 
3160 		txq_pcpu->count = 0;
3161 		txq_pcpu->reserved_num = 0;
3162 		txq_pcpu->txq_put_index = 0;
3163 		txq_pcpu->txq_get_index = 0;
3164 		txq_pcpu->tso_headers = NULL;
3165 
3166 		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
3167 		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
3168 
3169 		txq_pcpu->tso_headers =
3170 			dma_alloc_coherent(port->dev->dev.parent,
3171 					   txq_pcpu->size * TSO_HEADER_SIZE,
3172 					   &txq_pcpu->tso_headers_dma,
3173 					   GFP_KERNEL);
3174 		if (!txq_pcpu->tso_headers)
3175 			return -ENOMEM;
3176 	}
3177 
3178 	return 0;
3179 }
3180 
3181 /* Free allocated TXQ resources */
3182 static void mvpp2_txq_deinit(struct mvpp2_port *port,
3183 			     struct mvpp2_tx_queue *txq)
3184 {
3185 	struct mvpp2_txq_pcpu *txq_pcpu;
3186 	unsigned int thread;
3187 
3188 	for (thread = 0; thread < port->priv->nthreads; thread++) {
3189 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3190 		kfree(txq_pcpu->buffs);
3191 
3192 		if (txq_pcpu->tso_headers)
3193 			dma_free_coherent(port->dev->dev.parent,
3194 					  txq_pcpu->size * TSO_HEADER_SIZE,
3195 					  txq_pcpu->tso_headers,
3196 					  txq_pcpu->tso_headers_dma);
3197 
3198 		txq_pcpu->tso_headers = NULL;
3199 	}
3200 
3201 	if (txq->descs)
3202 		dma_free_coherent(port->dev->dev.parent,
3203 				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
3204 				  txq->descs, txq->descs_dma);
3205 
3206 	txq->descs = NULL;
3207 	txq->last_desc = 0;
3208 	txq->next_desc_to_proc = 0;
3209 	txq->descs_dma = 0;
3210 
3211 	/* Set minimum bandwidth for disabled TXQs */
3212 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
3213 
3214 	/* Set Tx descriptors queue starting address and size */
3215 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3216 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3217 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
3218 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
3219 	put_cpu();
3220 }
3221 
3222 /* Drain and clean up a single Tx queue */
3223 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3224 {
3225 	struct mvpp2_txq_pcpu *txq_pcpu;
3226 	int delay, pending;
3227 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3228 	u32 val;
3229 
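	/* Indirectly select the queue, then enable draining so the hardware
	 * can complete any descriptors still pending while we poll below.
	 */
3230 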
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 3231 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); 3232 val |= MVPP2_TXQ_DRAIN_EN_MASK; 3233 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); 3234 3235 /* The napi queue has been stopped so wait for all packets 3236 * to be transmitted. 3237 */ 3238 delay = 0; 3239 do { 3240 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 3241 netdev_warn(port->dev, 3242 "port %d: cleaning queue %d timed out\n", 3243 port->id, txq->log_id); 3244 break; 3245 } 3246 mdelay(1); 3247 delay++; 3248 3249 pending = mvpp2_thread_read(port->priv, thread, 3250 MVPP2_TXQ_PENDING_REG); 3251 pending &= MVPP2_TXQ_PENDING_MASK; 3252 } while (pending); 3253 3254 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 3255 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); 3256 put_cpu(); 3257 3258 for (thread = 0; thread < port->priv->nthreads; thread++) { 3259 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3260 3261 /* Release all packets */ 3262 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 3263 3264 /* Reset queue */ 3265 txq_pcpu->count = 0; 3266 txq_pcpu->txq_put_index = 0; 3267 txq_pcpu->txq_get_index = 0; 3268 } 3269 } 3270 3271 /* Cleanup all Tx queues */ 3272 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 3273 { 3274 struct mvpp2_tx_queue *txq; 3275 int queue; 3276 u32 val; 3277 3278 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 3279 3280 /* Reset Tx ports and delete Tx queues */ 3281 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 3282 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 3283 3284 for (queue = 0; queue < port->ntxqs; queue++) { 3285 txq = port->txqs[queue]; 3286 mvpp2_txq_clean(port, txq); 3287 mvpp2_txq_deinit(port, txq); 3288 } 3289 3290 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 3291 3292 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 3293 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 3294 } 3295 3296 /* Cleanup all Rx queues */ 3297 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 3298 { 3299 int queue; 3300 3301 for (queue = 0; queue < port->nrxqs; queue++) 3302 mvpp2_rxq_deinit(port, port->rxqs[queue]); 3303 3304 if (port->tx_fc) 3305 mvpp2_rxq_disable_fc(port); 3306 } 3307 3308 /* Init all Rx queues for port */ 3309 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 3310 { 3311 int queue, err; 3312 3313 for (queue = 0; queue < port->nrxqs; queue++) { 3314 err = mvpp2_rxq_init(port, port->rxqs[queue]); 3315 if (err) 3316 goto err_cleanup; 3317 } 3318 3319 if (port->tx_fc) 3320 mvpp2_rxq_enable_fc(port); 3321 3322 return 0; 3323 3324 err_cleanup: 3325 mvpp2_cleanup_rxqs(port); 3326 return err; 3327 } 3328 3329 /* Init all tx queues for port */ 3330 static int mvpp2_setup_txqs(struct mvpp2_port *port) 3331 { 3332 struct mvpp2_tx_queue *txq; 3333 int queue, err; 3334 3335 for (queue = 0; queue < port->ntxqs; queue++) { 3336 txq = port->txqs[queue]; 3337 err = mvpp2_txq_init(port, txq); 3338 if (err) 3339 goto err_cleanup; 3340 3341 /* Assign this queue to a CPU */ 3342 if (queue < num_possible_cpus()) 3343 netif_set_xps_queue(port->dev, cpumask_of(queue), queue); 3344 } 3345 3346 if (port->has_tx_irqs) { 3347 mvpp2_tx_time_coal_set(port); 3348 for (queue = 0; queue < port->ntxqs; queue++) { 3349 txq = port->txqs[queue]; 3350 mvpp2_tx_pkts_coal_set(port, txq); 3351 } 3352 } 3353 3354 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 3355 return 0; 3356 3357 err_cleanup: 3358 mvpp2_cleanup_txqs(port); 3359 return err; 3360 } 3361 3362 /* The 
per-queue-vector interrupt callback */
3363 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
3364 {
3365 	struct mvpp2_queue_vector *qv = dev_id;
3366 
3367 	mvpp2_qvec_interrupt_disable(qv);
3368 
3369 	napi_schedule(&qv->napi);
3370 
3371 	return IRQ_HANDLED;
3372 }
3373 
3374 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
3375 {
3376 	struct skb_shared_hwtstamps shhwtstamps;
3377 	struct mvpp2_hwtstamp_queue *queue;
3378 	struct sk_buff *skb;
3379 	void __iomem *ptp_q;
3380 	unsigned int id;
3381 	u32 r0, r1, r2;
3382 
3383 	ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3384 	if (nq)
3385 		ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
3386 
3387 	queue = &port->tx_hwtstamp_queue[nq];
3388 
3389 	while (1) {
3390 		r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3391 		if (!r0)
3392 			break;
3393 
3394 		r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3395 		r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3396 
3397 		id = (r0 >> 1) & 31;
3398 
3399 		skb = queue->skb[id];
3400 		queue->skb[id] = NULL;
3401 		if (skb) {
3402 			u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3403 
3404 			mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3405 			skb_tstamp_tx(skb, &shhwtstamps);
3406 			dev_kfree_skb_any(skb);
3407 		}
3408 	}
3409 }
3410 
3411 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3412 {
3413 	void __iomem *ptp;
3414 	u32 val;
3415 
3416 	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3417 	val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3418 	if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3419 		mvpp2_isr_handle_ptp_queue(port, 0);
3420 	if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3421 		mvpp2_isr_handle_ptp_queue(port, 1);
3422 }
3423 
3424 static void mvpp2_isr_handle_link(struct mvpp2_port *port,
3425 				  struct phylink_pcs *pcs, bool link)
3426 {
3427 	struct net_device *dev = port->dev;
3428 
3429 	if (port->phylink) {
3430 		phylink_pcs_change(pcs, link);
3431 		return;
3432 	}
3433 
3434 	if (!netif_running(dev))
3435 		return;
3436 
3437 	if (link) {
3438 		mvpp2_interrupts_enable(port);
3439 
3440 		mvpp2_egress_enable(port);
3441 		mvpp2_ingress_enable(port);
3442 		netif_carrier_on(dev);
3443 		netif_tx_wake_all_queues(dev);
3444 	} else {
3445 		netif_tx_stop_all_queues(dev);
3446 		netif_carrier_off(dev);
3447 		mvpp2_ingress_disable(port);
3448 		mvpp2_egress_disable(port);
3449 
3450 		mvpp2_interrupts_disable(port);
3451 	}
3452 }
3453 
3454 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3455 {
3456 	bool link;
3457 	u32 val;
3458 
3459 	val = readl(port->base + MVPP22_XLG_INT_STAT);
3460 	if (val & MVPP22_XLG_INT_STAT_LINK) {
3461 		val = readl(port->base + MVPP22_XLG_STATUS);
3462 		link = (val & MVPP22_XLG_STATUS_LINK_UP);
3463 		mvpp2_isr_handle_link(port, &port->pcs_xlg, link);
3464 	}
3465 }
3466 
3467 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3468 {
3469 	bool link;
3470 	u32 val;
3471 
3472 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3473 	    phy_interface_mode_is_8023z(port->phy_interface) ||
3474 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3475 		val = readl(port->base + MVPP22_GMAC_INT_STAT);
3476 		if (val & MVPP22_GMAC_INT_STAT_LINK) {
3477 			val = readl(port->base + MVPP2_GMAC_STATUS0);
3478 			link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3479 			mvpp2_isr_handle_link(port, &port->pcs_gmac, link);
3480 		}
3481 	}
3482 }
3483 
3484 /* Per-port interrupt for link status changes */
3485 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3486 {
3487 	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3488 	u32 val;
3489 
3490 	mvpp22_gop_mask_irq(port);
3491 
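	/* Dispatch on the active MAC: the XLG external cause register for the
	 * 10G modes, the GMAC summary cause register otherwise; both paths
	 * also check for latched PTP events.
	 */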
3492 if (mvpp2_port_supports_xlg(port) && 3493 mvpp2_is_xlg(port->phy_interface)) { 3494 /* Check the external status register */ 3495 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT); 3496 if (val & MVPP22_XLG_EXT_INT_STAT_XLG) 3497 mvpp2_isr_handle_xlg(port); 3498 if (val & MVPP22_XLG_EXT_INT_STAT_PTP) 3499 mvpp2_isr_handle_ptp(port); 3500 } else { 3501 /* If it's not the XLG, we must be using the GMAC. 3502 * Check the summary status. 3503 */ 3504 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT); 3505 if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL) 3506 mvpp2_isr_handle_gmac_internal(port); 3507 if (val & MVPP22_GMAC_INT_SUM_STAT_PTP) 3508 mvpp2_isr_handle_ptp(port); 3509 } 3510 3511 mvpp22_gop_unmask_irq(port); 3512 return IRQ_HANDLED; 3513 } 3514 3515 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) 3516 { 3517 struct net_device *dev; 3518 struct mvpp2_port *port; 3519 struct mvpp2_port_pcpu *port_pcpu; 3520 unsigned int tx_todo, cause; 3521 3522 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); 3523 dev = port_pcpu->dev; 3524 3525 if (!netif_running(dev)) 3526 return HRTIMER_NORESTART; 3527 3528 port_pcpu->timer_scheduled = false; 3529 port = netdev_priv(dev); 3530 3531 /* Process all the Tx queues */ 3532 cause = (1 << port->ntxqs) - 1; 3533 tx_todo = mvpp2_tx_done(port, cause, 3534 mvpp2_cpu_to_thread(port->priv, smp_processor_id())); 3535 3536 /* Set the timer in case not all the packets were processed */ 3537 if (tx_todo && !port_pcpu->timer_scheduled) { 3538 port_pcpu->timer_scheduled = true; 3539 hrtimer_forward_now(&port_pcpu->tx_done_timer, 3540 MVPP2_TXDONE_HRTIMER_PERIOD_NS); 3541 3542 return HRTIMER_RESTART; 3543 } 3544 return HRTIMER_NORESTART; 3545 } 3546 3547 /* Main RX/TX processing routines */ 3548 3549 /* Display more error info */ 3550 static void mvpp2_rx_error(struct mvpp2_port *port, 3551 struct mvpp2_rx_desc *rx_desc) 3552 { 3553 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 3554 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 3555 char *err_str = NULL; 3556 3557 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 3558 case MVPP2_RXD_ERR_CRC: 3559 err_str = "crc"; 3560 break; 3561 case MVPP2_RXD_ERR_OVERRUN: 3562 err_str = "overrun"; 3563 break; 3564 case MVPP2_RXD_ERR_RESOURCE: 3565 err_str = "resource"; 3566 break; 3567 } 3568 if (err_str && net_ratelimit()) 3569 netdev_err(port->dev, 3570 "bad rx status %08x (%s error), size=%zu\n", 3571 status, err_str, sz); 3572 } 3573 3574 /* Handle RX checksum offload */ 3575 static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status) 3576 { 3577 if (((status & MVPP2_RXD_L3_IP4) && 3578 !(status & MVPP2_RXD_IP4_HEADER_ERR)) || 3579 (status & MVPP2_RXD_L3_IP6)) 3580 if (((status & MVPP2_RXD_L4_UDP) || 3581 (status & MVPP2_RXD_L4_TCP)) && 3582 (status & MVPP2_RXD_L4_CSUM_OK)) 3583 return CHECKSUM_UNNECESSARY; 3584 3585 return CHECKSUM_NONE; 3586 } 3587 3588 /* Allocate a new skb and add it to BM pool */ 3589 static int mvpp2_rx_refill(struct mvpp2_port *port, 3590 struct mvpp2_bm_pool *bm_pool, 3591 struct page_pool *page_pool, int pool) 3592 { 3593 dma_addr_t dma_addr; 3594 phys_addr_t phys_addr; 3595 void *buf; 3596 3597 buf = mvpp2_buf_alloc(port, bm_pool, page_pool, 3598 &dma_addr, &phys_addr, GFP_ATOMIC); 3599 if (!buf) 3600 return -ENOMEM; 3601 3602 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 3603 3604 return 0; 3605 } 3606 3607 /* Handle tx checksum */ 3608 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) 3609 { 3610 if (skb->ip_summed == 
CHECKSUM_PARTIAL) { 3611 int ip_hdr_len = 0; 3612 u8 l4_proto; 3613 __be16 l3_proto = vlan_get_protocol(skb); 3614 3615 if (l3_proto == htons(ETH_P_IP)) { 3616 struct iphdr *ip4h = ip_hdr(skb); 3617 3618 /* Calculate IPv4 checksum and L4 checksum */ 3619 ip_hdr_len = ip4h->ihl; 3620 l4_proto = ip4h->protocol; 3621 } else if (l3_proto == htons(ETH_P_IPV6)) { 3622 struct ipv6hdr *ip6h = ipv6_hdr(skb); 3623 3624 /* Read l4_protocol from one of IPv6 extra headers */ 3625 if (skb_network_header_len(skb) > 0) 3626 ip_hdr_len = (skb_network_header_len(skb) >> 2); 3627 l4_proto = ip6h->nexthdr; 3628 } else { 3629 return MVPP2_TXD_L4_CSUM_NOT; 3630 } 3631 3632 return mvpp2_txq_desc_csum(skb_network_offset(skb), 3633 l3_proto, ip_hdr_len, l4_proto); 3634 } 3635 3636 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; 3637 } 3638 3639 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) 3640 { 3641 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3642 struct mvpp2_tx_queue *aggr_txq; 3643 struct mvpp2_txq_pcpu *txq_pcpu; 3644 struct mvpp2_tx_queue *txq; 3645 struct netdev_queue *nq; 3646 3647 txq = port->txqs[txq_id]; 3648 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3649 nq = netdev_get_tx_queue(port->dev, txq_id); 3650 aggr_txq = &port->priv->aggr_txqs[thread]; 3651 3652 txq_pcpu->reserved_num -= nxmit; 3653 txq_pcpu->count += nxmit; 3654 aggr_txq->count += nxmit; 3655 3656 /* Enable transmit */ 3657 wmb(); 3658 mvpp2_aggr_txq_pend_desc_add(port, nxmit); 3659 3660 if (txq_pcpu->count >= txq_pcpu->stop_threshold) 3661 netif_tx_stop_queue(nq); 3662 3663 /* Finalize TX processing */ 3664 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 3665 mvpp2_txq_done(port, txq, txq_pcpu); 3666 } 3667 3668 static int 3669 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, 3670 struct xdp_frame *xdpf, bool dma_map) 3671 { 3672 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3673 u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | 3674 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 3675 enum mvpp2_tx_buf_type buf_type; 3676 struct mvpp2_txq_pcpu *txq_pcpu; 3677 struct mvpp2_tx_queue *aggr_txq; 3678 struct mvpp2_tx_desc *tx_desc; 3679 struct mvpp2_tx_queue *txq; 3680 int ret = MVPP2_XDP_TX; 3681 dma_addr_t dma_addr; 3682 3683 txq = port->txqs[txq_id]; 3684 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3685 aggr_txq = &port->priv->aggr_txqs[thread]; 3686 3687 /* Check number of available descriptors */ 3688 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || 3689 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { 3690 ret = MVPP2_XDP_DROPPED; 3691 goto out; 3692 } 3693 3694 /* Get a descriptor for the first part of the packet */ 3695 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 3696 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 3697 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); 3698 3699 if (dma_map) { 3700 /* XDP_REDIRECT or AF_XDP */ 3701 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, 3702 xdpf->len, DMA_TO_DEVICE); 3703 3704 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { 3705 mvpp2_txq_desc_put(txq); 3706 ret = MVPP2_XDP_DROPPED; 3707 goto out; 3708 } 3709 3710 buf_type = MVPP2_TYPE_XDP_NDO; 3711 } else { 3712 /* XDP_TX */ 3713 struct page *page = virt_to_page(xdpf->data); 3714 3715 dma_addr = page_pool_get_dma_addr(page) + 3716 sizeof(*xdpf) + xdpf->headroom; 3717 dma_sync_single_for_device(port->dev->dev.parent, dma_addr, 3718 xdpf->len, 
DMA_BIDIRECTIONAL);
3719
3720 		buf_type = MVPP2_TYPE_XDP_TX;
3721 	}
3722
3723 	mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3724
3725 	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3726 	mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3727
3728 out:
3729 	return ret;
3730 }
3731
3732 static int
3733 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3734 {
3735 	struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3736 	struct xdp_frame *xdpf;
3737 	u16 txq_id;
3738 	int ret;
3739
3740 	xdpf = xdp_convert_buff_to_frame(xdp);
3741 	if (unlikely(!xdpf))
3742 		return MVPP2_XDP_DROPPED;
3743
3744 	/* The first half of the TX queues is used for XPS,
3745 	 * the second half for XDP_TX.
3746 	 */
3747 	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3748
3749 	ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3750 	if (ret == MVPP2_XDP_TX) {
3751 		u64_stats_update_begin(&stats->syncp);
3752 		stats->tx_bytes += xdpf->len;
3753 		stats->tx_packets++;
3754 		stats->xdp_tx++;
3755 		u64_stats_update_end(&stats->syncp);
3756
3757 		mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3758 	} else {
3759 		u64_stats_update_begin(&stats->syncp);
3760 		stats->xdp_tx_err++;
3761 		u64_stats_update_end(&stats->syncp);
3762 	}
3763
3764 	return ret;
3765 }
3766
3767 static int
3768 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3769 	       struct xdp_frame **frames, u32 flags)
3770 {
3771 	struct mvpp2_port *port = netdev_priv(dev);
3772 	int i, nxmit_byte = 0, nxmit = 0;
3773 	struct mvpp2_pcpu_stats *stats;
3774 	u16 txq_id;
3775 	u32 ret;
3776
3777 	if (unlikely(test_bit(0, &port->state)))
3778 		return -ENETDOWN;
3779
3780 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3781 		return -EINVAL;
3782
3783 	/* The first half of the TX queues is used for XPS,
3784 	 * the second half for XDP_TX.
3785 	 */
3786 	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3787
3788 	for (i = 0; i < num_frame; i++) {
3789 		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3790 		if (ret != MVPP2_XDP_TX)
3791 			break;
3792
3793 		nxmit_byte += frames[i]->len;
3794 		nxmit++;
3795 	}
3796
3797 	if (likely(nxmit > 0))
3798 		mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3799
3800 	stats = this_cpu_ptr(port->stats);
3801 	u64_stats_update_begin(&stats->syncp);
3802 	stats->tx_bytes += nxmit_byte;
3803 	stats->tx_packets += nxmit;
3804 	stats->xdp_xmit += nxmit;
3805 	stats->xdp_xmit_err += num_frame - nxmit;
3806 	u64_stats_update_end(&stats->syncp);
3807
3808 	return nxmit;
3809 }
3810
3811 static int
3812 mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
3813 	      struct xdp_buff *xdp, struct page_pool *pp,
3814 	      struct mvpp2_pcpu_stats *stats)
3815 {
3816 	unsigned int len, sync, err;
3817 	struct page *page;
3818 	u32 ret, act;
3819
3820 	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3821 	act = bpf_prog_run_xdp(prog, xdp);
3822
3823 	/* Due to xdp_adjust_tail, the for_device DMA sync must cover the max length the CPU may have touched */
3824 	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3825 	sync = max(sync, len);
3826
3827 	switch (act) {
3828 	case XDP_PASS:
3829 		stats->xdp_pass++;
3830 		ret = MVPP2_XDP_PASS;
3831 		break;
3832 	case XDP_REDIRECT:
3833 		err = xdp_do_redirect(port->dev, xdp, prog);
3834 		if (unlikely(err)) {
3835 			ret = MVPP2_XDP_DROPPED;
3836 			page = virt_to_head_page(xdp->data);
3837 			page_pool_put_page(pp, page, sync, true);
3838 		} else {
3839 			ret = MVPP2_XDP_REDIR;
3840 			stats->xdp_redirect++;
3841 		}
3842 		break;
3843 	case XDP_TX:
3844 		ret = mvpp2_xdp_xmit_back(port, xdp);
3845 		if (ret != MVPP2_XDP_TX) {
3846 			page = virt_to_head_page(xdp->data);
3847 			page_pool_put_page(pp, page, sync, true);
3848 		}
3849 		break;
3850 	default:
3851 		bpf_warn_invalid_xdp_action(port->dev, prog, act);
3852 		fallthrough;
3853 	case XDP_ABORTED:
3854 		trace_xdp_exception(port->dev, prog, act);
3855 		fallthrough;
3856 	case XDP_DROP:
3857 		page = virt_to_head_page(xdp->data);
3858 		page_pool_put_page(pp, page, sync, true);
3859 		ret = MVPP2_XDP_DROPPED;
3860 		stats->xdp_drop++;
3861 		break;
3862 	}
3863
3864 	return ret;
3865 }
3866
3867 static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
3868 				    int pool, u32 rx_status)
3869 {
3870 	phys_addr_t phys_addr, phys_addr_next;
3871 	dma_addr_t dma_addr, dma_addr_next;
3872 	struct mvpp2_buff_hdr *buff_hdr;
3873
3874 	phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3875 	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3876
3877 	do {
3878 		buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
3879
3880 		phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
3881 		dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
3882
3883 		if (port->priv->hw_version >= MVPP22) {
3884 			phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
3885 			dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
3886 		}
3887
3888 		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3889
3890 		phys_addr = phys_addr_next;
3891 		dma_addr = dma_addr_next;
3892
3893 	} while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
3894 }
3895
3896 /* Main rx processing */
3897 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3898 		    int rx_todo, struct mvpp2_rx_queue *rxq)
3899 {
3900 	struct net_device *dev = port->dev;
3901 	struct mvpp2_pcpu_stats ps = {};
3902 	enum dma_data_direction dma_dir;
3903 	struct bpf_prog *xdp_prog;
3904 	struct xdp_buff xdp;
3905 	int rx_received;
3906 	int rx_done = 0;
3907 	u32 xdp_ret = 0;
3908
3909 	xdp_prog = READ_ONCE(port->xdp_prog);
3910
3911 	/* Get the number of received packets and clamp the to-do budget */
3912 	rx_received = mvpp2_rxq_received(port, rxq->id);
3913 	if (rx_todo > rx_received)
3914 		rx_todo = rx_received;
3915
3916 	while (rx_done < rx_todo) {
3917 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3918 		struct mvpp2_bm_pool *bm_pool;
3919 		struct page_pool *pp = NULL;
3920 		struct sk_buff *skb;
3921 		unsigned int frag_size;
3922 		dma_addr_t dma_addr;
3923 		phys_addr_t phys_addr;
3924 		u32 rx_status, timestamp;
3925 		int pool, rx_bytes, err, ret;
3926 		struct page *page;
3927 		void *data;
3928
3929 		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3930 		data = (void *)phys_to_virt(phys_addr);
3931 		page = virt_to_page(data);
3932 		prefetch(page);
3933
3934 		rx_done++;
3935 		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3936 		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3937 		rx_bytes -= MVPP2_MH_SIZE;
3938 		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3939
3940 		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3941 			MVPP2_RXD_BM_POOL_ID_OFFS;
3942 		bm_pool = &port->priv->bm_pools[pool];
3943
3944 		if (port->priv->percpu_pools) {
3945 			pp = port->priv->page_pool[pool];
3946 			dma_dir = page_pool_get_dma_dir(pp);
3947 		} else {
3948 			dma_dir = DMA_FROM_DEVICE;
3949 		}
3950
3951 		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3952 					rx_bytes + MVPP2_MH_SIZE,
3953 					dma_dir);
3954
3955 		/* Buffer header not supported */
3956 		if (rx_status & MVPP2_RXD_BUF_HDR)
3957 			goto err_drop_frame;
3958
3959 		/* In case of an error, release the requested buffer pointer
3960 		 * to the Buffer 
Manager. This request process is controlled 3961 * by the hardware, and the information about the buffer is 3962 * comprised by the RX descriptor. 3963 */ 3964 if (rx_status & MVPP2_RXD_ERR_SUMMARY) 3965 goto err_drop_frame; 3966 3967 /* Prefetch header */ 3968 prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); 3969 3970 if (bm_pool->frag_size > PAGE_SIZE) 3971 frag_size = 0; 3972 else 3973 frag_size = bm_pool->frag_size; 3974 3975 if (xdp_prog) { 3976 struct xdp_rxq_info *xdp_rxq; 3977 3978 if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE) 3979 xdp_rxq = &rxq->xdp_rxq_short; 3980 else 3981 xdp_rxq = &rxq->xdp_rxq_long; 3982 3983 xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq); 3984 xdp_prepare_buff(&xdp, data, 3985 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM, 3986 rx_bytes, false); 3987 3988 ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps); 3989 3990 if (ret) { 3991 xdp_ret |= ret; 3992 err = mvpp2_rx_refill(port, bm_pool, pp, pool); 3993 if (err) { 3994 netdev_err(port->dev, "failed to refill BM pools\n"); 3995 goto err_drop_frame; 3996 } 3997 3998 ps.rx_packets++; 3999 ps.rx_bytes += rx_bytes; 4000 continue; 4001 } 4002 } 4003 4004 if (frag_size) 4005 skb = build_skb(data, frag_size); 4006 else 4007 skb = slab_build_skb(data); 4008 if (!skb) { 4009 netdev_warn(port->dev, "skb build failed\n"); 4010 goto err_drop_frame; 4011 } 4012 4013 /* If we have RX hardware timestamping enabled, grab the 4014 * timestamp from the queue and convert. 4015 */ 4016 if (mvpp22_rx_hwtstamping(port)) { 4017 timestamp = le32_to_cpu(rx_desc->pp22.timestamp); 4018 mvpp22_tai_tstamp(port->priv->tai, timestamp, 4019 skb_hwtstamps(skb)); 4020 } 4021 4022 err = mvpp2_rx_refill(port, bm_pool, pp, pool); 4023 if (err) { 4024 netdev_err(port->dev, "failed to refill BM pools\n"); 4025 dev_kfree_skb_any(skb); 4026 goto err_drop_frame; 4027 } 4028 4029 if (pp) 4030 skb_mark_for_recycle(skb); 4031 else 4032 dma_unmap_single_attrs(dev->dev.parent, dma_addr, 4033 bm_pool->buf_size, DMA_FROM_DEVICE, 4034 DMA_ATTR_SKIP_CPU_SYNC); 4035 4036 ps.rx_packets++; 4037 ps.rx_bytes += rx_bytes; 4038 4039 skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); 4040 skb_put(skb, rx_bytes); 4041 skb->ip_summed = mvpp2_rx_csum(port, rx_status); 4042 skb->protocol = eth_type_trans(skb, dev); 4043 4044 napi_gro_receive(napi, skb); 4045 continue; 4046 4047 err_drop_frame: 4048 dev->stats.rx_errors++; 4049 mvpp2_rx_error(port, rx_desc); 4050 /* Return the buffer to the pool */ 4051 if (rx_status & MVPP2_RXD_BUF_HDR) 4052 mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status); 4053 else 4054 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 4055 } 4056 4057 if (xdp_ret & MVPP2_XDP_REDIR) 4058 xdp_do_flush(); 4059 4060 if (ps.rx_packets) { 4061 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); 4062 4063 u64_stats_update_begin(&stats->syncp); 4064 stats->rx_packets += ps.rx_packets; 4065 stats->rx_bytes += ps.rx_bytes; 4066 /* xdp */ 4067 stats->xdp_redirect += ps.xdp_redirect; 4068 stats->xdp_pass += ps.xdp_pass; 4069 stats->xdp_drop += ps.xdp_drop; 4070 u64_stats_update_end(&stats->syncp); 4071 } 4072 4073 /* Update Rx queue management counters */ 4074 wmb(); 4075 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); 4076 4077 return rx_todo; 4078 } 4079 4080 static inline void 4081 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 4082 struct mvpp2_tx_desc *desc) 4083 { 4084 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 4085 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 
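	/* Note: TSO header buffers are deliberately not unmapped below; they
	 * come from the per-queue tso_headers region, a coherent DMA buffer
	 * that is allocated once and reused, so only regular data buffers
	 * need dma_unmap_single().
	 */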
4086 4087 dma_addr_t buf_dma_addr = 4088 mvpp2_txdesc_dma_addr_get(port, desc); 4089 size_t buf_sz = 4090 mvpp2_txdesc_size_get(port, desc); 4091 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) 4092 dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 4093 buf_sz, DMA_TO_DEVICE); 4094 mvpp2_txq_desc_put(txq); 4095 } 4096 4097 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port, 4098 struct mvpp2_tx_desc *desc) 4099 { 4100 /* We only need to clear the low bits */ 4101 if (port->priv->hw_version >= MVPP22) 4102 desc->pp22.ptp_descriptor &= 4103 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); 4104 } 4105 4106 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port, 4107 struct mvpp2_tx_desc *tx_desc, 4108 struct sk_buff *skb) 4109 { 4110 struct mvpp2_hwtstamp_queue *queue; 4111 unsigned int mtype, type, i; 4112 struct ptp_header *hdr; 4113 u64 ptpdesc; 4114 4115 if (port->priv->hw_version == MVPP21 || 4116 port->tx_hwtstamp_type == HWTSTAMP_TX_OFF) 4117 return false; 4118 4119 type = ptp_classify_raw(skb); 4120 if (!type) 4121 return false; 4122 4123 hdr = ptp_parse_header(skb, type); 4124 if (!hdr) 4125 return false; 4126 4127 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4128 4129 ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN | 4130 MVPP22_PTP_ACTION_CAPTURE; 4131 queue = &port->tx_hwtstamp_queue[0]; 4132 4133 switch (type & PTP_CLASS_VMASK) { 4134 case PTP_CLASS_V1: 4135 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1); 4136 break; 4137 4138 case PTP_CLASS_V2: 4139 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2); 4140 mtype = hdr->tsmt & 15; 4141 /* Direct PTP Sync messages to queue 1 */ 4142 if (mtype == 0) { 4143 ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT; 4144 queue = &port->tx_hwtstamp_queue[1]; 4145 } 4146 break; 4147 } 4148 4149 /* Take a reference on the skb and insert into our queue */ 4150 i = queue->next; 4151 queue->next = (i + 1) & 31; 4152 if (queue->skb[i]) 4153 dev_kfree_skb_any(queue->skb[i]); 4154 queue->skb[i] = skb_get(skb); 4155 4156 ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i); 4157 4158 /* 4159 * 3:0 - PTPAction 4160 * 6:4 - PTPPacketFormat 4161 * 7 - PTP_CF_WraparoundCheckEn 4162 * 9:8 - IngressTimestampSeconds[1:0] 4163 * 10 - Reserved 4164 * 11 - MACTimestampingEn 4165 * 17:12 - PTP_TimestampQueueEntryID[5:0] 4166 * 18 - PTPTimestampQueueSelect 4167 * 19 - UDPChecksumUpdateEn 4168 * 27:20 - TimestampOffset 4169 * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header 4170 * NTPTs, Y.1731 - L3 to timestamp entry 4171 * 35:28 - UDP Checksum Offset 4172 * 4173 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12) 4174 */ 4175 tx_desc->pp22.ptp_descriptor &= 4176 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); 4177 tx_desc->pp22.ptp_descriptor |= 4178 cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW); 4179 tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL); 4180 tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40); 4181 4182 return true; 4183 } 4184 4185 /* Handle tx fragmentation processing */ 4186 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, 4187 struct mvpp2_tx_queue *aggr_txq, 4188 struct mvpp2_tx_queue *txq) 4189 { 4190 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 4191 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 4192 struct mvpp2_tx_desc *tx_desc; 4193 int i; 4194 dma_addr_t buf_dma_addr; 4195 4196 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 4197 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4198 void *addr = skb_frag_address(frag); 
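		/* One descriptor and one streaming DMA mapping per fragment;
		 * only the last fragment carries MVPP2_TXD_L_DESC and the skb
		 * pointer, so tx-done completion releases the skb exactly once.
		 */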
4199 4200 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 4201 mvpp2_txdesc_clear_ptp(port, tx_desc); 4202 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4203 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); 4204 4205 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, 4206 skb_frag_size(frag), 4207 DMA_TO_DEVICE); 4208 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { 4209 mvpp2_txq_desc_put(txq); 4210 goto cleanup; 4211 } 4212 4213 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 4214 4215 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 4216 /* Last descriptor */ 4217 mvpp2_txdesc_cmd_set(port, tx_desc, 4218 MVPP2_TXD_L_DESC); 4219 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); 4220 } else { 4221 /* Descriptor in the middle: Not First, Not Last */ 4222 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 4223 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); 4224 } 4225 } 4226 4227 return 0; 4228 cleanup: 4229 /* Release all descriptors that were used to map fragments of 4230 * this packet, as well as the corresponding DMA mappings 4231 */ 4232 for (i = i - 1; i >= 0; i--) { 4233 tx_desc = txq->descs + i; 4234 tx_desc_unmap_put(port, txq, tx_desc); 4235 } 4236 4237 return -ENOMEM; 4238 } 4239 4240 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, 4241 struct net_device *dev, 4242 struct mvpp2_tx_queue *txq, 4243 struct mvpp2_tx_queue *aggr_txq, 4244 struct mvpp2_txq_pcpu *txq_pcpu, 4245 int hdr_sz) 4246 { 4247 struct mvpp2_port *port = netdev_priv(dev); 4248 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 4249 dma_addr_t addr; 4250 4251 mvpp2_txdesc_clear_ptp(port, tx_desc); 4252 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4253 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); 4254 4255 addr = txq_pcpu->tso_headers_dma + 4256 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 4257 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); 4258 4259 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | 4260 MVPP2_TXD_F_DESC | 4261 MVPP2_TXD_PADDING_DISABLE); 4262 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); 4263 } 4264 4265 static inline int mvpp2_tso_put_data(struct sk_buff *skb, 4266 struct net_device *dev, struct tso_t *tso, 4267 struct mvpp2_tx_queue *txq, 4268 struct mvpp2_tx_queue *aggr_txq, 4269 struct mvpp2_txq_pcpu *txq_pcpu, 4270 int sz, bool left, bool last) 4271 { 4272 struct mvpp2_port *port = netdev_priv(dev); 4273 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 4274 dma_addr_t buf_dma_addr; 4275 4276 mvpp2_txdesc_clear_ptp(port, tx_desc); 4277 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4278 mvpp2_txdesc_size_set(port, tx_desc, sz); 4279 4280 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, 4281 DMA_TO_DEVICE); 4282 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 4283 mvpp2_txq_desc_put(txq); 4284 return -ENOMEM; 4285 } 4286 4287 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 4288 4289 if (!left) { 4290 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); 4291 if (last) { 4292 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); 4293 return 0; 4294 } 4295 } else { 4296 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 4297 } 4298 4299 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); 4300 return 0; 4301 } 4302 4303 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, 4304 struct mvpp2_tx_queue *txq, 4305 struct mvpp2_tx_queue *aggr_txq, 4306 struct mvpp2_txq_pcpu *txq_pcpu) 4307 { 4308 
struct mvpp2_port *port = netdev_priv(dev); 4309 int hdr_sz, i, len, descs = 0; 4310 struct tso_t tso; 4311 4312 /* Check number of available descriptors */ 4313 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || 4314 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 4315 tso_count_descs(skb))) 4316 return 0; 4317 4318 hdr_sz = tso_start(skb, &tso); 4319 4320 len = skb->len - hdr_sz; 4321 while (len > 0) { 4322 int left = min_t(int, skb_shinfo(skb)->gso_size, len); 4323 char *hdr = txq_pcpu->tso_headers + 4324 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 4325 4326 len -= left; 4327 descs++; 4328 4329 tso_build_hdr(skb, hdr, &tso, left, len == 0); 4330 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); 4331 4332 while (left > 0) { 4333 int sz = min_t(int, tso.size, left); 4334 left -= sz; 4335 descs++; 4336 4337 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, 4338 txq_pcpu, sz, left, len == 0)) 4339 goto release; 4340 tso_build_data(skb, &tso, sz); 4341 } 4342 } 4343 4344 return descs; 4345 4346 release: 4347 for (i = descs - 1; i >= 0; i--) { 4348 struct mvpp2_tx_desc *tx_desc = txq->descs + i; 4349 tx_desc_unmap_put(port, txq, tx_desc); 4350 } 4351 return 0; 4352 } 4353 4354 /* Main tx processing */ 4355 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) 4356 { 4357 struct mvpp2_port *port = netdev_priv(dev); 4358 struct mvpp2_tx_queue *txq, *aggr_txq; 4359 struct mvpp2_txq_pcpu *txq_pcpu; 4360 struct mvpp2_tx_desc *tx_desc; 4361 dma_addr_t buf_dma_addr; 4362 unsigned long flags = 0; 4363 unsigned int thread; 4364 int frags = 0; 4365 u16 txq_id; 4366 u32 tx_cmd; 4367 4368 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 4369 4370 txq_id = skb_get_queue_mapping(skb); 4371 txq = port->txqs[txq_id]; 4372 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 4373 aggr_txq = &port->priv->aggr_txqs[thread]; 4374 4375 if (test_bit(thread, &port->priv->lock_map)) 4376 spin_lock_irqsave(&port->tx_lock[thread], flags); 4377 4378 if (skb_is_gso(skb)) { 4379 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); 4380 goto out; 4381 } 4382 frags = skb_shinfo(skb)->nr_frags + 1; 4383 4384 /* Check number of available descriptors */ 4385 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || 4386 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { 4387 frags = 0; 4388 goto out; 4389 } 4390 4391 /* Get a descriptor for the first part of the packet */ 4392 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 4393 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || 4394 !mvpp2_tx_hw_tstamp(port, tx_desc, skb)) 4395 mvpp2_txdesc_clear_ptp(port, tx_desc); 4396 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4397 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); 4398 4399 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, 4400 skb_headlen(skb), DMA_TO_DEVICE); 4401 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 4402 mvpp2_txq_desc_put(txq); 4403 frags = 0; 4404 goto out; 4405 } 4406 4407 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 4408 4409 tx_cmd = mvpp2_skb_tx_csum(port, skb); 4410 4411 if (frags == 1) { 4412 /* First and Last descriptor */ 4413 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 4414 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 4415 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); 4416 } else { 4417 /* First but not Last */ 4418 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 4419 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 4420 mvpp2_txq_inc_put(port, 
txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); 4421 4422 /* Continue with other skb fragments */ 4423 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 4424 tx_desc_unmap_put(port, txq, tx_desc); 4425 frags = 0; 4426 } 4427 } 4428 4429 out: 4430 if (frags > 0) { 4431 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); 4432 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 4433 4434 txq_pcpu->reserved_num -= frags; 4435 txq_pcpu->count += frags; 4436 aggr_txq->count += frags; 4437 4438 /* Enable transmit */ 4439 wmb(); 4440 mvpp2_aggr_txq_pend_desc_add(port, frags); 4441 4442 if (txq_pcpu->count >= txq_pcpu->stop_threshold) 4443 netif_tx_stop_queue(nq); 4444 4445 u64_stats_update_begin(&stats->syncp); 4446 stats->tx_packets++; 4447 stats->tx_bytes += skb->len; 4448 u64_stats_update_end(&stats->syncp); 4449 } else { 4450 dev->stats.tx_dropped++; 4451 dev_kfree_skb_any(skb); 4452 } 4453 4454 /* Finalize TX processing */ 4455 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 4456 mvpp2_txq_done(port, txq, txq_pcpu); 4457 4458 /* Set the timer in case not all frags were processed */ 4459 if (!port->has_tx_irqs && txq_pcpu->count <= frags && 4460 txq_pcpu->count > 0) { 4461 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); 4462 4463 if (!port_pcpu->timer_scheduled) { 4464 port_pcpu->timer_scheduled = true; 4465 hrtimer_start(&port_pcpu->tx_done_timer, 4466 MVPP2_TXDONE_HRTIMER_PERIOD_NS, 4467 HRTIMER_MODE_REL_PINNED_SOFT); 4468 } 4469 } 4470 4471 if (test_bit(thread, &port->priv->lock_map)) 4472 spin_unlock_irqrestore(&port->tx_lock[thread], flags); 4473 4474 return NETDEV_TX_OK; 4475 } 4476 4477 static inline void mvpp2_cause_error(struct net_device *dev, int cause) 4478 { 4479 if (cause & MVPP2_CAUSE_FCS_ERR_MASK) 4480 netdev_err(dev, "FCS error\n"); 4481 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) 4482 netdev_err(dev, "rx fifo overrun error\n"); 4483 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) 4484 netdev_err(dev, "tx fifo underrun error\n"); 4485 } 4486 4487 static int mvpp2_poll(struct napi_struct *napi, int budget) 4488 { 4489 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; 4490 int rx_done = 0; 4491 struct mvpp2_port *port = netdev_priv(napi->dev); 4492 struct mvpp2_queue_vector *qv; 4493 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 4494 4495 qv = container_of(napi, struct mvpp2_queue_vector, napi); 4496 4497 /* Rx/Tx cause register 4498 * 4499 * Bits 0-15: each bit indicates received packets on the Rx queue 4500 * (bit 0 is for Rx queue 0). 4501 * 4502 * Bits 16-23: each bit indicates transmitted packets on the Tx queue 4503 * (bit 16 is for Tx queue 0). 
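 *
 * For example, a cause value of 0x0001000a in this layout means that Rx
 * queues 1 and 3 have packets pending and Tx queue 0 has completed
 * descriptors to reclaim.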
4504 * 4505 * Each CPU has its own Rx/Tx cause register 4506 */ 4507 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, 4508 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 4509 4510 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 4511 if (cause_misc) { 4512 mvpp2_cause_error(port->dev, cause_misc); 4513 4514 /* Clear the cause register */ 4515 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); 4516 mvpp2_thread_write(port->priv, thread, 4517 MVPP2_ISR_RX_TX_CAUSE_REG(port->id), 4518 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 4519 } 4520 4521 if (port->has_tx_irqs) { 4522 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 4523 if (cause_tx) { 4524 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; 4525 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); 4526 } 4527 } 4528 4529 /* Process RX packets */ 4530 cause_rx = cause_rx_tx & 4531 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); 4532 cause_rx <<= qv->first_rxq; 4533 cause_rx |= qv->pending_cause_rx; 4534 while (cause_rx && budget > 0) { 4535 int count; 4536 struct mvpp2_rx_queue *rxq; 4537 4538 rxq = mvpp2_get_rx_queue(port, cause_rx); 4539 if (!rxq) 4540 break; 4541 4542 count = mvpp2_rx(port, napi, budget, rxq); 4543 rx_done += count; 4544 budget -= count; 4545 if (budget > 0) { 4546 /* Clear the bit associated to this Rx queue 4547 * so that next iteration will continue from 4548 * the next Rx queue. 4549 */ 4550 cause_rx &= ~(1 << rxq->logic_rxq); 4551 } 4552 } 4553 4554 if (budget > 0) { 4555 cause_rx = 0; 4556 napi_complete_done(napi, rx_done); 4557 4558 mvpp2_qvec_interrupt_enable(qv); 4559 } 4560 qv->pending_cause_rx = cause_rx; 4561 return rx_done; 4562 } 4563 4564 static void mvpp22_mode_reconfigure(struct mvpp2_port *port, 4565 phy_interface_t interface) 4566 { 4567 u32 ctrl3; 4568 4569 /* Set the GMAC & XLG MAC in reset */ 4570 mvpp2_mac_reset_assert(port); 4571 4572 /* Set the MPCS and XPCS in reset */ 4573 mvpp22_pcs_reset_assert(port); 4574 4575 /* comphy reconfiguration */ 4576 mvpp22_comphy_init(port, interface); 4577 4578 /* gop reconfiguration */ 4579 mvpp22_gop_init(port, interface); 4580 4581 mvpp22_pcs_reset_deassert(port, interface); 4582 4583 if (mvpp2_port_supports_xlg(port)) { 4584 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); 4585 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 4586 4587 if (mvpp2_is_xlg(interface)) 4588 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; 4589 else 4590 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; 4591 4592 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); 4593 } 4594 4595 if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface)) 4596 mvpp2_xlg_max_rx_size_set(port); 4597 else 4598 mvpp2_gmac_max_rx_size_set(port); 4599 } 4600 4601 /* Set hw internals when starting port */ 4602 static void mvpp2_start_dev(struct mvpp2_port *port) 4603 { 4604 int i; 4605 4606 mvpp2_txp_max_tx_size_set(port); 4607 4608 for (i = 0; i < port->nqvecs; i++) 4609 napi_enable(&port->qvecs[i].napi); 4610 4611 /* Enable interrupts on all threads */ 4612 mvpp2_interrupts_enable(port); 4613 4614 if (port->priv->hw_version >= MVPP22) 4615 mvpp22_mode_reconfigure(port, port->phy_interface); 4616 4617 if (port->phylink) { 4618 phylink_start(port->phylink); 4619 } else { 4620 mvpp2_acpi_start(port); 4621 } 4622 4623 netif_tx_start_all_queues(port->dev); 4624 4625 clear_bit(0, &port->state); 4626 } 4627 4628 /* Set hw internals when stopping port */ 4629 static void mvpp2_stop_dev(struct mvpp2_port *port) 4630 { 4631 int i; 4632 4633 set_bit(0, &port->state); 4634 4635 
/* Disable interrupts on all threads */ 4636 mvpp2_interrupts_disable(port); 4637 4638 for (i = 0; i < port->nqvecs; i++) 4639 napi_disable(&port->qvecs[i].napi); 4640 4641 if (port->phylink) 4642 phylink_stop(port->phylink); 4643 phy_power_off(port->comphy); 4644 } 4645 4646 static int mvpp2_check_ringparam_valid(struct net_device *dev, 4647 struct ethtool_ringparam *ring) 4648 { 4649 u16 new_rx_pending = ring->rx_pending; 4650 u16 new_tx_pending = ring->tx_pending; 4651 4652 if (ring->rx_pending == 0 || ring->tx_pending == 0) 4653 return -EINVAL; 4654 4655 if (ring->rx_pending > MVPP2_MAX_RXD_MAX) 4656 new_rx_pending = MVPP2_MAX_RXD_MAX; 4657 else if (ring->rx_pending < MSS_THRESHOLD_START) 4658 new_rx_pending = MSS_THRESHOLD_START; 4659 else if (!IS_ALIGNED(ring->rx_pending, 16)) 4660 new_rx_pending = ALIGN(ring->rx_pending, 16); 4661 4662 if (ring->tx_pending > MVPP2_MAX_TXD_MAX) 4663 new_tx_pending = MVPP2_MAX_TXD_MAX; 4664 else if (!IS_ALIGNED(ring->tx_pending, 32)) 4665 new_tx_pending = ALIGN(ring->tx_pending, 32); 4666 4667 /* The Tx ring size cannot be smaller than the minimum number of 4668 * descriptors needed for TSO. 4669 */ 4670 if (new_tx_pending < MVPP2_MAX_SKB_DESCS) 4671 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); 4672 4673 if (ring->rx_pending != new_rx_pending) { 4674 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", 4675 ring->rx_pending, new_rx_pending); 4676 ring->rx_pending = new_rx_pending; 4677 } 4678 4679 if (ring->tx_pending != new_tx_pending) { 4680 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", 4681 ring->tx_pending, new_tx_pending); 4682 ring->tx_pending = new_tx_pending; 4683 } 4684 4685 return 0; 4686 } 4687 4688 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) 4689 { 4690 u32 mac_addr_l, mac_addr_m, mac_addr_h; 4691 4692 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 4693 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); 4694 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); 4695 addr[0] = (mac_addr_h >> 24) & 0xFF; 4696 addr[1] = (mac_addr_h >> 16) & 0xFF; 4697 addr[2] = (mac_addr_h >> 8) & 0xFF; 4698 addr[3] = mac_addr_h & 0xFF; 4699 addr[4] = mac_addr_m & 0xFF; 4700 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; 4701 } 4702 4703 static int mvpp2_irqs_init(struct mvpp2_port *port) 4704 { 4705 int err, i; 4706 4707 for (i = 0; i < port->nqvecs; i++) { 4708 struct mvpp2_queue_vector *qv = port->qvecs + i; 4709 4710 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { 4711 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); 4712 if (!qv->mask) { 4713 err = -ENOMEM; 4714 goto err; 4715 } 4716 4717 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); 4718 } 4719 4720 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); 4721 if (err) 4722 goto err; 4723 4724 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { 4725 unsigned int cpu; 4726 4727 for_each_present_cpu(cpu) { 4728 if (mvpp2_cpu_to_thread(port->priv, cpu) == 4729 qv->sw_thread_id) 4730 cpumask_set_cpu(cpu, qv->mask); 4731 } 4732 4733 irq_set_affinity_hint(qv->irq, qv->mask); 4734 } 4735 } 4736 4737 return 0; 4738 err: 4739 for (i = 0; i < port->nqvecs; i++) { 4740 struct mvpp2_queue_vector *qv = port->qvecs + i; 4741 4742 irq_set_affinity_hint(qv->irq, NULL); 4743 kfree(qv->mask); 4744 qv->mask = NULL; 4745 free_irq(qv->irq, qv); 4746 } 4747 4748 return err; 4749 } 4750 4751 static void mvpp2_irqs_deinit(struct mvpp2_port *port) 4752 { 4753 int i; 4754 4755 for (i = 0; i < port->nqvecs; i++) { 
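		/* Teardown mirrors mvpp2_irqs_init(): drop the affinity hint
		 * and the IRQ_NO_BALANCING flag before freeing each IRQ.
		 */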
4756 struct mvpp2_queue_vector *qv = port->qvecs + i; 4757 4758 irq_set_affinity_hint(qv->irq, NULL); 4759 kfree(qv->mask); 4760 qv->mask = NULL; 4761 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); 4762 free_irq(qv->irq, qv); 4763 } 4764 } 4765 4766 static bool mvpp22_rss_is_supported(struct mvpp2_port *port) 4767 { 4768 return (queue_mode == MVPP2_QDIST_MULTI_MODE) && 4769 !(port->flags & MVPP2_F_LOOPBACK); 4770 } 4771 4772 static int mvpp2_open(struct net_device *dev) 4773 { 4774 struct mvpp2_port *port = netdev_priv(dev); 4775 struct mvpp2 *priv = port->priv; 4776 unsigned char mac_bcast[ETH_ALEN] = { 4777 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 4778 bool valid = false; 4779 int err; 4780 4781 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); 4782 if (err) { 4783 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 4784 return err; 4785 } 4786 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); 4787 if (err) { 4788 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); 4789 return err; 4790 } 4791 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); 4792 if (err) { 4793 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); 4794 return err; 4795 } 4796 err = mvpp2_prs_def_flow(port); 4797 if (err) { 4798 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 4799 return err; 4800 } 4801 4802 /* Allocate the Rx/Tx queues */ 4803 err = mvpp2_setup_rxqs(port); 4804 if (err) { 4805 netdev_err(port->dev, "cannot allocate Rx queues\n"); 4806 return err; 4807 } 4808 4809 err = mvpp2_setup_txqs(port); 4810 if (err) { 4811 netdev_err(port->dev, "cannot allocate Tx queues\n"); 4812 goto err_cleanup_rxqs; 4813 } 4814 4815 err = mvpp2_irqs_init(port); 4816 if (err) { 4817 netdev_err(port->dev, "cannot init IRQs\n"); 4818 goto err_cleanup_txqs; 4819 } 4820 4821 if (port->phylink) { 4822 err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0); 4823 if (err) { 4824 netdev_err(port->dev, "could not attach PHY (%d)\n", 4825 err); 4826 goto err_free_irq; 4827 } 4828 4829 valid = true; 4830 } 4831 4832 if (priv->hw_version >= MVPP22 && port->port_irq) { 4833 err = request_irq(port->port_irq, mvpp2_port_isr, 0, 4834 dev->name, port); 4835 if (err) { 4836 netdev_err(port->dev, 4837 "cannot request port link/ptp IRQ %d\n", 4838 port->port_irq); 4839 goto err_free_irq; 4840 } 4841 4842 mvpp22_gop_setup_irq(port); 4843 4844 /* In default link is down */ 4845 netif_carrier_off(port->dev); 4846 4847 valid = true; 4848 } else { 4849 port->port_irq = 0; 4850 } 4851 4852 if (!valid) { 4853 netdev_err(port->dev, 4854 "invalid configuration: no dt or link IRQ"); 4855 err = -ENOENT; 4856 goto err_free_irq; 4857 } 4858 4859 /* Unmask interrupts on all CPUs */ 4860 on_each_cpu(mvpp2_interrupts_unmask, port, 1); 4861 mvpp2_shared_interrupt_mask_unmask(port, false); 4862 4863 mvpp2_start_dev(port); 4864 4865 /* Start hardware statistics gathering */ 4866 queue_delayed_work(priv->stats_queue, &port->stats_work, 4867 MVPP2_MIB_COUNTERS_STATS_DELAY); 4868 4869 return 0; 4870 4871 err_free_irq: 4872 mvpp2_irqs_deinit(port); 4873 err_cleanup_txqs: 4874 mvpp2_cleanup_txqs(port); 4875 err_cleanup_rxqs: 4876 mvpp2_cleanup_rxqs(port); 4877 return err; 4878 } 4879 4880 static int mvpp2_stop(struct net_device *dev) 4881 { 4882 struct mvpp2_port *port = netdev_priv(dev); 4883 struct mvpp2_port_pcpu *port_pcpu; 4884 unsigned int thread; 4885 4886 mvpp2_stop_dev(port); 4887 4888 /* Mask interrupts on all threads */ 4889 on_each_cpu(mvpp2_interrupts_mask, port, 1); 4890 
mvpp2_shared_interrupt_mask_unmask(port, true);
4891
4892 	if (port->phylink)
4893 		phylink_disconnect_phy(port->phylink);
4894 	if (port->port_irq)
4895 		free_irq(port->port_irq, port);
4896
4897 	mvpp2_irqs_deinit(port);
4898 	if (!port->has_tx_irqs) {
4899 		for (thread = 0; thread < port->priv->nthreads; thread++) {
4900 			port_pcpu = per_cpu_ptr(port->pcpu, thread);
4901
4902 			hrtimer_cancel(&port_pcpu->tx_done_timer);
4903 			port_pcpu->timer_scheduled = false;
4904 		}
4905 	}
4906 	mvpp2_cleanup_rxqs(port);
4907 	mvpp2_cleanup_txqs(port);
4908
4909 	cancel_delayed_work_sync(&port->stats_work);
4910
4911 	mvpp2_mac_reset_assert(port);
4912 	mvpp22_pcs_reset_assert(port);
4913
4914 	return 0;
4915 }
4916
4917 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4918 					struct netdev_hw_addr_list *list)
4919 {
4920 	struct netdev_hw_addr *ha;
4921 	int ret;
4922
4923 	netdev_hw_addr_list_for_each(ha, list) {
4924 		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4925 		if (ret)
4926 			return ret;
4927 	}
4928
4929 	return 0;
4930 }
4931
4932 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4933 {
4934 	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4935 		mvpp2_prs_vid_enable_filtering(port);
4936 	else
4937 		mvpp2_prs_vid_disable_filtering(port);
4938
4939 	mvpp2_prs_mac_promisc_set(port->priv, port->id,
4940 				  MVPP2_PRS_L2_UNI_CAST, enable);
4941
4942 	mvpp2_prs_mac_promisc_set(port->priv, port->id,
4943 				  MVPP2_PRS_L2_MULTI_CAST, enable);
4944 }
4945
4946 static void mvpp2_set_rx_mode(struct net_device *dev)
4947 {
4948 	struct mvpp2_port *port = netdev_priv(dev);
4949
4950 	/* Clear the whole UC and MC list */
4951 	mvpp2_prs_mac_del_all(port);
4952
4953 	if (dev->flags & IFF_PROMISC) {
4954 		mvpp2_set_rx_promisc(port, true);
4955 		return;
4956 	}
4957
4958 	mvpp2_set_rx_promisc(port, false);
4959
4960 	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4961 	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4962 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4963 					  MVPP2_PRS_L2_UNI_CAST, true);
4964
4965 	if (dev->flags & IFF_ALLMULTI) {
4966 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4967 					  MVPP2_PRS_L2_MULTI_CAST, true);
4968 		return;
4969 	}
4970
4971 	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4972 	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4973 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4974 					  MVPP2_PRS_L2_MULTI_CAST, true);
4975 }
4976
4977 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4978 {
4979 	const struct sockaddr *addr = p;
4980 	int err;
4981
4982 	if (!is_valid_ether_addr(addr->sa_data))
4983 		return -EADDRNOTAVAIL;
4984
4985 	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4986 	if (err) {
4987 		/* Reconfigure the parser to accept the original MAC address */
4988 		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4989 		netdev_err(dev, "failed to change MAC address\n");
4990 	}
4991 	return err;
4992 }
4993
4994 /* Shut down all the ports, reconfigure the pools as percpu or shared,
4995  * then bring all ports back up. 
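 *
 * Note that every BM pool is destroyed and re-created here, so any
 * bm_pool pointer cached before the call is stale afterwards. This path
 * is taken from the MTU code (jumbo frames force shared pools) and from
 * the XDP setup code, where the page pools may need a different DMA
 * direction.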
4996 */ 4997 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) 4998 { 4999 bool change_percpu = (percpu != priv->percpu_pools); 5000 int numbufs = MVPP2_BM_POOLS_NUM, i; 5001 struct mvpp2_port *port = NULL; 5002 bool status[MVPP2_MAX_PORTS]; 5003 5004 for (i = 0; i < priv->port_count; i++) { 5005 port = priv->port_list[i]; 5006 status[i] = netif_running(port->dev); 5007 if (status[i]) 5008 mvpp2_stop(port->dev); 5009 } 5010 5011 /* nrxqs is the same for all ports */ 5012 if (priv->percpu_pools) 5013 numbufs = port->nrxqs * 2; 5014 5015 if (change_percpu) 5016 mvpp2_bm_pool_update_priv_fc(priv, false); 5017 5018 for (i = 0; i < numbufs; i++) 5019 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); 5020 5021 devm_kfree(port->dev->dev.parent, priv->bm_pools); 5022 priv->percpu_pools = percpu; 5023 mvpp2_bm_init(port->dev->dev.parent, priv); 5024 5025 for (i = 0; i < priv->port_count; i++) { 5026 port = priv->port_list[i]; 5027 if (percpu && port->ntxqs >= num_possible_cpus() * 2) 5028 xdp_set_features_flag(port->dev, 5029 NETDEV_XDP_ACT_BASIC | 5030 NETDEV_XDP_ACT_REDIRECT | 5031 NETDEV_XDP_ACT_NDO_XMIT); 5032 else 5033 xdp_clear_features_flag(port->dev); 5034 5035 mvpp2_swf_bm_pool_init(port); 5036 if (status[i]) 5037 mvpp2_open(port->dev); 5038 } 5039 5040 if (change_percpu) 5041 mvpp2_bm_pool_update_priv_fc(priv, true); 5042 5043 return 0; 5044 } 5045 5046 static int mvpp2_change_mtu(struct net_device *dev, int mtu) 5047 { 5048 struct mvpp2_port *port = netdev_priv(dev); 5049 bool running = netif_running(dev); 5050 struct mvpp2 *priv = port->priv; 5051 int err; 5052 5053 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { 5054 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, 5055 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); 5056 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); 5057 } 5058 5059 if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { 5060 netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", 5061 mtu, (int)MVPP2_MAX_RX_BUF_SIZE); 5062 return -EINVAL; 5063 } 5064 5065 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { 5066 if (priv->percpu_pools) { 5067 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); 5068 mvpp2_bm_switch_buffers(priv, false); 5069 } 5070 } else { 5071 bool jumbo = false; 5072 int i; 5073 5074 for (i = 0; i < priv->port_count; i++) 5075 if (priv->port_list[i] != port && 5076 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > 5077 MVPP2_BM_LONG_PKT_SIZE) { 5078 jumbo = true; 5079 break; 5080 } 5081 5082 /* No port is using jumbo frames */ 5083 if (!jumbo) { 5084 dev_info(port->dev->dev.parent, 5085 "all ports have a low MTU, switching to per-cpu buffers"); 5086 mvpp2_bm_switch_buffers(priv, true); 5087 } 5088 } 5089 5090 if (running) 5091 mvpp2_stop_dev(port); 5092 5093 err = mvpp2_bm_update_mtu(dev, mtu); 5094 if (err) { 5095 netdev_err(dev, "failed to change MTU\n"); 5096 /* Reconfigure BM to the original MTU */ 5097 mvpp2_bm_update_mtu(dev, dev->mtu); 5098 } else { 5099 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); 5100 } 5101 5102 if (running) { 5103 mvpp2_start_dev(port); 5104 mvpp2_egress_enable(port); 5105 mvpp2_ingress_enable(port); 5106 } 5107 5108 return err; 5109 } 5110 5111 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) 5112 { 5113 enum dma_data_direction dma_dir = DMA_FROM_DEVICE; 5114 struct mvpp2 *priv = port->priv; 5115 int err = -1, i; 5116 5117 if (!priv->percpu_pools) 5118 return err; 5119 5120 if (!priv->page_pool[0]) 5121 return -ENOMEM; 5122 5123 for (i = 0; i < 
priv->port_count; i++) { 5124 port = priv->port_list[i]; 5125 if (port->xdp_prog) { 5126 dma_dir = DMA_BIDIRECTIONAL; 5127 break; 5128 } 5129 } 5130 5131 /* All pools are equal in terms of DMA direction */ 5132 if (priv->page_pool[0]->p.dma_dir != dma_dir) 5133 err = mvpp2_bm_switch_buffers(priv, true); 5134 5135 return err; 5136 } 5137 5138 static void 5139 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 5140 { 5141 struct mvpp2_port *port = netdev_priv(dev); 5142 unsigned int start; 5143 unsigned int cpu; 5144 5145 for_each_possible_cpu(cpu) { 5146 struct mvpp2_pcpu_stats *cpu_stats; 5147 u64 rx_packets; 5148 u64 rx_bytes; 5149 u64 tx_packets; 5150 u64 tx_bytes; 5151 5152 cpu_stats = per_cpu_ptr(port->stats, cpu); 5153 do { 5154 start = u64_stats_fetch_begin(&cpu_stats->syncp); 5155 rx_packets = cpu_stats->rx_packets; 5156 rx_bytes = cpu_stats->rx_bytes; 5157 tx_packets = cpu_stats->tx_packets; 5158 tx_bytes = cpu_stats->tx_bytes; 5159 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); 5160 5161 stats->rx_packets += rx_packets; 5162 stats->rx_bytes += rx_bytes; 5163 stats->tx_packets += tx_packets; 5164 stats->tx_bytes += tx_bytes; 5165 } 5166 5167 stats->rx_errors = dev->stats.rx_errors; 5168 stats->rx_dropped = dev->stats.rx_dropped; 5169 stats->tx_dropped = dev->stats.tx_dropped; 5170 } 5171 5172 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) 5173 { 5174 struct hwtstamp_config config; 5175 void __iomem *ptp; 5176 u32 gcr, int_mask; 5177 5178 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 5179 return -EFAULT; 5180 5181 if (config.tx_type != HWTSTAMP_TX_OFF && 5182 config.tx_type != HWTSTAMP_TX_ON) 5183 return -ERANGE; 5184 5185 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); 5186 5187 int_mask = gcr = 0; 5188 if (config.tx_type != HWTSTAMP_TX_OFF) { 5189 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; 5190 int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | 5191 MVPP22_PTP_INT_MASK_QUEUE0; 5192 } 5193 5194 /* It seems we must also release the TX reset when enabling the TSU */ 5195 if (config.rx_filter != HWTSTAMP_FILTER_NONE) 5196 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | 5197 MVPP22_PTP_GCR_TX_RESET; 5198 5199 if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) 5200 mvpp22_tai_start(port->priv->tai); 5201 5202 if (config.rx_filter != HWTSTAMP_FILTER_NONE) { 5203 config.rx_filter = HWTSTAMP_FILTER_ALL; 5204 mvpp2_modify(ptp + MVPP22_PTP_GCR, 5205 MVPP22_PTP_GCR_RX_RESET | 5206 MVPP22_PTP_GCR_TX_RESET | 5207 MVPP22_PTP_GCR_TSU_ENABLE, gcr); 5208 port->rx_hwtstamp = true; 5209 } else { 5210 port->rx_hwtstamp = false; 5211 mvpp2_modify(ptp + MVPP22_PTP_GCR, 5212 MVPP22_PTP_GCR_RX_RESET | 5213 MVPP22_PTP_GCR_TX_RESET | 5214 MVPP22_PTP_GCR_TSU_ENABLE, gcr); 5215 } 5216 5217 mvpp2_modify(ptp + MVPP22_PTP_INT_MASK, 5218 MVPP22_PTP_INT_MASK_QUEUE1 | 5219 MVPP22_PTP_INT_MASK_QUEUE0, int_mask); 5220 5221 if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) 5222 mvpp22_tai_stop(port->priv->tai); 5223 5224 port->tx_hwtstamp_type = config.tx_type; 5225 5226 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 5227 return -EFAULT; 5228 5229 return 0; 5230 } 5231 5232 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) 5233 { 5234 struct hwtstamp_config config; 5235 5236 memset(&config, 0, sizeof(config)); 5237 5238 config.tx_type = port->tx_hwtstamp_type; 5239 config.rx_filter = port->rx_hwtstamp ? 
5240 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; 5241 5242 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 5243 return -EFAULT; 5244 5245 return 0; 5246 } 5247 5248 static int mvpp2_ethtool_get_ts_info(struct net_device *dev, 5249 struct kernel_ethtool_ts_info *info) 5250 { 5251 struct mvpp2_port *port = netdev_priv(dev); 5252 5253 if (!port->hwtstamp) 5254 return -EOPNOTSUPP; 5255 5256 info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai); 5257 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 5258 SOF_TIMESTAMPING_TX_HARDWARE | 5259 SOF_TIMESTAMPING_RX_HARDWARE | 5260 SOF_TIMESTAMPING_RAW_HARDWARE; 5261 info->tx_types = BIT(HWTSTAMP_TX_OFF) | 5262 BIT(HWTSTAMP_TX_ON); 5263 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 5264 BIT(HWTSTAMP_FILTER_ALL); 5265 5266 return 0; 5267 } 5268 5269 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5270 { 5271 struct mvpp2_port *port = netdev_priv(dev); 5272 5273 switch (cmd) { 5274 case SIOCSHWTSTAMP: 5275 if (port->hwtstamp) 5276 return mvpp2_set_ts_config(port, ifr); 5277 break; 5278 5279 case SIOCGHWTSTAMP: 5280 if (port->hwtstamp) 5281 return mvpp2_get_ts_config(port, ifr); 5282 break; 5283 } 5284 5285 if (!port->phylink) 5286 return -ENOTSUPP; 5287 5288 return phylink_mii_ioctl(port->phylink, ifr, cmd); 5289 } 5290 5291 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 5292 { 5293 struct mvpp2_port *port = netdev_priv(dev); 5294 int ret; 5295 5296 ret = mvpp2_prs_vid_entry_add(port, vid); 5297 if (ret) 5298 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", 5299 MVPP2_PRS_VLAN_FILT_MAX - 1); 5300 return ret; 5301 } 5302 5303 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 5304 { 5305 struct mvpp2_port *port = netdev_priv(dev); 5306 5307 mvpp2_prs_vid_entry_remove(port, vid); 5308 return 0; 5309 } 5310 5311 static int mvpp2_set_features(struct net_device *dev, 5312 netdev_features_t features) 5313 { 5314 netdev_features_t changed = dev->features ^ features; 5315 struct mvpp2_port *port = netdev_priv(dev); 5316 5317 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { 5318 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 5319 mvpp2_prs_vid_enable_filtering(port); 5320 } else { 5321 /* Invalidate all registered VID filters for this 5322 * port 5323 */ 5324 mvpp2_prs_vid_remove_all(port); 5325 5326 mvpp2_prs_vid_disable_filtering(port); 5327 } 5328 } 5329 5330 if (changed & NETIF_F_RXHASH) { 5331 if (features & NETIF_F_RXHASH) 5332 mvpp22_port_rss_enable(port); 5333 else 5334 mvpp22_port_rss_disable(port); 5335 } 5336 5337 return 0; 5338 } 5339 5340 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) 5341 { 5342 struct bpf_prog *prog = bpf->prog, *old_prog; 5343 bool running = netif_running(port->dev); 5344 bool reset = !prog != !port->xdp_prog; 5345 5346 if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { 5347 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); 5348 return -EOPNOTSUPP; 5349 } 5350 5351 if (!port->priv->percpu_pools) { 5352 NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP"); 5353 return -EOPNOTSUPP; 5354 } 5355 5356 if (port->ntxqs < num_possible_cpus() * 2) { 5357 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU"); 5358 return -EOPNOTSUPP; 5359 } 5360 5361 /* device is up and bpf is added/removed, must setup the RX queues */ 5362 if (running && reset) 5363 mvpp2_stop(port->dev); 5364 5365 old_prog = xchg(&port->xdp_prog, prog); 5366 if 
(old_prog) 5367 bpf_prog_put(old_prog); 5368 5369 /* bpf is just replaced, RXQ and MTU are already setup */ 5370 if (!reset) 5371 return 0; 5372 5373 /* device was up, restore the link */ 5374 if (running) 5375 mvpp2_open(port->dev); 5376 5377 /* Check Page Pool DMA Direction */ 5378 mvpp2_check_pagepool_dma(port); 5379 5380 return 0; 5381 } 5382 5383 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) 5384 { 5385 struct mvpp2_port *port = netdev_priv(dev); 5386 5387 switch (xdp->command) { 5388 case XDP_SETUP_PROG: 5389 return mvpp2_xdp_setup(port, xdp); 5390 default: 5391 return -EINVAL; 5392 } 5393 } 5394 5395 /* Ethtool methods */ 5396 5397 static int mvpp2_ethtool_nway_reset(struct net_device *dev) 5398 { 5399 struct mvpp2_port *port = netdev_priv(dev); 5400 5401 if (!port->phylink) 5402 return -ENOTSUPP; 5403 5404 return phylink_ethtool_nway_reset(port->phylink); 5405 } 5406 5407 /* Set interrupt coalescing for ethtools */ 5408 static int 5409 mvpp2_ethtool_set_coalesce(struct net_device *dev, 5410 struct ethtool_coalesce *c, 5411 struct kernel_ethtool_coalesce *kernel_coal, 5412 struct netlink_ext_ack *extack) 5413 { 5414 struct mvpp2_port *port = netdev_priv(dev); 5415 int queue; 5416 5417 for (queue = 0; queue < port->nrxqs; queue++) { 5418 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 5419 5420 rxq->time_coal = c->rx_coalesce_usecs; 5421 rxq->pkts_coal = c->rx_max_coalesced_frames; 5422 mvpp2_rx_pkts_coal_set(port, rxq); 5423 mvpp2_rx_time_coal_set(port, rxq); 5424 } 5425 5426 if (port->has_tx_irqs) { 5427 port->tx_time_coal = c->tx_coalesce_usecs; 5428 mvpp2_tx_time_coal_set(port); 5429 } 5430 5431 for (queue = 0; queue < port->ntxqs; queue++) { 5432 struct mvpp2_tx_queue *txq = port->txqs[queue]; 5433 5434 txq->done_pkts_coal = c->tx_max_coalesced_frames; 5435 5436 if (port->has_tx_irqs) 5437 mvpp2_tx_pkts_coal_set(port, txq); 5438 } 5439 5440 return 0; 5441 } 5442 5443 /* get coalescing for ethtools */ 5444 static int 5445 mvpp2_ethtool_get_coalesce(struct net_device *dev, 5446 struct ethtool_coalesce *c, 5447 struct kernel_ethtool_coalesce *kernel_coal, 5448 struct netlink_ext_ack *extack) 5449 { 5450 struct mvpp2_port *port = netdev_priv(dev); 5451 5452 c->rx_coalesce_usecs = port->rxqs[0]->time_coal; 5453 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; 5454 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; 5455 c->tx_coalesce_usecs = port->tx_time_coal; 5456 return 0; 5457 } 5458 5459 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, 5460 struct ethtool_drvinfo *drvinfo) 5461 { 5462 strscpy(drvinfo->driver, MVPP2_DRIVER_NAME, 5463 sizeof(drvinfo->driver)); 5464 strscpy(drvinfo->version, MVPP2_DRIVER_VERSION, 5465 sizeof(drvinfo->version)); 5466 strscpy(drvinfo->bus_info, dev_name(&dev->dev), 5467 sizeof(drvinfo->bus_info)); 5468 } 5469 5470 static void 5471 mvpp2_ethtool_get_ringparam(struct net_device *dev, 5472 struct ethtool_ringparam *ring, 5473 struct kernel_ethtool_ringparam *kernel_ring, 5474 struct netlink_ext_ack *extack) 5475 { 5476 struct mvpp2_port *port = netdev_priv(dev); 5477 5478 ring->rx_max_pending = MVPP2_MAX_RXD_MAX; 5479 ring->tx_max_pending = MVPP2_MAX_TXD_MAX; 5480 ring->rx_pending = port->rx_ring_size; 5481 ring->tx_pending = port->tx_ring_size; 5482 } 5483 5484 static int 5485 mvpp2_ethtool_set_ringparam(struct net_device *dev, 5486 struct ethtool_ringparam *ring, 5487 struct kernel_ethtool_ringparam *kernel_ring, 5488 struct netlink_ext_ack *extack) 5489 { 5490 struct mvpp2_port *port = 
netdev_priv(dev); 5491 u16 prev_rx_ring_size = port->rx_ring_size; 5492 u16 prev_tx_ring_size = port->tx_ring_size; 5493 int err; 5494 5495 err = mvpp2_check_ringparam_valid(dev, ring); 5496 if (err) 5497 return err; 5498 5499 if (!netif_running(dev)) { 5500 port->rx_ring_size = ring->rx_pending; 5501 port->tx_ring_size = ring->tx_pending; 5502 return 0; 5503 } 5504 5505 /* The interface is running, so we have to force a 5506 * reallocation of the queues 5507 */ 5508 mvpp2_stop_dev(port); 5509 mvpp2_cleanup_rxqs(port); 5510 mvpp2_cleanup_txqs(port); 5511 5512 port->rx_ring_size = ring->rx_pending; 5513 port->tx_ring_size = ring->tx_pending; 5514 5515 err = mvpp2_setup_rxqs(port); 5516 if (err) { 5517 /* Reallocate Rx queues with the original ring size */ 5518 port->rx_ring_size = prev_rx_ring_size; 5519 ring->rx_pending = prev_rx_ring_size; 5520 err = mvpp2_setup_rxqs(port); 5521 if (err) 5522 goto err_out; 5523 } 5524 err = mvpp2_setup_txqs(port); 5525 if (err) { 5526 /* Reallocate Tx queues with the original ring size */ 5527 port->tx_ring_size = prev_tx_ring_size; 5528 ring->tx_pending = prev_tx_ring_size; 5529 err = mvpp2_setup_txqs(port); 5530 if (err) 5531 goto err_clean_rxqs; 5532 } 5533 5534 mvpp2_start_dev(port); 5535 mvpp2_egress_enable(port); 5536 mvpp2_ingress_enable(port); 5537 5538 return 0; 5539 5540 err_clean_rxqs: 5541 mvpp2_cleanup_rxqs(port); 5542 err_out: 5543 netdev_err(dev, "failed to change ring parameters"); 5544 return err; 5545 } 5546 5547 static void mvpp2_ethtool_get_pause_param(struct net_device *dev, 5548 struct ethtool_pauseparam *pause) 5549 { 5550 struct mvpp2_port *port = netdev_priv(dev); 5551 5552 if (!port->phylink) 5553 return; 5554 5555 phylink_ethtool_get_pauseparam(port->phylink, pause); 5556 } 5557 5558 static int mvpp2_ethtool_set_pause_param(struct net_device *dev, 5559 struct ethtool_pauseparam *pause) 5560 { 5561 struct mvpp2_port *port = netdev_priv(dev); 5562 5563 if (!port->phylink) 5564 return -ENOTSUPP; 5565 5566 return phylink_ethtool_set_pauseparam(port->phylink, pause); 5567 } 5568 5569 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, 5570 struct ethtool_link_ksettings *cmd) 5571 { 5572 struct mvpp2_port *port = netdev_priv(dev); 5573 5574 if (!port->phylink) 5575 return -ENOTSUPP; 5576 5577 return phylink_ethtool_ksettings_get(port->phylink, cmd); 5578 } 5579 5580 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, 5581 const struct ethtool_link_ksettings *cmd) 5582 { 5583 struct mvpp2_port *port = netdev_priv(dev); 5584 5585 if (!port->phylink) 5586 return -ENOTSUPP; 5587 5588 return phylink_ethtool_ksettings_set(port->phylink, cmd); 5589 } 5590 5591 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, 5592 struct ethtool_rxnfc *info, u32 *rules) 5593 { 5594 struct mvpp2_port *port = netdev_priv(dev); 5595 int ret = 0, i, loc = 0; 5596 5597 if (!mvpp22_rss_is_supported(port)) 5598 return -EOPNOTSUPP; 5599 5600 switch (info->cmd) { 5601 case ETHTOOL_GRXFH: 5602 ret = mvpp2_ethtool_rxfh_get(port, info); 5603 break; 5604 case ETHTOOL_GRXRINGS: 5605 info->data = port->nrxqs; 5606 break; 5607 case ETHTOOL_GRXCLSRLCNT: 5608 info->rule_cnt = port->n_rfs_rules; 5609 break; 5610 case ETHTOOL_GRXCLSRULE: 5611 ret = mvpp2_ethtool_cls_rule_get(port, info); 5612 break; 5613 case ETHTOOL_GRXCLSRLALL: 5614 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { 5615 if (loc == info->rule_cnt) { 5616 ret = -EMSGSIZE; 5617 break; 5618 } 5619 5620 if (port->rfs_rules[i]) 5621 rules[loc++] = i; 5622 } 5623 break; 
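	/* Any other rxnfc command is not implemented for this hardware */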
5624 	default:
5625 		return -EOPNOTSUPP;
5626 	}
5627
5628 	return ret;
5629 }
5630
5631 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
5632 				   struct ethtool_rxnfc *info)
5633 {
5634 	struct mvpp2_port *port = netdev_priv(dev);
5635 	int ret = 0;
5636
5637 	if (!mvpp22_rss_is_supported(port))
5638 		return -EOPNOTSUPP;
5639
5640 	switch (info->cmd) {
5641 	case ETHTOOL_SRXFH:
5642 		ret = mvpp2_ethtool_rxfh_set(port, info);
5643 		break;
5644 	case ETHTOOL_SRXCLSRLINS:
5645 		ret = mvpp2_ethtool_cls_rule_ins(port, info);
5646 		break;
5647 	case ETHTOOL_SRXCLSRLDEL:
5648 		ret = mvpp2_ethtool_cls_rule_del(port, info);
5649 		break;
5650 	default:
5651 		return -EOPNOTSUPP;
5652 	}
5653 	return ret;
5654 }
5655
5656 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
5657 {
5658 	struct mvpp2_port *port = netdev_priv(dev);
5659
5660 	return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
5661 }
5662
5663 static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
5664 				  struct ethtool_rxfh_param *rxfh)
5665 {
5666 	struct mvpp2_port *port = netdev_priv(dev);
5667 	u32 rss_context = rxfh->rss_context;
5668 	int ret = 0;
5669
5670 	if (!mvpp22_rss_is_supported(port))
5671 		return -EOPNOTSUPP;
5672 	if (rss_context >= MVPP22_N_RSS_TABLES)
5673 		return -EINVAL;
5674
5675 	rxfh->hfunc = ETH_RSS_HASH_CRC32;
5676
5677 	if (rxfh->indir)
5678 		ret = mvpp22_port_rss_ctx_indir_get(port, rss_context,
5679 						    rxfh->indir);
5680
5681 	return ret;
5682 }
5683
5684 static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port,
5685 				    const struct ethtool_rxfh_param *rxfh)
5686 {
5687 	if (!mvpp22_rss_is_supported(port))
5688 		return false;
5689
5690 	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5691 	    rxfh->hfunc != ETH_RSS_HASH_CRC32)
5692 		return false;
5693
5694 	if (rxfh->key)
5695 		return false;
5696
5697 	return true;
5698 }
5699
5700 static int mvpp2_create_rxfh_context(struct net_device *dev,
5701 				     struct ethtool_rxfh_context *ctx,
5702 				     const struct ethtool_rxfh_param *rxfh,
5703 				     struct netlink_ext_ack *extack)
5704 {
5705 	struct mvpp2_port *port = netdev_priv(dev);
5706 	int ret = 0;
5707
5708 	if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
5709 		return -EOPNOTSUPP;
5710
5711 	ctx->hfunc = ETH_RSS_HASH_CRC32;
5712
5713 	ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context);
5714 	if (ret)
5715 		return ret;
5716
5717 	if (!rxfh->indir)
5718 		ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context,
5719 						    ethtool_rxfh_context_indir(ctx));
5720 	else
5721 		ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
5722 						    rxfh->indir);
5723 	return ret;
5724 }
5725
5726 static int mvpp2_modify_rxfh_context(struct net_device *dev,
5727 				     struct ethtool_rxfh_context *ctx,
5728 				     const struct ethtool_rxfh_param *rxfh,
5729 				     struct netlink_ext_ack *extack)
5730 {
5731 	struct mvpp2_port *port = netdev_priv(dev);
5732 	int ret = 0;
5733
5734 	if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
5735 		return -EOPNOTSUPP;
5736
5737 	if (rxfh->indir)
5738 		ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
5739 						    rxfh->indir);
5740 	return ret;
5741 }
5742
5743 static int mvpp2_remove_rxfh_context(struct net_device *dev,
5744 				     struct ethtool_rxfh_context *ctx,
5745 				     u32 rss_context,
5746 				     struct netlink_ext_ack *extack)
5747 {
5748 	struct mvpp2_port *port = netdev_priv(dev);
5749
5750 	return mvpp22_port_rss_ctx_delete(port, rss_context);
5751 }
5752
5753 static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
5754 				  struct ethtool_rxfh_param *rxfh,
5755 				  struct netlink_ext_ack *extack)
5756 {
5757 	return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
5758 }
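
/* Usage note (illustrative, not part of the driver): the RSS context
 * callbacks above are invoked by the ethtool core. From userspace this
 * corresponds roughly to:
 *
 *   ethtool -X eth0 context new equal 4
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 context 1
 *
 * The context id allocated by the first command is what arrives here as
 * rxfh->rss_context; "eth0" and the flow rule are examples only.
 */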
5759 5760 /* Device ops */ 5761 5762 static const struct net_device_ops mvpp2_netdev_ops = { 5763 .ndo_open = mvpp2_open, 5764 .ndo_stop = mvpp2_stop, 5765 .ndo_start_xmit = mvpp2_tx, 5766 .ndo_set_rx_mode = mvpp2_set_rx_mode, 5767 .ndo_set_mac_address = mvpp2_set_mac_address, 5768 .ndo_change_mtu = mvpp2_change_mtu, 5769 .ndo_get_stats64 = mvpp2_get_stats64, 5770 .ndo_eth_ioctl = mvpp2_ioctl, 5771 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, 5772 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, 5773 .ndo_set_features = mvpp2_set_features, 5774 .ndo_bpf = mvpp2_xdp, 5775 .ndo_xdp_xmit = mvpp2_xdp_xmit, 5776 }; 5777 5778 static const struct ethtool_ops mvpp2_eth_tool_ops = { 5779 .rxfh_max_num_contexts = MVPP22_N_RSS_TABLES, 5780 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 5781 ETHTOOL_COALESCE_MAX_FRAMES, 5782 .nway_reset = mvpp2_ethtool_nway_reset, 5783 .get_link = ethtool_op_get_link, 5784 .get_ts_info = mvpp2_ethtool_get_ts_info, 5785 .set_coalesce = mvpp2_ethtool_set_coalesce, 5786 .get_coalesce = mvpp2_ethtool_get_coalesce, 5787 .get_drvinfo = mvpp2_ethtool_get_drvinfo, 5788 .get_ringparam = mvpp2_ethtool_get_ringparam, 5789 .set_ringparam = mvpp2_ethtool_set_ringparam, 5790 .get_strings = mvpp2_ethtool_get_strings, 5791 .get_ethtool_stats = mvpp2_ethtool_get_stats, 5792 .get_sset_count = mvpp2_ethtool_get_sset_count, 5793 .get_pauseparam = mvpp2_ethtool_get_pause_param, 5794 .set_pauseparam = mvpp2_ethtool_set_pause_param, 5795 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, 5796 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, 5797 .get_rxnfc = mvpp2_ethtool_get_rxnfc, 5798 .set_rxnfc = mvpp2_ethtool_set_rxnfc, 5799 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, 5800 .get_rxfh = mvpp2_ethtool_get_rxfh, 5801 .set_rxfh = mvpp2_ethtool_set_rxfh, 5802 .create_rxfh_context = mvpp2_create_rxfh_context, 5803 .modify_rxfh_context = mvpp2_modify_rxfh_context, 5804 .remove_rxfh_context = mvpp2_remove_rxfh_context, 5805 }; 5806 5807 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that 5808 * had a single IRQ defined per-port. 
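 * With a single IRQ, the one vector is shared by every Rx queue and there
 * are no dedicated Tx IRQs; Tx-done housekeeping then falls back to the
 * per-CPU hrtimer set up in mvpp2_port_probe().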
5809 */ 5810 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, 5811 struct device_node *port_node) 5812 { 5813 struct mvpp2_queue_vector *v = &port->qvecs[0]; 5814 5815 v->first_rxq = 0; 5816 v->nrxqs = port->nrxqs; 5817 v->type = MVPP2_QUEUE_VECTOR_SHARED; 5818 v->sw_thread_id = 0; 5819 v->sw_thread_mask = *cpumask_bits(cpu_online_mask); 5820 v->port = port; 5821 v->irq = irq_of_parse_and_map(port_node, 0); 5822 if (v->irq <= 0) 5823 return -EINVAL; 5824 netif_napi_add(port->dev, &v->napi, mvpp2_poll); 5825 5826 port->nqvecs = 1; 5827 5828 return 0; 5829 } 5830 5831 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, 5832 struct device_node *port_node) 5833 { 5834 struct mvpp2 *priv = port->priv; 5835 struct mvpp2_queue_vector *v; 5836 int i, ret; 5837 5838 switch (queue_mode) { 5839 case MVPP2_QDIST_SINGLE_MODE: 5840 port->nqvecs = priv->nthreads + 1; 5841 break; 5842 case MVPP2_QDIST_MULTI_MODE: 5843 port->nqvecs = priv->nthreads; 5844 break; 5845 } 5846 5847 for (i = 0; i < port->nqvecs; i++) { 5848 char irqname[16]; 5849 5850 v = port->qvecs + i; 5851 5852 v->port = port; 5853 v->type = MVPP2_QUEUE_VECTOR_PRIVATE; 5854 v->sw_thread_id = i; 5855 v->sw_thread_mask = BIT(i); 5856 5857 if (port->flags & MVPP2_F_DT_COMPAT) 5858 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); 5859 else 5860 snprintf(irqname, sizeof(irqname), "hif%d", i); 5861 5862 if (queue_mode == MVPP2_QDIST_MULTI_MODE) { 5863 v->first_rxq = i; 5864 v->nrxqs = 1; 5865 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && 5866 i == (port->nqvecs - 1)) { 5867 v->first_rxq = 0; 5868 v->nrxqs = port->nrxqs; 5869 v->type = MVPP2_QUEUE_VECTOR_SHARED; 5870 5871 if (port->flags & MVPP2_F_DT_COMPAT) 5872 strscpy(irqname, "rx-shared", sizeof(irqname)); 5873 } 5874 5875 if (port_node) 5876 v->irq = of_irq_get_byname(port_node, irqname); 5877 else 5878 v->irq = fwnode_irq_get(port->fwnode, i); 5879 if (v->irq <= 0) { 5880 ret = -EINVAL; 5881 goto err; 5882 } 5883 5884 netif_napi_add(port->dev, &v->napi, mvpp2_poll); 5885 } 5886 5887 return 0; 5888 5889 err: 5890 for (i = 0; i < port->nqvecs; i++) 5891 irq_dispose_mapping(port->qvecs[i].irq); 5892 return ret; 5893 } 5894 5895 static int mvpp2_queue_vectors_init(struct mvpp2_port *port, 5896 struct device_node *port_node) 5897 { 5898 if (port->has_tx_irqs) 5899 return mvpp2_multi_queue_vectors_init(port, port_node); 5900 else 5901 return mvpp2_simple_queue_vectors_init(port, port_node); 5902 } 5903 5904 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) 5905 { 5906 int i; 5907 5908 for (i = 0; i < port->nqvecs; i++) 5909 irq_dispose_mapping(port->qvecs[i].irq); 5910 } 5911 5912 /* Configure Rx queue group interrupt for this port */ 5913 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) 5914 { 5915 struct mvpp2 *priv = port->priv; 5916 u32 val; 5917 int i; 5918 5919 if (priv->hw_version == MVPP21) { 5920 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), 5921 port->nrxqs); 5922 return; 5923 } 5924 5925 /* Handle the more complicated PPv2.2 and PPv2.3 case */ 5926 for (i = 0; i < port->nqvecs; i++) { 5927 struct mvpp2_queue_vector *qv = port->qvecs + i; 5928 5929 if (!qv->nrxqs) 5930 continue; 5931 5932 val = qv->sw_thread_id; 5933 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; 5934 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); 5935 5936 val = qv->first_rxq; 5937 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; 5938 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); 5939 } 5940 } 5941 5942 
/* Initialize port HW */ 5943 static int mvpp2_port_init(struct mvpp2_port *port) 5944 { 5945 struct device *dev = port->dev->dev.parent; 5946 struct mvpp2 *priv = port->priv; 5947 struct mvpp2_txq_pcpu *txq_pcpu; 5948 unsigned int thread; 5949 int queue, err, val; 5950 5951 /* Checks for hardware constraints */ 5952 if (port->first_rxq + port->nrxqs > 5953 MVPP2_MAX_PORTS * priv->max_port_rxqs) 5954 return -EINVAL; 5955 5956 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) 5957 return -EINVAL; 5958 5959 /* Disable port */ 5960 mvpp2_egress_disable(port); 5961 mvpp2_port_disable(port); 5962 5963 if (mvpp2_is_xlg(port->phy_interface)) { 5964 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 5965 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 5966 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; 5967 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 5968 } else { 5969 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5970 val &= ~MVPP2_GMAC_FORCE_LINK_PASS; 5971 val |= MVPP2_GMAC_FORCE_LINK_DOWN; 5972 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5973 } 5974 5975 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; 5976 5977 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), 5978 GFP_KERNEL); 5979 if (!port->txqs) 5980 return -ENOMEM; 5981 5982 /* Associate physical Tx queues to this port and initialize. 5983 * The mapping is predefined. 5984 */ 5985 for (queue = 0; queue < port->ntxqs; queue++) { 5986 int queue_phy_id = mvpp2_txq_phys(port->id, queue); 5987 struct mvpp2_tx_queue *txq; 5988 5989 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); 5990 if (!txq) { 5991 err = -ENOMEM; 5992 goto err_free_percpu; 5993 } 5994 5995 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); 5996 if (!txq->pcpu) { 5997 err = -ENOMEM; 5998 goto err_free_percpu; 5999 } 6000 6001 txq->id = queue_phy_id; 6002 txq->log_id = queue; 6003 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; 6004 for (thread = 0; thread < priv->nthreads; thread++) { 6005 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 6006 txq_pcpu->thread = thread; 6007 } 6008 6009 port->txqs[queue] = txq; 6010 } 6011 6012 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), 6013 GFP_KERNEL); 6014 if (!port->rxqs) { 6015 err = -ENOMEM; 6016 goto err_free_percpu; 6017 } 6018 6019 /* Allocate and initialize Rx queue for this port */ 6020 for (queue = 0; queue < port->nrxqs; queue++) { 6021 struct mvpp2_rx_queue *rxq; 6022 6023 /* Map physical Rx queue to port's logical Rx queue */ 6024 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 6025 if (!rxq) { 6026 err = -ENOMEM; 6027 goto err_free_percpu; 6028 } 6029 /* Map this Rx queue to a physical queue */ 6030 rxq->id = port->first_rxq + queue; 6031 rxq->port = port->id; 6032 rxq->logic_rxq = queue; 6033 6034 port->rxqs[queue] = rxq; 6035 } 6036 6037 mvpp2_rx_irqs_setup(port); 6038 6039 /* Create Rx descriptor rings */ 6040 for (queue = 0; queue < port->nrxqs; queue++) { 6041 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 6042 6043 rxq->size = port->rx_ring_size; 6044 rxq->pkts_coal = MVPP2_RX_COAL_PKTS; 6045 rxq->time_coal = MVPP2_RX_COAL_USEC; 6046 } 6047 6048 mvpp2_ingress_disable(port); 6049 6050 /* Port default configuration */ 6051 mvpp2_defaults_set(port); 6052 6053 /* Port's classifier configuration */ 6054 mvpp2_cls_oversize_rxq_set(port); 6055 mvpp2_cls_port_config(port); 6056 6057 if (mvpp22_rss_is_supported(port)) 6058 mvpp22_port_rss_init(port); 6059 6060 /* Provide an initial Rx packet size */ 6061 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); 6062 6063 /* 
Initialize pools for swf */ 6064 err = mvpp2_swf_bm_pool_init(port); 6065 if (err) 6066 goto err_free_percpu; 6067 6068 /* Clear all port stats */ 6069 mvpp2_read_stats(port); 6070 memset(port->ethtool_stats, 0, 6071 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); 6072 6073 return 0; 6074 6075 err_free_percpu: 6076 for (queue = 0; queue < port->ntxqs; queue++) { 6077 if (!port->txqs[queue]) 6078 continue; 6079 free_percpu(port->txqs[queue]->pcpu); 6080 } 6081 return err; 6082 } 6083 6084 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, 6085 unsigned long *flags) 6086 { 6087 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", 6088 "tx-cpu3" }; 6089 int i; 6090 6091 for (i = 0; i < 5; i++) 6092 if (of_property_match_string(port_node, "interrupt-names", 6093 irqs[i]) < 0) 6094 return false; 6095 6096 *flags |= MVPP2_F_DT_COMPAT; 6097 return true; 6098 } 6099 6100 /* Checks if the port dt description has the required Tx interrupts: 6101 * - PPv2.1: there are no such interrupts. 6102 * - PPv2.2 and PPv2.3: 6103 * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] 6104 * - The new ones have: "hifX" with X in [0..8] 6105 * 6106 * All those variants are supported to keep the backward compatibility. 6107 */ 6108 static bool mvpp2_port_has_irqs(struct mvpp2 *priv, 6109 struct device_node *port_node, 6110 unsigned long *flags) 6111 { 6112 char name[5]; 6113 int i; 6114 6115 /* ACPI */ 6116 if (!port_node) 6117 return true; 6118 6119 if (priv->hw_version == MVPP21) 6120 return false; 6121 6122 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) 6123 return true; 6124 6125 for (i = 0; i < MVPP2_MAX_THREADS; i++) { 6126 snprintf(name, 5, "hif%d", i); 6127 if (of_property_match_string(port_node, "interrupt-names", 6128 name) < 0) 6129 return false; 6130 } 6131 6132 return true; 6133 } 6134 6135 static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, 6136 struct fwnode_handle *fwnode, 6137 char **mac_from) 6138 { 6139 struct mvpp2_port *port = netdev_priv(dev); 6140 char hw_mac_addr[ETH_ALEN] = {0}; 6141 char fw_mac_addr[ETH_ALEN]; 6142 int ret; 6143 6144 if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) { 6145 *mac_from = "firmware node"; 6146 eth_hw_addr_set(dev, fw_mac_addr); 6147 return 0; 6148 } 6149 6150 if (priv->hw_version == MVPP21) { 6151 mvpp21_get_mac_address(port, hw_mac_addr); 6152 if (is_valid_ether_addr(hw_mac_addr)) { 6153 *mac_from = "hardware"; 6154 eth_hw_addr_set(dev, hw_mac_addr); 6155 return 0; 6156 } 6157 } 6158 6159 /* Only valid on OF enabled platforms */ 6160 ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr); 6161 if (ret == -EPROBE_DEFER) 6162 return ret; 6163 if (!ret) { 6164 *mac_from = "nvmem cell"; 6165 eth_hw_addr_set(dev, fw_mac_addr); 6166 return 0; 6167 } 6168 6169 *mac_from = "random"; 6170 eth_hw_addr_random(dev); 6171 6172 return 0; 6173 } 6174 6175 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) 6176 { 6177 return container_of(config, struct mvpp2_port, phylink_config); 6178 } 6179 6180 static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs) 6181 { 6182 return container_of(pcs, struct mvpp2_port, pcs_xlg); 6183 } 6184 6185 static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) 6186 { 6187 return container_of(pcs, struct mvpp2_port, pcs_gmac); 6188 } 6189 6190 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, 6191 struct phylink_link_state *state) 6192 { 6193 struct mvpp2_port *port = 
mvpp2_pcs_xlg_to_port(pcs); 6194 u32 val; 6195 6196 if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER) 6197 state->speed = SPEED_5000; 6198 else 6199 state->speed = SPEED_10000; 6200 state->duplex = 1; 6201 state->an_complete = 1; 6202 6203 val = readl(port->base + MVPP22_XLG_STATUS); 6204 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); 6205 6206 state->pause = 0; 6207 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 6208 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) 6209 state->pause |= MLO_PAUSE_TX; 6210 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) 6211 state->pause |= MLO_PAUSE_RX; 6212 } 6213 6214 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 6215 phy_interface_t interface, 6216 const unsigned long *advertising, 6217 bool permit_pause_to_mac) 6218 { 6219 return 0; 6220 } 6221 6222 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { 6223 .pcs_get_state = mvpp2_xlg_pcs_get_state, 6224 .pcs_config = mvpp2_xlg_pcs_config, 6225 }; 6226 6227 static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, 6228 unsigned long *supported, 6229 const struct phylink_link_state *state) 6230 { 6231 /* When in 802.3z mode, we must have AN enabled: 6232 * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... 6233 * When <PortType> = 1 (1000BASE-X) this field must be set to 1. 6234 */ 6235 if (phy_interface_mode_is_8023z(state->interface) && 6236 !phylink_test(state->advertising, Autoneg)) 6237 return -EINVAL; 6238 6239 return 0; 6240 } 6241 6242 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, 6243 struct phylink_link_state *state) 6244 { 6245 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); 6246 u32 val; 6247 6248 val = readl(port->base + MVPP2_GMAC_STATUS0); 6249 6250 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); 6251 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); 6252 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); 6253 6254 switch (port->phy_interface) { 6255 case PHY_INTERFACE_MODE_1000BASEX: 6256 state->speed = SPEED_1000; 6257 break; 6258 case PHY_INTERFACE_MODE_2500BASEX: 6259 state->speed = SPEED_2500; 6260 break; 6261 default: 6262 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) 6263 state->speed = SPEED_1000; 6264 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) 6265 state->speed = SPEED_100; 6266 else 6267 state->speed = SPEED_10; 6268 } 6269 6270 state->pause = 0; 6271 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) 6272 state->pause |= MLO_PAUSE_RX; 6273 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) 6274 state->pause |= MLO_PAUSE_TX; 6275 } 6276 6277 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 6278 phy_interface_t interface, 6279 const unsigned long *advertising, 6280 bool permit_pause_to_mac) 6281 { 6282 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); 6283 u32 mask, val, an, old_an, changed; 6284 6285 mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | 6286 MVPP2_GMAC_IN_BAND_AUTONEG | 6287 MVPP2_GMAC_AN_SPEED_EN | 6288 MVPP2_GMAC_FLOW_CTRL_AUTONEG | 6289 MVPP2_GMAC_AN_DUPLEX_EN; 6290 6291 if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { 6292 mask |= MVPP2_GMAC_CONFIG_MII_SPEED | 6293 MVPP2_GMAC_CONFIG_GMII_SPEED | 6294 MVPP2_GMAC_CONFIG_FULL_DUPLEX; 6295 val = MVPP2_GMAC_IN_BAND_AUTONEG; 6296 6297 if (interface == PHY_INTERFACE_MODE_SGMII) { 6298 /* SGMII mode receives the speed and duplex from PHY */ 6299 val |= MVPP2_GMAC_AN_SPEED_EN | 6300 MVPP2_GMAC_AN_DUPLEX_EN; 6301 } else { 6302 /* 802.3z mode has fixed speed and duplex */ 6303 val |= MVPP2_GMAC_CONFIG_GMII_SPEED | 
6304 			       MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6305 
6306 			/* The FLOW_CTRL_AUTONEG bit selects whether the GMAC pause
6307 			 * modes are controlled automatically by the hardware or
6308 			 * manually via the bits in MVPP22_GMAC_CTRL_4_REG.
6309 			 */
6310 			if (permit_pause_to_mac)
6311 				val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
6312 
6313 			/* Configure advertisement bits */
6314 			mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
6315 			if (phylink_test(advertising, Pause))
6316 				val |= MVPP2_GMAC_FC_ADV_EN;
6317 			if (phylink_test(advertising, Asym_Pause))
6318 				val |= MVPP2_GMAC_FC_ADV_ASM_EN;
6319 		}
6320 	} else {
6321 		val = 0;
6322 	}
6323 
6324 	old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6325 	an = (an & ~mask) | val;
6326 	changed = an ^ old_an;
6327 	if (changed)
6328 		writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6329 
6330 	/* We are only interested in the advertisement bits changing */
6331 	return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
6332 }
6333 
6334 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
6335 {
6336 	struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6337 	u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6338 
6339 	writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
6340 	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6341 	writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
6342 	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6343 }
6344 
6345 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
6346 	.pcs_validate = mvpp2_gmac_pcs_validate,
6347 	.pcs_get_state = mvpp2_gmac_pcs_get_state,
6348 	.pcs_config = mvpp2_gmac_pcs_config,
6349 	.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
6350 };
6351 
6352 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
6353 			     const struct phylink_link_state *state)
6354 {
6355 	u32 val;
6356 
6357 	mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6358 		     MVPP22_XLG_CTRL0_MAC_RESET_DIS,
6359 		     MVPP22_XLG_CTRL0_MAC_RESET_DIS);
6360 	mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
6361 		     MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
6362 		     MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
6363 		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
6364 		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
6365 
6366 	/* Wait for reset to deassert */
6367 	do {
6368 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6369 	} while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
6370 }
6371 
6372 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
6373 			      const struct phylink_link_state *state)
6374 {
6375 	u32 old_ctrl0, ctrl0;
6376 	u32 old_ctrl2, ctrl2;
6377 	u32 old_ctrl4, ctrl4;
6378 
6379 	old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
6380 	old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
6381 	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
6382 
6383 	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
6384 	ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
6385 
6386 	/* Configure port type */
6387 	if (phy_interface_mode_is_8023z(state->interface)) {
6388 		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
6389 		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6390 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6391 			 MVPP22_CTRL4_DP_CLK_SEL |
6392 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6393 	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6394 		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
6395 		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6396 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6397 			 MVPP22_CTRL4_DP_CLK_SEL |
6398 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6399 	} else if
(phy_interface_mode_is_rgmii(state->interface)) { 6400 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; 6401 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | 6402 MVPP22_CTRL4_SYNC_BYPASS_DIS | 6403 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 6404 } 6405 6406 /* Configure negotiation style */ 6407 if (!phylink_autoneg_inband(mode)) { 6408 /* Phy or fixed speed - no in-band AN, nothing to do, leave the 6409 * configured speed, duplex and flow control as-is. 6410 */ 6411 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 6412 /* SGMII in-band mode receives the speed and duplex from 6413 * the PHY. Flow control information is not received. */ 6414 } else if (phy_interface_mode_is_8023z(state->interface)) { 6415 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can 6416 * they negotiate duplex: they are always operating with a fixed 6417 * speed of 1000/2500Mbps in full duplex, so force 1000/2500 6418 * speed and full duplex here. 6419 */ 6420 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; 6421 } 6422 6423 if (old_ctrl0 != ctrl0) 6424 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); 6425 if (old_ctrl2 != ctrl2) 6426 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); 6427 if (old_ctrl4 != ctrl4) 6428 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); 6429 } 6430 6431 static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config, 6432 phy_interface_t interface) 6433 { 6434 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6435 6436 /* Select the appropriate PCS operations depending on the 6437 * configured interface mode. We will only switch to a mode 6438 * that the validate() checks have already passed. 6439 */ 6440 if (mvpp2_is_xlg(interface)) 6441 return &port->pcs_xlg; 6442 else 6443 return &port->pcs_gmac; 6444 } 6445 6446 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, 6447 phy_interface_t interface) 6448 { 6449 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6450 6451 /* Check for invalid configuration */ 6452 if (mvpp2_is_xlg(interface) && port->gop_id != 0) { 6453 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); 6454 return -EINVAL; 6455 } 6456 6457 if (port->phy_interface != interface || 6458 phylink_autoneg_inband(mode)) { 6459 /* Force the link down when changing the interface or if in 6460 * in-band mode to ensure we do not change the configuration 6461 * while the hardware is indicating link is up. We force both 6462 * XLG and GMAC down to ensure that they're both in a known 6463 * state. 
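 * The forced-down state is lifted again later: mvpp2_mac_finish() clears
 * it for in-band modes, and mvpp2_mac_link_up() forces the link up for
 * fixed-link/PHY-driven modes.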
6464 */ 6465 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6466 MVPP2_GMAC_FORCE_LINK_PASS | 6467 MVPP2_GMAC_FORCE_LINK_DOWN, 6468 MVPP2_GMAC_FORCE_LINK_DOWN); 6469 6470 if (mvpp2_port_supports_xlg(port)) 6471 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6472 MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6473 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 6474 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); 6475 } 6476 6477 /* Make sure the port is disabled when reconfiguring the mode */ 6478 mvpp2_port_disable(port); 6479 6480 if (port->phy_interface != interface) { 6481 /* Place GMAC into reset */ 6482 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, 6483 MVPP2_GMAC_PORT_RESET_MASK, 6484 MVPP2_GMAC_PORT_RESET_MASK); 6485 6486 if (port->priv->hw_version >= MVPP22) { 6487 mvpp22_gop_mask_irq(port); 6488 6489 phy_power_off(port->comphy); 6490 6491 /* Reconfigure the serdes lanes */ 6492 mvpp22_mode_reconfigure(port, interface); 6493 } 6494 } 6495 6496 return 0; 6497 } 6498 6499 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, 6500 const struct phylink_link_state *state) 6501 { 6502 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6503 6504 /* mac (re)configuration */ 6505 if (mvpp2_is_xlg(state->interface)) 6506 mvpp2_xlg_config(port, mode, state); 6507 else if (phy_interface_mode_is_rgmii(state->interface) || 6508 phy_interface_mode_is_8023z(state->interface) || 6509 state->interface == PHY_INTERFACE_MODE_SGMII) 6510 mvpp2_gmac_config(port, mode, state); 6511 6512 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) 6513 mvpp2_port_loopback_set(port, state); 6514 } 6515 6516 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, 6517 phy_interface_t interface) 6518 { 6519 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6520 6521 if (port->priv->hw_version >= MVPP22 && 6522 port->phy_interface != interface) { 6523 port->phy_interface = interface; 6524 6525 /* Unmask interrupts */ 6526 mvpp22_gop_unmask_irq(port); 6527 } 6528 6529 if (!mvpp2_is_xlg(interface)) { 6530 /* Release GMAC reset and wait */ 6531 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, 6532 MVPP2_GMAC_PORT_RESET_MASK, 0); 6533 6534 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & 6535 MVPP2_GMAC_PORT_RESET_MASK) 6536 continue; 6537 } 6538 6539 mvpp2_port_enable(port); 6540 6541 /* Allow the link to come up if in in-band mode, otherwise the 6542 * link is forced via mac_link_down()/mac_link_up() 6543 */ 6544 if (phylink_autoneg_inband(mode)) { 6545 if (mvpp2_is_xlg(interface)) 6546 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6547 MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6548 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0); 6549 else 6550 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6551 MVPP2_GMAC_FORCE_LINK_PASS | 6552 MVPP2_GMAC_FORCE_LINK_DOWN, 0); 6553 } 6554 6555 return 0; 6556 } 6557 6558 static void mvpp2_mac_link_up(struct phylink_config *config, 6559 struct phy_device *phy, 6560 unsigned int mode, phy_interface_t interface, 6561 int speed, int duplex, 6562 bool tx_pause, bool rx_pause) 6563 { 6564 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6565 u32 val; 6566 int i; 6567 6568 if (mvpp2_is_xlg(interface)) { 6569 if (!phylink_autoneg_inband(mode)) { 6570 val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 6571 if (tx_pause) 6572 val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; 6573 if (rx_pause) 6574 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; 6575 6576 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6577 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | 6578 
MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6579 MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | 6580 MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); 6581 } 6582 } else { 6583 if (!phylink_autoneg_inband(mode)) { 6584 val = MVPP2_GMAC_FORCE_LINK_PASS; 6585 6586 if (speed == SPEED_1000 || speed == SPEED_2500) 6587 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 6588 else if (speed == SPEED_100) 6589 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 6590 6591 if (duplex == DUPLEX_FULL) 6592 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 6593 6594 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6595 MVPP2_GMAC_FORCE_LINK_DOWN | 6596 MVPP2_GMAC_FORCE_LINK_PASS | 6597 MVPP2_GMAC_CONFIG_MII_SPEED | 6598 MVPP2_GMAC_CONFIG_GMII_SPEED | 6599 MVPP2_GMAC_CONFIG_FULL_DUPLEX, val); 6600 } 6601 6602 /* We can always update the flow control enable bits; 6603 * these will only be effective if flow control AN 6604 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. 6605 */ 6606 val = 0; 6607 if (tx_pause) 6608 val |= MVPP22_CTRL4_TX_FC_EN; 6609 if (rx_pause) 6610 val |= MVPP22_CTRL4_RX_FC_EN; 6611 6612 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, 6613 MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, 6614 val); 6615 } 6616 6617 if (port->priv->global_tx_fc) { 6618 port->tx_fc = tx_pause; 6619 if (tx_pause) 6620 mvpp2_rxq_enable_fc(port); 6621 else 6622 mvpp2_rxq_disable_fc(port); 6623 if (port->priv->percpu_pools) { 6624 for (i = 0; i < port->nrxqs; i++) 6625 mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause); 6626 } else { 6627 mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause); 6628 mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause); 6629 } 6630 if (port->priv->hw_version == MVPP23) 6631 mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause); 6632 } 6633 6634 mvpp2_port_enable(port); 6635 6636 mvpp2_egress_enable(port); 6637 mvpp2_ingress_enable(port); 6638 netif_tx_wake_all_queues(port->dev); 6639 } 6640 6641 static void mvpp2_mac_link_down(struct phylink_config *config, 6642 unsigned int mode, phy_interface_t interface) 6643 { 6644 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6645 u32 val; 6646 6647 if (!phylink_autoneg_inband(mode)) { 6648 if (mvpp2_is_xlg(interface)) { 6649 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 6650 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 6651 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; 6652 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 6653 } else { 6654 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6655 val &= ~MVPP2_GMAC_FORCE_LINK_PASS; 6656 val |= MVPP2_GMAC_FORCE_LINK_DOWN; 6657 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6658 } 6659 } 6660 6661 netif_tx_stop_all_queues(port->dev); 6662 mvpp2_egress_disable(port); 6663 mvpp2_ingress_disable(port); 6664 6665 mvpp2_port_disable(port); 6666 } 6667 6668 static const struct phylink_mac_ops mvpp2_phylink_ops = { 6669 .mac_select_pcs = mvpp2_select_pcs, 6670 .mac_prepare = mvpp2_mac_prepare, 6671 .mac_config = mvpp2_mac_config, 6672 .mac_finish = mvpp2_mac_finish, 6673 .mac_link_up = mvpp2_mac_link_up, 6674 .mac_link_down = mvpp2_mac_link_down, 6675 }; 6676 6677 /* Work-around for ACPI */ 6678 static void mvpp2_acpi_start(struct mvpp2_port *port) 6679 { 6680 /* Phylink isn't used as of now for ACPI, so the MAC has to be 6681 * configured manually when the interface is started. This will 6682 * be removed as soon as the phylink ACPI support lands in. 
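 * The sequence below replays a phylink major-config cycle by hand:
 * mac_prepare(), mac_config(), pcs_config(), mac_finish() and finally
 * mac_link_up() with in-band parameters.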
6683 */ 6684 struct phylink_link_state state = { 6685 .interface = port->phy_interface, 6686 }; 6687 struct phylink_pcs *pcs; 6688 6689 pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface); 6690 6691 mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND, 6692 port->phy_interface); 6693 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); 6694 pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED, 6695 port->phy_interface, state.advertising, 6696 false); 6697 mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, 6698 port->phy_interface); 6699 mvpp2_mac_link_up(&port->phylink_config, NULL, 6700 MLO_AN_INBAND, port->phy_interface, 6701 SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); 6702 } 6703 6704 /* In order to ensure backward compatibility for ACPI, check if the port 6705 * firmware node comprises the necessary description allowing to use phylink. 6706 */ 6707 static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode) 6708 { 6709 if (!is_acpi_node(port_fwnode)) 6710 return false; 6711 6712 return (!fwnode_property_present(port_fwnode, "phy-handle") && 6713 !fwnode_property_present(port_fwnode, "managed") && 6714 !fwnode_get_named_child_node(port_fwnode, "fixed-link")); 6715 } 6716 6717 /* Ports initialization */ 6718 static int mvpp2_port_probe(struct platform_device *pdev, 6719 struct fwnode_handle *port_fwnode, 6720 struct mvpp2 *priv) 6721 { 6722 struct phy *comphy = NULL; 6723 struct mvpp2_port *port; 6724 struct mvpp2_port_pcpu *port_pcpu; 6725 struct device_node *port_node = to_of_node(port_fwnode); 6726 netdev_features_t features; 6727 struct net_device *dev; 6728 struct phylink *phylink; 6729 char *mac_from = ""; 6730 unsigned int ntxqs, nrxqs, thread; 6731 unsigned long flags = 0; 6732 bool has_tx_irqs; 6733 u32 id; 6734 int phy_mode; 6735 int err, i; 6736 6737 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); 6738 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { 6739 dev_err(&pdev->dev, 6740 "not enough IRQs to support multi queue mode\n"); 6741 return -EINVAL; 6742 } 6743 6744 ntxqs = MVPP2_MAX_TXQ; 6745 nrxqs = mvpp2_get_nrxqs(priv); 6746 6747 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); 6748 if (!dev) 6749 return -ENOMEM; 6750 6751 phy_mode = fwnode_get_phy_mode(port_fwnode); 6752 if (phy_mode < 0) { 6753 dev_err(&pdev->dev, "incorrect phy mode\n"); 6754 err = phy_mode; 6755 goto err_free_netdev; 6756 } 6757 6758 /* 6759 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. 6760 * Existing usage of 10GBASE-KR is not correct; no backplane 6761 * negotiation is done, and this driver does not actually support 6762 * 10GBASE-KR. 
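 * (True 10GBASE-KR would additionally need the clause 73 backplane
 * autonegotiation and clause 72 link training of IEEE 802.3, neither of
 * which is implemented here.)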
6763 */ 6764 if (phy_mode == PHY_INTERFACE_MODE_10GKR) 6765 phy_mode = PHY_INTERFACE_MODE_10GBASER; 6766 6767 if (port_node) { 6768 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); 6769 if (IS_ERR(comphy)) { 6770 if (PTR_ERR(comphy) == -EPROBE_DEFER) { 6771 err = -EPROBE_DEFER; 6772 goto err_free_netdev; 6773 } 6774 comphy = NULL; 6775 } 6776 } 6777 6778 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { 6779 err = -EINVAL; 6780 dev_err(&pdev->dev, "missing port-id value\n"); 6781 goto err_free_netdev; 6782 } 6783 6784 dev->tx_queue_len = MVPP2_MAX_TXD_MAX; 6785 dev->watchdog_timeo = 5 * HZ; 6786 dev->netdev_ops = &mvpp2_netdev_ops; 6787 dev->ethtool_ops = &mvpp2_eth_tool_ops; 6788 6789 port = netdev_priv(dev); 6790 port->dev = dev; 6791 port->fwnode = port_fwnode; 6792 port->ntxqs = ntxqs; 6793 port->nrxqs = nrxqs; 6794 port->priv = priv; 6795 port->has_tx_irqs = has_tx_irqs; 6796 port->flags = flags; 6797 6798 err = mvpp2_queue_vectors_init(port, port_node); 6799 if (err) 6800 goto err_free_netdev; 6801 6802 if (port_node) 6803 port->port_irq = of_irq_get_byname(port_node, "link"); 6804 else 6805 port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); 6806 if (port->port_irq == -EPROBE_DEFER) { 6807 err = -EPROBE_DEFER; 6808 goto err_deinit_qvecs; 6809 } 6810 if (port->port_irq <= 0) 6811 /* the link irq is optional */ 6812 port->port_irq = 0; 6813 6814 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) 6815 port->flags |= MVPP2_F_LOOPBACK; 6816 6817 port->id = id; 6818 if (priv->hw_version == MVPP21) 6819 port->first_rxq = port->id * port->nrxqs; 6820 else 6821 port->first_rxq = port->id * priv->max_port_rxqs; 6822 6823 port->of_node = port_node; 6824 port->phy_interface = phy_mode; 6825 port->comphy = comphy; 6826 6827 if (priv->hw_version == MVPP21) { 6828 port->base = devm_platform_ioremap_resource(pdev, 2 + id); 6829 if (IS_ERR(port->base)) { 6830 err = PTR_ERR(port->base); 6831 goto err_free_irq; 6832 } 6833 6834 port->stats_base = port->priv->lms_base + 6835 MVPP21_MIB_COUNTERS_OFFSET + 6836 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; 6837 } else { 6838 if (fwnode_property_read_u32(port_fwnode, "gop-port-id", 6839 &port->gop_id)) { 6840 err = -EINVAL; 6841 dev_err(&pdev->dev, "missing gop-port-id value\n"); 6842 goto err_deinit_qvecs; 6843 } 6844 6845 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); 6846 port->stats_base = port->priv->iface_base + 6847 MVPP22_MIB_COUNTERS_OFFSET + 6848 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; 6849 6850 /* We may want a property to describe whether we should use 6851 * MAC hardware timestamping. 
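 * For now, hardware timestamping is simply enabled whenever a TAI (PTP
 * clock) block was probed, i.e. when priv->tai is non-NULL.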
6852 */ 6853 if (priv->tai) 6854 port->hwtstamp = true; 6855 } 6856 6857 /* Alloc per-cpu and ethtool stats */ 6858 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); 6859 if (!port->stats) { 6860 err = -ENOMEM; 6861 goto err_free_irq; 6862 } 6863 6864 port->ethtool_stats = devm_kcalloc(&pdev->dev, 6865 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), 6866 sizeof(u64), GFP_KERNEL); 6867 if (!port->ethtool_stats) { 6868 err = -ENOMEM; 6869 goto err_free_stats; 6870 } 6871 6872 mutex_init(&port->gather_stats_lock); 6873 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); 6874 6875 err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); 6876 if (err < 0) 6877 goto err_free_stats; 6878 6879 port->tx_ring_size = MVPP2_MAX_TXD_DFLT; 6880 port->rx_ring_size = MVPP2_MAX_RXD_DFLT; 6881 SET_NETDEV_DEV(dev, &pdev->dev); 6882 6883 err = mvpp2_port_init(port); 6884 if (err < 0) { 6885 dev_err(&pdev->dev, "failed to init port %d\n", id); 6886 goto err_free_stats; 6887 } 6888 6889 mvpp2_port_periodic_xon_disable(port); 6890 6891 mvpp2_mac_reset_assert(port); 6892 mvpp22_pcs_reset_assert(port); 6893 6894 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); 6895 if (!port->pcpu) { 6896 err = -ENOMEM; 6897 goto err_free_txq_pcpu; 6898 } 6899 6900 if (!port->has_tx_irqs) { 6901 for (thread = 0; thread < priv->nthreads; thread++) { 6902 port_pcpu = per_cpu_ptr(port->pcpu, thread); 6903 6904 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, 6905 HRTIMER_MODE_REL_PINNED_SOFT); 6906 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; 6907 port_pcpu->timer_scheduled = false; 6908 port_pcpu->dev = dev; 6909 } 6910 } 6911 6912 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 6913 NETIF_F_TSO; 6914 dev->features = features | NETIF_F_RXCSUM; 6915 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | 6916 NETIF_F_HW_VLAN_CTAG_FILTER; 6917 6918 if (mvpp22_rss_is_supported(port)) { 6919 dev->hw_features |= NETIF_F_RXHASH; 6920 dev->features |= NETIF_F_NTUPLE; 6921 } 6922 6923 if (!port->priv->percpu_pools) 6924 mvpp2_set_hw_csum(port, port->pool_long->id); 6925 else if (port->ntxqs >= num_possible_cpus() * 2) 6926 dev->xdp_features = NETDEV_XDP_ACT_BASIC | 6927 NETDEV_XDP_ACT_REDIRECT | 6928 NETDEV_XDP_ACT_NDO_XMIT; 6929 6930 dev->vlan_features |= features; 6931 netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS); 6932 6933 dev->priv_flags |= IFF_UNICAST_FLT; 6934 6935 /* MTU range: 68 - 9704 */ 6936 dev->min_mtu = ETH_MIN_MTU; 6937 /* 9704 == 9728 - 20 and rounding to 8 */ 6938 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; 6939 device_set_node(&dev->dev, port_fwnode); 6940 dev->dev_port = port->id; 6941 6942 port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; 6943 port->pcs_gmac.neg_mode = true; 6944 port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; 6945 port->pcs_xlg.neg_mode = true; 6946 6947 if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { 6948 port->phylink_config.dev = &dev->dev; 6949 port->phylink_config.type = PHYLINK_NETDEV; 6950 port->phylink_config.mac_capabilities = 6951 MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; 6952 6953 if (port->priv->global_tx_fc) 6954 port->phylink_config.mac_capabilities |= 6955 MAC_SYM_PAUSE | MAC_ASYM_PAUSE; 6956 6957 if (mvpp2_port_supports_xlg(port)) { 6958 /* If a COMPHY is present, we can support any of 6959 * the serdes modes and switch between them. 
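 * Without a COMPHY the serdes configuration is fixed by the firmware, so
 * only the single interface mode named in the firmware node is advertised
 * below.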
6960 */ 6961 if (comphy) { 6962 __set_bit(PHY_INTERFACE_MODE_5GBASER, 6963 port->phylink_config.supported_interfaces); 6964 __set_bit(PHY_INTERFACE_MODE_10GBASER, 6965 port->phylink_config.supported_interfaces); 6966 __set_bit(PHY_INTERFACE_MODE_XAUI, 6967 port->phylink_config.supported_interfaces); 6968 } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) { 6969 __set_bit(PHY_INTERFACE_MODE_5GBASER, 6970 port->phylink_config.supported_interfaces); 6971 } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) { 6972 __set_bit(PHY_INTERFACE_MODE_10GBASER, 6973 port->phylink_config.supported_interfaces); 6974 } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) { 6975 __set_bit(PHY_INTERFACE_MODE_XAUI, 6976 port->phylink_config.supported_interfaces); 6977 } 6978 6979 if (comphy) 6980 port->phylink_config.mac_capabilities |= 6981 MAC_10000FD | MAC_5000FD; 6982 else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) 6983 port->phylink_config.mac_capabilities |= 6984 MAC_5000FD; 6985 else 6986 port->phylink_config.mac_capabilities |= 6987 MAC_10000FD; 6988 } 6989 6990 if (mvpp2_port_supports_rgmii(port)) { 6991 phy_interface_set_rgmii(port->phylink_config.supported_interfaces); 6992 __set_bit(PHY_INTERFACE_MODE_MII, 6993 port->phylink_config.supported_interfaces); 6994 } 6995 6996 if (comphy) { 6997 /* If a COMPHY is present, we can support any of the 6998 * serdes modes and switch between them. 6999 */ 7000 __set_bit(PHY_INTERFACE_MODE_SGMII, 7001 port->phylink_config.supported_interfaces); 7002 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 7003 port->phylink_config.supported_interfaces); 7004 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 7005 port->phylink_config.supported_interfaces); 7006 } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { 7007 /* No COMPHY, with only 2500BASE-X mode supported */ 7008 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 7009 port->phylink_config.supported_interfaces); 7010 } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || 7011 phy_mode == PHY_INTERFACE_MODE_SGMII) { 7012 /* No COMPHY, we can switch between 1000BASE-X and SGMII 7013 */ 7014 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 7015 port->phylink_config.supported_interfaces); 7016 __set_bit(PHY_INTERFACE_MODE_SGMII, 7017 port->phylink_config.supported_interfaces); 7018 } 7019 7020 phylink = phylink_create(&port->phylink_config, port_fwnode, 7021 phy_mode, &mvpp2_phylink_ops); 7022 if (IS_ERR(phylink)) { 7023 err = PTR_ERR(phylink); 7024 goto err_free_port_pcpu; 7025 } 7026 port->phylink = phylink; 7027 } else { 7028 dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id); 7029 port->phylink = NULL; 7030 } 7031 7032 /* Cycle the comphy to power it down, saving 270mW per port - 7033 * don't worry about an error powering it up. When the comphy 7034 * driver does this, we can remove this code. 
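 * (mvpp22_comphy_init() is expected to leave the lane powered on as a
 * side effect; the phy_power_off() below is what actually powers it down
 * again until the port is brought up.)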
7035 	 */
7036 	if (port->comphy) {
7037 		err = mvpp22_comphy_init(port, port->phy_interface);
7038 		if (err == 0)
7039 			phy_power_off(port->comphy);
7040 	}
7041 
7042 	err = register_netdev(dev);
7043 	if (err < 0) {
7044 		dev_err(&pdev->dev, "failed to register netdev\n");
7045 		goto err_phylink;
7046 	}
7047 	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
7048 
7049 	priv->port_list[priv->port_count++] = port;
7050 
7051 	return 0;
7052 
7053 err_phylink:
7054 	if (port->phylink)
7055 		phylink_destroy(port->phylink);
7056 err_free_port_pcpu:
7057 	free_percpu(port->pcpu);
7058 err_free_txq_pcpu:
7059 	for (i = 0; i < port->ntxqs; i++)
7060 		free_percpu(port->txqs[i]->pcpu);
7061 err_free_stats:
7062 	free_percpu(port->stats);
7063 err_free_irq:
7064 	if (port->port_irq)
7065 		irq_dispose_mapping(port->port_irq);
7066 err_deinit_qvecs:
7067 	mvpp2_queue_vectors_deinit(port);
7068 err_free_netdev:
7069 	free_netdev(dev);
7070 	return err;
7071 }
7072 
7073 /* Ports removal routine */
7074 static void mvpp2_port_remove(struct mvpp2_port *port)
7075 {
7076 	int i;
7077 
7078 	unregister_netdev(port->dev);
7079 	if (port->phylink)
7080 		phylink_destroy(port->phylink);
7081 	free_percpu(port->pcpu);
7082 	free_percpu(port->stats);
7083 	for (i = 0; i < port->ntxqs; i++)
7084 		free_percpu(port->txqs[i]->pcpu);
7085 	mvpp2_queue_vectors_deinit(port);
7086 	if (port->port_irq)
7087 		irq_dispose_mapping(port->port_irq);
7088 	free_netdev(port->dev);
7089 }
7090 
7091 /* Initialize decoding windows */
7092 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7093 				    struct mvpp2 *priv)
7094 {
7095 	u32 win_enable;
7096 	int i;
7097 
7098 	for (i = 0; i < 6; i++) {
7099 		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7100 		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7101 
7102 		if (i < 4)
7103 			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7104 	}
7105 
7106 	win_enable = 0;
7107 
7108 	for (i = 0; i < dram->num_cs; i++) {
7109 		const struct mbus_dram_window *cs = dram->cs + i;
7110 
7111 		mvpp2_write(priv, MVPP2_WIN_BASE(i),
7112 			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7113 			    dram->mbus_dram_target_id);
7114 
7115 		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7116 			    (cs->size - 1) & 0xffff0000);
7117 
7118 		win_enable |= (1 << i);
7119 	}
7120 
7121 	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
7122 }
7123 
7124 /* Initialize Rx FIFOs */
7125 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7126 {
7127 	int port;
7128 
7129 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7130 		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7131 			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7132 		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7133 			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
7134 	}
7135 
7136 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7137 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
7138 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7139 }
7140 
7141 static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
7142 {
7143 	int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
7144 
7145 	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
7146 	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
7147 }
7148 
7149 /* Initialize Rx FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
7150  * 4kB fixed space must be assigned for the loopback port.
7151  * Redistribute the remaining available 44kB space among all active ports.
7152  * Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G
7153  * SGMII link.
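 * Worked example (hypothetical: ports 0-2 all active): out of 44kB,
 * port 0 takes max(44/3, 32) = 32kB, port 1 takes max(12/2, 8) = 8kB,
 * and the last port receives the remaining 4kB.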
7154  */
7155 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
7156 {
7157 	int remaining_ports_count;
7158 	unsigned long port_map;
7159 	int size_remainder;
7160 	int port, size;
7161 
7162 	/* The loopback requires fixed 4kB of the FIFO space assignment. */
7163 	mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
7164 			      MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7165 	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
7166 
7167 	/* Set RX FIFO size to 0 for inactive ports. */
7168 	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
7169 		mvpp22_rx_fifo_set_hw(priv, port, 0);
7170 
7171 	/* Assign remaining RX FIFO space among all active ports. */
7172 	size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
7173 	remaining_ports_count = hweight_long(port_map);
7174 
7175 	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
7176 		if (remaining_ports_count == 1)
7177 			size = size_remainder;
7178 		else if (port == 0)
7179 			size = max(size_remainder / remaining_ports_count,
7180 				   MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
7181 		else if (port == 1)
7182 			size = max(size_remainder / remaining_ports_count,
7183 				   MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
7184 		else
7185 			size = size_remainder / remaining_ports_count;
7186 
7187 		size_remainder -= size;
7188 		remaining_ports_count--;
7189 
7190 		mvpp22_rx_fifo_set_hw(priv, port, size);
7191 	}
7192 
7193 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7194 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
7195 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7196 }
7197 
7198 /* Configure Rx FIFO Flow control thresholds */
7199 static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
7200 {
7201 	int port, val;
7202 
7203 	/* Port 0: maximum speed 10Gb/s;
7204 	 * the spec requires an RX FIFO threshold of 9KB.
7205 	 * Port 1: maximum speed 5Gb/s;
7206 	 * the spec requires an RX FIFO threshold of 4KB.
7207 	 * Port 2: maximum speed 1Gb/s;
7208 	 * the spec requires an RX FIFO threshold of 2KB.
7209 	 */
7210 
7211 	/* Without loopback port */
7212 	for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
7213 		if (port == 0) {
7214 			val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7215 				<< MVPP2_RX_FC_TRSH_OFFS;
7216 			val &= MVPP2_RX_FC_TRSH_MASK;
7217 			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7218 		} else if (port == 1) {
7219 			val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7220 				<< MVPP2_RX_FC_TRSH_OFFS;
7221 			val &= MVPP2_RX_FC_TRSH_MASK;
7222 			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7223 		} else {
7224 			val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7225 				<< MVPP2_RX_FC_TRSH_OFFS;
7226 			val &= MVPP2_RX_FC_TRSH_MASK;
7227 			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7228 		}
7229 	}
7230 }
7231 
7232 /* Enable or disable Rx FIFO Flow control for a port */
7233 void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
7234 {
7235 	int val;
7236 
7237 	val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
7238 
7239 	if (en)
7240 		val |= MVPP2_RX_FC_EN;
7241 	else
7242 		val &= ~MVPP2_RX_FC_EN;
7243 
7244 	mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7245 }
7246 
7247 static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
7248 {
7249 	int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
7250 
7251 	mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
7252 	mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
7253 }
7254 
7255 /* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
7256  * 1kB fixed space must be assigned for the loopback port.
7257  * Redistribute the remaining available 18kB space among all active ports.
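 * (e.g., hypothetically with ports 0-2 all active: port 0 takes 10kB per
 * the rule below, port 1 takes 8/2 = 4kB, and the last port takes the
 * remaining 4kB)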
7258 * The 10G interface should use 10kB (which is maximum possible size 7259 * per single port). 7260 */ 7261 static void mvpp22_tx_fifo_init(struct mvpp2 *priv) 7262 { 7263 int remaining_ports_count; 7264 unsigned long port_map; 7265 int size_remainder; 7266 int port, size; 7267 7268 /* The loopback requires fixed 1kB of the FIFO space assignment. */ 7269 mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, 7270 MVPP22_TX_FIFO_DATA_SIZE_1KB); 7271 port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); 7272 7273 /* Set TX FIFO size to 0 for inactive ports. */ 7274 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) 7275 mvpp22_tx_fifo_set_hw(priv, port, 0); 7276 7277 /* Assign remaining TX FIFO space among all active ports. */ 7278 size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB; 7279 remaining_ports_count = hweight_long(port_map); 7280 7281 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { 7282 if (remaining_ports_count == 1) 7283 size = min(size_remainder, 7284 MVPP22_TX_FIFO_DATA_SIZE_10KB); 7285 else if (port == 0) 7286 size = MVPP22_TX_FIFO_DATA_SIZE_10KB; 7287 else 7288 size = size_remainder / remaining_ports_count; 7289 7290 size_remainder -= size; 7291 remaining_ports_count--; 7292 7293 mvpp22_tx_fifo_set_hw(priv, port, size); 7294 } 7295 } 7296 7297 static void mvpp2_axi_init(struct mvpp2 *priv) 7298 { 7299 u32 val, rdval, wrval; 7300 7301 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 7302 7303 /* AXI Bridge Configuration */ 7304 7305 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 7306 << MVPP22_AXI_ATTR_CACHE_OFFS; 7307 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7308 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 7309 7310 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 7311 << MVPP22_AXI_ATTR_CACHE_OFFS; 7312 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7313 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 7314 7315 /* BM */ 7316 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); 7317 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 7318 7319 /* Descriptors */ 7320 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 7321 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 7322 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 7323 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 7324 7325 /* Buffer Data */ 7326 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 7327 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 7328 7329 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 7330 << MVPP22_AXI_CODE_CACHE_OFFS; 7331 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 7332 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7333 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 7334 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 7335 7336 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 7337 << MVPP22_AXI_CODE_CACHE_OFFS; 7338 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7339 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7340 7341 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); 7342 7343 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 7344 << MVPP22_AXI_CODE_CACHE_OFFS; 7345 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7346 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7347 7348 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 7349 } 7350 7351 /* Initialize network controller common part HW */ 7352 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) 7353 { 7354 const struct mbus_dram_target_info *dram_target_info; 7355 int err, i; 7356 u32 val; 7357 7358 /* MBUS windows configuration */ 7359 dram_target_info = mv_mbus_dram_info(); 7360 if (dram_target_info) 7361 
mvpp2_conf_mbus_windows(dram_target_info, priv);
7362 
7363 	if (priv->hw_version >= MVPP22)
7364 		mvpp2_axi_init(priv);
7365 
7366 	/* Disable HW PHY polling */
7367 	if (priv->hw_version == MVPP21) {
7368 		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7369 		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7370 		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7371 	} else {
7372 		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7373 		val &= ~MVPP22_SMI_POLLING_EN;
7374 		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7375 	}
7376 
7377 	/* Allocate and initialize aggregated TXQs */
7378 	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
7379 				       sizeof(*priv->aggr_txqs),
7380 				       GFP_KERNEL);
7381 	if (!priv->aggr_txqs)
7382 		return -ENOMEM;
7383 
7384 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7385 		priv->aggr_txqs[i].id = i;
7386 		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
7387 		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
7388 		if (err < 0)
7389 			return err;
7390 	}
7391 
7392 	/* FIFO init */
7393 	if (priv->hw_version == MVPP21) {
7394 		mvpp2_rx_fifo_init(priv);
7395 	} else {
7396 		mvpp22_rx_fifo_init(priv);
7397 		mvpp22_tx_fifo_init(priv);
7398 		if (priv->hw_version == MVPP23)
7399 			mvpp23_rx_fifo_fc_set_tresh(priv);
7400 	}
7401 
7402 	if (priv->hw_version == MVPP21)
7403 		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7404 		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
7405 
7406 	/* Allow cache snoop when transmitting packets */
7407 	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7408 
7409 	/* Buffer Manager initialization */
7410 	err = mvpp2_bm_init(&pdev->dev, priv);
7411 	if (err < 0)
7412 		return err;
7413 
7414 	/* Parser default initialization */
7415 	err = mvpp2_prs_default_init(pdev, priv);
7416 	if (err < 0)
7417 		return err;
7418 
7419 	/* Classifier default initialization */
7420 	mvpp2_cls_init(priv);
7421 
7422 	return 0;
7423 }
7424 
7425 static int mvpp2_get_sram(struct platform_device *pdev,
7426 			  struct mvpp2 *priv)
7427 {
7428 	struct resource *res;
7429 	void __iomem *base;
7430 
7431 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
7432 	if (!res) {
7433 		if (has_acpi_companion(&pdev->dev))
7434 			dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
7435 		else
7436 			dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
7437 		return 0;
7438 	}
7439 
7440 	base = devm_ioremap_resource(&pdev->dev, res);
7441 	if (IS_ERR(base))
7442 		return PTR_ERR(base);
7443 
7444 	priv->cm3_base = base;
7445 	return 0;
7446 }
7447 
7448 static int mvpp2_probe(struct platform_device *pdev)
7449 {
7450 	struct mvpp2 *priv;
7451 	struct resource *res;
7452 	void __iomem *base;
7453 	int i, shared;
7454 	int err;
7455 
7456 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
7457 	if (!priv)
7458 		return -ENOMEM;
7459 
7460 	priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
7461 
7462 	/* Multi-queue mode isn't supported on PPv2.1; fall back to single
7463 	 * mode
7464 	 */
7465 	if (priv->hw_version == MVPP21)
7466 		queue_mode = MVPP2_QDIST_SINGLE_MODE;
7467 
7468 	base = devm_platform_ioremap_resource(pdev, 0);
7469 	if (IS_ERR(base))
7470 		return PTR_ERR(base);
7471 
7472 	if (priv->hw_version == MVPP21) {
7473 		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
7474 		if (IS_ERR(priv->lms_base))
7475 			return PTR_ERR(priv->lms_base);
7476 	} else {
7477 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7478 		if (!res) {
7479 			dev_err(&pdev->dev, "Invalid resource\n");
7480 			return -EINVAL;
7481 		}
7482 		if (has_acpi_companion(&pdev->dev)) {
7483 			/* If the MDIO memory
region is declared in ACPI, it may
7484 			 * already appear as 'in use' in the OS. Because
7485 			 * it is overlapped by the second region of the
7486 			 * network controller, make sure it is released
7487 			 * before requesting it again. The mvpp2 driver
7488 			 * takes care to avoid concurrent access to this
7489 			 * memory region.
7490 			 */
7491 			release_resource(res);
7492 		}
7493 		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7494 		if (IS_ERR(priv->iface_base))
7495 			return PTR_ERR(priv->iface_base);
7496 
7497 		/* Map CM3 SRAM */
7498 		err = mvpp2_get_sram(pdev, priv);
7499 		if (err)
7500 			dev_warn(&pdev->dev, "Failed to alloc CM3 SRAM\n");
7501 
7502 		/* Enable global Flow Control only if the CM3 SRAM handle is not NULL */
7503 		if (priv->cm3_base)
7504 			priv->global_tx_fc = true;
7505 	}
7506 
7507 	if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
7508 		priv->sysctrl_base =
7509 			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
7510 							"marvell,system-controller");
7511 		if (IS_ERR(priv->sysctrl_base))
7512 			/* The system controller regmap is optional for dt
7513 			 * compatibility reasons. When not provided, the
7514 			 * configuration of the GoP relies on the
7515 			 * firmware/bootloader.
7516 			 */
7517 			priv->sysctrl_base = NULL;
7518 	}
7519 
7520 	if (priv->hw_version >= MVPP22 &&
7521 	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
7522 		priv->percpu_pools = 1;
7523 
7524 	mvpp2_setup_bm_pool();
7525 
7526 
7527 	priv->nthreads = min_t(unsigned int, num_present_cpus(),
7528 			       MVPP2_MAX_THREADS);
7529 
7530 	shared = num_present_cpus() - priv->nthreads;
7531 	if (shared > 0)
7532 		bitmap_set(&priv->lock_map, 0,
7533 			   min_t(int, shared, MVPP2_MAX_THREADS));
7534 
7535 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7536 		u32 addr_space_sz;
7537 
7538 		addr_space_sz = (priv->hw_version == MVPP21 ?
7539 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); 7540 priv->swth_base[i] = base + i * addr_space_sz; 7541 } 7542 7543 if (priv->hw_version == MVPP21) 7544 priv->max_port_rxqs = 8; 7545 else 7546 priv->max_port_rxqs = 32; 7547 7548 if (dev_of_node(&pdev->dev)) { 7549 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); 7550 if (IS_ERR(priv->pp_clk)) 7551 return PTR_ERR(priv->pp_clk); 7552 err = clk_prepare_enable(priv->pp_clk); 7553 if (err < 0) 7554 return err; 7555 7556 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); 7557 if (IS_ERR(priv->gop_clk)) { 7558 err = PTR_ERR(priv->gop_clk); 7559 goto err_pp_clk; 7560 } 7561 err = clk_prepare_enable(priv->gop_clk); 7562 if (err < 0) 7563 goto err_pp_clk; 7564 7565 if (priv->hw_version >= MVPP22) { 7566 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); 7567 if (IS_ERR(priv->mg_clk)) { 7568 err = PTR_ERR(priv->mg_clk); 7569 goto err_gop_clk; 7570 } 7571 7572 err = clk_prepare_enable(priv->mg_clk); 7573 if (err < 0) 7574 goto err_gop_clk; 7575 7576 priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk"); 7577 if (IS_ERR(priv->mg_core_clk)) { 7578 err = PTR_ERR(priv->mg_core_clk); 7579 goto err_mg_clk; 7580 } 7581 7582 err = clk_prepare_enable(priv->mg_core_clk); 7583 if (err < 0) 7584 goto err_mg_clk; 7585 } 7586 7587 priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk"); 7588 if (IS_ERR(priv->axi_clk)) { 7589 err = PTR_ERR(priv->axi_clk); 7590 goto err_mg_core_clk; 7591 } 7592 7593 err = clk_prepare_enable(priv->axi_clk); 7594 if (err < 0) 7595 goto err_mg_core_clk; 7596 7597 /* Get system's tclk rate */ 7598 priv->tclk = clk_get_rate(priv->pp_clk); 7599 } else { 7600 err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk); 7601 if (err) { 7602 dev_err(&pdev->dev, "missing clock-frequency value\n"); 7603 return err; 7604 } 7605 } 7606 7607 if (priv->hw_version >= MVPP22) { 7608 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); 7609 if (err) 7610 goto err_axi_clk; 7611 /* Sadly, the BM pools all share the same register to 7612 * store the high 32 bits of their address. So they 7613 * must all have the same high 32 bits, which forces 7614 * us to restrict coherent memory to DMA_BIT_MASK(32). 7615 */ 7616 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 7617 if (err) 7618 goto err_axi_clk; 7619 } 7620 7621 /* Map DTS-active ports. Should be done before FIFO mvpp2_init */ 7622 device_for_each_child_node_scoped(&pdev->dev, port_fwnode) { 7623 if (!fwnode_property_read_u32(port_fwnode, "port-id", &i)) 7624 priv->port_map |= BIT(i); 7625 } 7626 7627 if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) 7628 priv->hw_version = MVPP23; 7629 7630 /* Init mss lock */ 7631 spin_lock_init(&priv->mss_spinlock); 7632 7633 /* Initialize network controller */ 7634 err = mvpp2_init(pdev, priv); 7635 if (err < 0) { 7636 dev_err(&pdev->dev, "failed to initialize controller\n"); 7637 goto err_axi_clk; 7638 } 7639 7640 err = mvpp22_tai_probe(&pdev->dev, priv); 7641 if (err < 0) 7642 goto err_axi_clk; 7643 7644 /* Initialize ports */ 7645 device_for_each_child_node_scoped(&pdev->dev, port_fwnode) { 7646 err = mvpp2_port_probe(pdev, port_fwnode, priv); 7647 if (err < 0) 7648 goto err_port_probe; 7649 } 7650 7651 if (priv->port_count == 0) { 7652 dev_err(&pdev->dev, "no ports enabled\n"); 7653 err = -ENODEV; 7654 goto err_axi_clk; 7655 } 7656 7657 /* Statistics must be gathered regularly because some of them (like 7658 * packets counters) are 32-bit registers and could overflow quite 7659 * quickly. 
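 * (Byte counters wrap even faster: at 10Gb/s line rate, 2^32 bytes pass
 * in roughly 3.4 seconds.)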
	/* Map DTS-active ports. Must be done before the FIFO configuration
	 * in mvpp2_init().
	 */
	device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
		if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
			priv->port_map |= BIT(i);
	}

	if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
		priv->hw_version = MVPP23;

	/* Initialize the MSS spinlock */
	spin_lock_init(&priv->mss_spinlock);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	err = mvpp22_tai_probe(&pdev->dev, priv);
	if (err < 0)
		goto err_axi_clk;

	/* Initialize ports */
	device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, on a 10Gb link at full bandwidth with the
	 * smallest (64B) packets, a 32-bit byte counter wraps in a few
	 * seconds and a 32-bit packet counter (~14.88 Mpps) in under five
	 * minutes. Use a workqueue to accumulate the values into 64-bit
	 * counters.
	 */
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
		err = mvpp2_enable_global_fc(priv);
		if (err)
			dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	for (i = 0; i < priv->port_count; i++)
		mvpp2_port_remove(priv->port_list[i]);
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
	clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static void mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	int i, poolnum = MVPP2_BM_POOLS_NUM;

	mvpp2_dbgfs_cleanup(priv);

	for (i = 0; i < priv->port_count; i++) {
		mutex_destroy(&priv->port_list[i]->gather_stats_lock);
		mvpp2_port_remove(priv->port_list[i]);
	}

	destroy_workqueue(priv->stats_queue);

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	for (i = 0; i < poolnum; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	/* Clocks are only taken on DT platforms; there is nothing to
	 * release under ACPI.
	 */
	if (!dev_of_node(&pdev->dev))
		return;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);
}
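/* The .data / .driver_data values in the match tables below carry the
 * hardware generation (MVPP21/MVPP22). An illustrative sketch of how
 * probe code typically recovers it, assuming the generic
 * device_get_match_data() helper (the actual retrieval happens earlier
 * in mvpp2_probe(), outside this excerpt):
 *
 *	priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
 */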
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
#endif

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

static int __init mvpp2_driver_init(void)
{
	return platform_driver_register(&mvpp2_driver);
}
module_init(mvpp2_driver_init);

static void __exit mvpp2_driver_exit(void)
{
	platform_driver_unregister(&mvpp2_driver);
	mvpp2_dbgfs_exit();
}
module_exit(mvpp2_driver_exit);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");
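/* Usage note (illustrative): the MODULE_DEVICE_TABLE() entries above
 * export modalias strings, so userspace can autoload this module when a
 * node matching "marvell,armada-375-pp2" / "marvell,armada-7k-pp22" or
 * the ACPI ID "MRVL0110" is enumerated; no manual modprobe is required.
 */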