1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver 4 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> 5 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> 6 */ 7 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/pci.h> 11 #include <linux/interrupt.h> 12 #include <linux/sched.h> 13 #include <linux/wait.h> 14 #include <linux/spinlock.h> 15 #include <linux/sort.h> 16 #include <linux/random.h> 17 #include <linux/netdevice.h> 18 #include <linux/skbuff.h> 19 #include <linux/socket.h> 20 #include <linux/etherdevice.h> 21 #include <linux/ethtool.h> 22 #include <linux/if_ether.h> 23 #include <linux/if_vlan.h> 24 #include <linux/if_bridge.h> 25 #include <linux/bitops.h> 26 #include <linux/ctype.h> 27 #include <linux/workqueue.h> 28 #include <net/switchdev.h> 29 #include <net/rtnetlink.h> 30 #include <net/netevent.h> 31 #include <net/arp.h> 32 #include <net/fib_rules.h> 33 #include <net/fib_notifier.h> 34 #include <linux/io-64-nonatomic-lo-hi.h> 35 36 #include "rocker_hw.h" 37 #include "rocker.h" 38 #include "rocker_tlv.h" 39 40 static const char rocker_driver_name[] = "rocker"; 41 42 static const struct pci_device_id rocker_pci_id_table[] = { 43 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, 44 {0, } 45 }; 46 47 struct rocker_wait { 48 wait_queue_head_t wait; 49 bool done; 50 bool nowait; 51 }; 52 53 static void rocker_wait_reset(struct rocker_wait *wait) 54 { 55 wait->done = false; 56 wait->nowait = false; 57 } 58 59 static void rocker_wait_init(struct rocker_wait *wait) 60 { 61 init_waitqueue_head(&wait->wait); 62 rocker_wait_reset(wait); 63 } 64 65 static struct rocker_wait *rocker_wait_create(void) 66 { 67 struct rocker_wait *wait; 68 69 wait = kzalloc(sizeof(*wait), GFP_KERNEL); 70 if (!wait) 71 return NULL; 72 return wait; 73 } 74 75 static void rocker_wait_destroy(struct rocker_wait *wait) 76 { 77 kfree(wait); 78 } 79 80 static bool rocker_wait_event_timeout(struct rocker_wait *wait, 81 unsigned long timeout) 82 { 83 wait_event_timeout(wait->wait, wait->done, HZ / 10); 84 if (!wait->done) 85 return false; 86 return true; 87 } 88 89 static void rocker_wait_wake_up(struct rocker_wait *wait) 90 { 91 wait->done = true; 92 wake_up(&wait->wait); 93 } 94 95 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector) 96 { 97 return rocker->msix_entries[vector].vector; 98 } 99 100 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port) 101 { 102 return rocker_msix_vector(rocker_port->rocker, 103 ROCKER_MSIX_VEC_TX(rocker_port->port_number)); 104 } 105 106 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port) 107 { 108 return rocker_msix_vector(rocker_port->rocker, 109 ROCKER_MSIX_VEC_RX(rocker_port->port_number)); 110 } 111 112 #define rocker_write32(rocker, reg, val) \ 113 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) 114 #define rocker_read32(rocker, reg) \ 115 readl((rocker)->hw_addr + (ROCKER_ ## reg)) 116 #define rocker_write64(rocker, reg, val) \ 117 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) 118 #define rocker_read64(rocker, reg) \ 119 readq((rocker)->hw_addr + (ROCKER_ ## reg)) 120 121 /***************************** 122 * HW basic testing functions 123 *****************************/ 124 125 static int rocker_reg_test(const struct rocker *rocker) 126 { 127 const struct pci_dev *pdev = rocker->pdev; 128 u64 test_reg; 129 u64 rnd; 130 131 rnd = get_random_u32(); 132 rnd >>= 1; 133 rocker_write32(rocker, TEST_REG, rnd); 134 test_reg = rocker_read32(rocker, TEST_REG); 135 if (test_reg != rnd * 2) { 136 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", 137 test_reg, rnd * 2); 138 return -EIO; 139 } 140 141 rnd = get_random_u32(); 142 rnd <<= 31; 143 rnd |= get_random_u32(); 144 rocker_write64(rocker, TEST_REG64, rnd); 145 test_reg = rocker_read64(rocker, TEST_REG64); 146 if (test_reg != rnd * 2) { 147 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", 148 test_reg, rnd * 2); 149 return -EIO; 150 } 151 152 return 0; 153 } 154 155 static int rocker_dma_test_one(const struct rocker *rocker, 156 struct rocker_wait *wait, u32 test_type, 157 dma_addr_t dma_handle, const unsigned char *buf, 158 const unsigned char *expect, size_t size) 159 { 160 const struct pci_dev *pdev = rocker->pdev; 161 int i; 162 163 rocker_wait_reset(wait); 164 rocker_write32(rocker, TEST_DMA_CTRL, test_type); 165 166 if (!rocker_wait_event_timeout(wait, HZ / 10)) { 167 dev_err(&pdev->dev, "no interrupt received within a timeout\n"); 168 return -EIO; 169 } 170 171 for (i = 0; i < size; i++) { 172 if (buf[i] != expect[i]) { 173 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", 174 buf[i], i, expect[i]); 175 return -EIO; 176 } 177 } 178 return 0; 179 } 180 181 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) 182 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96 183 184 static int rocker_dma_test_offset(const struct rocker *rocker, 185 struct rocker_wait *wait, int offset) 186 { 187 struct pci_dev *pdev = rocker->pdev; 188 unsigned char *alloc; 189 unsigned char *buf; 190 unsigned char *expect; 191 dma_addr_t dma_handle; 192 int i; 193 int err; 194 195 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, 196 GFP_KERNEL | GFP_DMA); 197 if (!alloc) 198 return -ENOMEM; 199 buf = alloc + offset; 200 expect = buf + ROCKER_TEST_DMA_BUF_SIZE; 201 202 dma_handle = dma_map_single(&pdev->dev, buf, ROCKER_TEST_DMA_BUF_SIZE, 203 DMA_BIDIRECTIONAL); 204 if (dma_mapping_error(&pdev->dev, dma_handle)) { 205 err = -EIO; 206 goto free_alloc; 207 } 208 209 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); 210 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); 211 212 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); 213 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, 214 dma_handle, buf, expect, 215 ROCKER_TEST_DMA_BUF_SIZE); 216 if (err) 217 goto unmap; 218 219 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); 220 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, 221 dma_handle, buf, expect, 222 ROCKER_TEST_DMA_BUF_SIZE); 223 if (err) 224 goto unmap; 225 226 get_random_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); 227 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) 228 expect[i] = ~buf[i]; 229 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, 230 dma_handle, buf, expect, 231 ROCKER_TEST_DMA_BUF_SIZE); 232 if (err) 233 goto unmap; 234 235 unmap: 236 dma_unmap_single(&pdev->dev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, 237 DMA_BIDIRECTIONAL); 238 free_alloc: 239 kfree(alloc); 240 241 return err; 242 } 243 244 static int rocker_dma_test(const struct rocker *rocker, 245 struct rocker_wait *wait) 246 { 247 int i; 248 int err; 249 250 for (i = 0; i < 8; i++) { 251 err = rocker_dma_test_offset(rocker, wait, i); 252 if (err) 253 return err; 254 } 255 return 0; 256 } 257 258 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) 259 { 260 struct rocker_wait *wait = dev_id; 261 262 rocker_wait_wake_up(wait); 263 264 return IRQ_HANDLED; 265 } 266 267 static int rocker_basic_hw_test(const struct rocker *rocker) 268 { 269 const struct pci_dev *pdev = rocker->pdev; 270 struct rocker_wait wait; 271 int err; 272 273 err = rocker_reg_test(rocker); 274 if (err) { 275 dev_err(&pdev->dev, "reg test failed\n"); 276 return err; 277 } 278 279 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), 280 rocker_test_irq_handler, 0, 281 rocker_driver_name, &wait); 282 if (err) { 283 dev_err(&pdev->dev, "cannot assign test irq\n"); 284 return err; 285 } 286 287 rocker_wait_init(&wait); 288 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); 289 290 if (!rocker_wait_event_timeout(&wait, HZ / 10)) { 291 dev_err(&pdev->dev, "no interrupt received within a timeout\n"); 292 err = -EIO; 293 goto free_irq; 294 } 295 296 err = rocker_dma_test(rocker, &wait); 297 if (err) 298 dev_err(&pdev->dev, "dma test failed\n"); 299 300 free_irq: 301 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); 302 return err; 303 } 304 305 /****************************************** 306 * DMA rings and descriptors manipulations 307 ******************************************/ 308 309 static u32 __pos_inc(u32 pos, size_t limit) 310 { 311 return ++pos == limit ? 0 : pos; 312 } 313 314 static int rocker_desc_err(const struct rocker_desc_info *desc_info) 315 { 316 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; 317 318 switch (err) { 319 case ROCKER_OK: 320 return 0; 321 case -ROCKER_ENOENT: 322 return -ENOENT; 323 case -ROCKER_ENXIO: 324 return -ENXIO; 325 case -ROCKER_ENOMEM: 326 return -ENOMEM; 327 case -ROCKER_EEXIST: 328 return -EEXIST; 329 case -ROCKER_EINVAL: 330 return -EINVAL; 331 case -ROCKER_EMSGSIZE: 332 return -EMSGSIZE; 333 case -ROCKER_ENOTSUP: 334 return -EOPNOTSUPP; 335 case -ROCKER_ENOBUFS: 336 return -ENOBUFS; 337 } 338 339 return -EINVAL; 340 } 341 342 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info) 343 { 344 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; 345 } 346 347 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info) 348 { 349 u32 comp_err = desc_info->desc->comp_err; 350 351 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false; 352 } 353 354 static void * 355 rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info) 356 { 357 return (void *)(uintptr_t)desc_info->desc->cookie; 358 } 359 360 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, 361 void *ptr) 362 { 363 desc_info->desc->cookie = (uintptr_t) ptr; 364 } 365 366 static struct rocker_desc_info * 367 rocker_desc_head_get(const struct rocker_dma_ring_info *info) 368 { 369 struct rocker_desc_info *desc_info; 370 u32 head = __pos_inc(info->head, info->size); 371 372 desc_info = &info->desc_info[info->head]; 373 if (head == info->tail) 374 return NULL; /* ring full */ 375 desc_info->tlv_size = 0; 376 return desc_info; 377 } 378 379 static void rocker_desc_commit(const struct rocker_desc_info *desc_info) 380 { 381 desc_info->desc->buf_size = desc_info->data_size; 382 desc_info->desc->tlv_size = desc_info->tlv_size; 383 } 384 385 static void rocker_desc_head_set(const struct rocker *rocker, 386 struct rocker_dma_ring_info *info, 387 const struct rocker_desc_info *desc_info) 388 { 389 u32 head = __pos_inc(info->head, info->size); 390 391 BUG_ON(head == info->tail); 392 rocker_desc_commit(desc_info); 393 info->head = head; 394 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); 395 } 396 397 static struct rocker_desc_info * 398 rocker_desc_tail_get(struct rocker_dma_ring_info *info) 399 { 400 struct rocker_desc_info *desc_info; 401 402 if (info->tail == info->head) 403 return NULL; /* nothing to be done between head and tail */ 404 desc_info = &info->desc_info[info->tail]; 405 if (!rocker_desc_gen(desc_info)) 406 return NULL; /* gen bit not set, desc is not ready yet */ 407 info->tail = __pos_inc(info->tail, info->size); 408 desc_info->tlv_size = desc_info->desc->tlv_size; 409 return desc_info; 410 } 411 412 static void rocker_dma_ring_credits_set(const struct rocker *rocker, 413 const struct rocker_dma_ring_info *info, 414 u32 credits) 415 { 416 if (credits) 417 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); 418 } 419 420 static unsigned long rocker_dma_ring_size_fix(size_t size) 421 { 422 return max(ROCKER_DMA_SIZE_MIN, 423 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); 424 } 425 426 static int rocker_dma_ring_create(const struct rocker *rocker, 427 unsigned int type, 428 size_t size, 429 struct rocker_dma_ring_info *info) 430 { 431 int i; 432 433 BUG_ON(size != rocker_dma_ring_size_fix(size)); 434 info->size = size; 435 info->type = type; 436 info->head = 0; 437 info->tail = 0; 438 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), 439 GFP_KERNEL); 440 if (!info->desc_info) 441 return -ENOMEM; 442 443 info->desc = dma_alloc_coherent(&rocker->pdev->dev, 444 info->size * sizeof(*info->desc), 445 &info->mapaddr, GFP_KERNEL); 446 if (!info->desc) { 447 kfree(info->desc_info); 448 return -ENOMEM; 449 } 450 451 for (i = 0; i < info->size; i++) 452 info->desc_info[i].desc = &info->desc[i]; 453 454 rocker_write32(rocker, DMA_DESC_CTRL(info->type), 455 ROCKER_DMA_DESC_CTRL_RESET); 456 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); 457 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); 458 459 return 0; 460 } 461 462 static void rocker_dma_ring_destroy(const struct rocker *rocker, 463 const struct rocker_dma_ring_info *info) 464 { 465 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); 466 467 dma_free_coherent(&rocker->pdev->dev, 468 info->size * sizeof(struct rocker_desc), info->desc, 469 info->mapaddr); 470 kfree(info->desc_info); 471 } 472 473 static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker, 474 struct rocker_dma_ring_info *info) 475 { 476 int i; 477 478 BUG_ON(info->head || info->tail); 479 480 /* When ring is consumer, we need to advance head for each desc. 481 * That tells hw that the desc is ready to be used by it. 482 */ 483 for (i = 0; i < info->size - 1; i++) 484 rocker_desc_head_set(rocker, info, &info->desc_info[i]); 485 rocker_desc_commit(&info->desc_info[i]); 486 } 487 488 static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker, 489 const struct rocker_dma_ring_info *info, 490 int direction, size_t buf_size) 491 { 492 struct pci_dev *pdev = rocker->pdev; 493 int i; 494 int err; 495 496 for (i = 0; i < info->size; i++) { 497 struct rocker_desc_info *desc_info = &info->desc_info[i]; 498 struct rocker_desc *desc = &info->desc[i]; 499 dma_addr_t dma_handle; 500 char *buf; 501 502 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); 503 if (!buf) { 504 err = -ENOMEM; 505 goto rollback; 506 } 507 508 dma_handle = dma_map_single(&pdev->dev, buf, buf_size, 509 direction); 510 if (dma_mapping_error(&pdev->dev, dma_handle)) { 511 kfree(buf); 512 err = -EIO; 513 goto rollback; 514 } 515 516 desc_info->data = buf; 517 desc_info->data_size = buf_size; 518 dma_unmap_addr_set(desc_info, mapaddr, dma_handle); 519 520 desc->buf_addr = dma_handle; 521 desc->buf_size = buf_size; 522 } 523 return 0; 524 525 rollback: 526 for (i--; i >= 0; i--) { 527 const struct rocker_desc_info *desc_info = &info->desc_info[i]; 528 529 dma_unmap_single(&pdev->dev, 530 dma_unmap_addr(desc_info, mapaddr), 531 desc_info->data_size, direction); 532 kfree(desc_info->data); 533 } 534 return err; 535 } 536 537 static void rocker_dma_ring_bufs_free(const struct rocker *rocker, 538 const struct rocker_dma_ring_info *info, 539 int direction) 540 { 541 struct pci_dev *pdev = rocker->pdev; 542 int i; 543 544 for (i = 0; i < info->size; i++) { 545 const struct rocker_desc_info *desc_info = &info->desc_info[i]; 546 struct rocker_desc *desc = &info->desc[i]; 547 548 desc->buf_addr = 0; 549 desc->buf_size = 0; 550 dma_unmap_single(&pdev->dev, 551 dma_unmap_addr(desc_info, mapaddr), 552 desc_info->data_size, direction); 553 kfree(desc_info->data); 554 } 555 } 556 557 static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info) 558 { 559 struct rocker_wait *wait; 560 561 wait = rocker_wait_create(); 562 if (!wait) 563 return -ENOMEM; 564 rocker_desc_cookie_ptr_set(desc_info, wait); 565 return 0; 566 } 567 568 static void 569 rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info) 570 { 571 struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info); 572 573 rocker_wait_destroy(wait); 574 } 575 576 static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker) 577 { 578 const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; 579 int i; 580 int err; 581 582 for (i = 0; i < cmd_ring->size; i++) { 583 err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]); 584 if (err) 585 goto rollback; 586 } 587 return 0; 588 589 rollback: 590 for (i--; i >= 0; i--) 591 rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); 592 return err; 593 } 594 595 static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker) 596 { 597 const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; 598 int i; 599 600 for (i = 0; i < cmd_ring->size; i++) 601 rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); 602 } 603 604 static int rocker_dma_rings_init(struct rocker *rocker) 605 { 606 const struct pci_dev *pdev = rocker->pdev; 607 int err; 608 609 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, 610 ROCKER_DMA_CMD_DEFAULT_SIZE, 611 &rocker->cmd_ring); 612 if (err) { 613 dev_err(&pdev->dev, "failed to create command dma ring\n"); 614 return err; 615 } 616 617 spin_lock_init(&rocker->cmd_ring_lock); 618 619 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, 620 DMA_BIDIRECTIONAL, PAGE_SIZE); 621 if (err) { 622 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); 623 goto err_dma_cmd_ring_bufs_alloc; 624 } 625 626 err = rocker_dma_cmd_ring_waits_alloc(rocker); 627 if (err) { 628 dev_err(&pdev->dev, "failed to alloc command dma ring waits\n"); 629 goto err_dma_cmd_ring_waits_alloc; 630 } 631 632 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, 633 ROCKER_DMA_EVENT_DEFAULT_SIZE, 634 &rocker->event_ring); 635 if (err) { 636 dev_err(&pdev->dev, "failed to create event dma ring\n"); 637 goto err_dma_event_ring_create; 638 } 639 640 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, 641 DMA_FROM_DEVICE, PAGE_SIZE); 642 if (err) { 643 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); 644 goto err_dma_event_ring_bufs_alloc; 645 } 646 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); 647 return 0; 648 649 err_dma_event_ring_bufs_alloc: 650 rocker_dma_ring_destroy(rocker, &rocker->event_ring); 651 err_dma_event_ring_create: 652 rocker_dma_cmd_ring_waits_free(rocker); 653 err_dma_cmd_ring_waits_alloc: 654 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, 655 DMA_BIDIRECTIONAL); 656 err_dma_cmd_ring_bufs_alloc: 657 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); 658 return err; 659 } 660 661 static void rocker_dma_rings_fini(struct rocker *rocker) 662 { 663 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, 664 DMA_BIDIRECTIONAL); 665 rocker_dma_ring_destroy(rocker, &rocker->event_ring); 666 rocker_dma_cmd_ring_waits_free(rocker); 667 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, 668 DMA_BIDIRECTIONAL); 669 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); 670 } 671 672 static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, 673 struct rocker_desc_info *desc_info, 674 struct sk_buff *skb, size_t buf_len) 675 { 676 const struct rocker *rocker = rocker_port->rocker; 677 struct pci_dev *pdev = rocker->pdev; 678 dma_addr_t dma_handle; 679 680 dma_handle = dma_map_single(&pdev->dev, skb->data, buf_len, 681 DMA_FROM_DEVICE); 682 if (dma_mapping_error(&pdev->dev, dma_handle)) 683 return -EIO; 684 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) 685 goto tlv_put_failure; 686 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) 687 goto tlv_put_failure; 688 return 0; 689 690 tlv_put_failure: 691 dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_FROM_DEVICE); 692 desc_info->tlv_size = 0; 693 return -EMSGSIZE; 694 } 695 696 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) 697 { 698 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 699 } 700 701 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, 702 struct rocker_desc_info *desc_info) 703 { 704 struct net_device *dev = rocker_port->dev; 705 struct sk_buff *skb; 706 size_t buf_len = rocker_port_rx_buf_len(rocker_port); 707 int err; 708 709 /* Ensure that hw will see tlv_size zero in case of an error. 710 * That tells hw to use another descriptor. 711 */ 712 rocker_desc_cookie_ptr_set(desc_info, NULL); 713 desc_info->tlv_size = 0; 714 715 skb = netdev_alloc_skb_ip_align(dev, buf_len); 716 if (!skb) 717 return -ENOMEM; 718 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); 719 if (err) { 720 dev_kfree_skb_any(skb); 721 return err; 722 } 723 rocker_desc_cookie_ptr_set(desc_info, skb); 724 return 0; 725 } 726 727 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker, 728 const struct rocker_tlv **attrs) 729 { 730 struct pci_dev *pdev = rocker->pdev; 731 dma_addr_t dma_handle; 732 size_t len; 733 734 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || 735 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) 736 return; 737 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); 738 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); 739 dma_unmap_single(&pdev->dev, dma_handle, len, DMA_FROM_DEVICE); 740 } 741 742 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, 743 const struct rocker_desc_info *desc_info) 744 { 745 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; 746 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); 747 748 if (!skb) 749 return; 750 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); 751 rocker_dma_rx_ring_skb_unmap(rocker, attrs); 752 dev_kfree_skb_any(skb); 753 } 754 755 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) 756 { 757 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; 758 const struct rocker *rocker = rocker_port->rocker; 759 int i; 760 int err; 761 762 for (i = 0; i < rx_ring->size; i++) { 763 err = rocker_dma_rx_ring_skb_alloc(rocker_port, 764 &rx_ring->desc_info[i]); 765 if (err) 766 goto rollback; 767 } 768 return 0; 769 770 rollback: 771 for (i--; i >= 0; i--) 772 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); 773 return err; 774 } 775 776 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) 777 { 778 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; 779 const struct rocker *rocker = rocker_port->rocker; 780 int i; 781 782 for (i = 0; i < rx_ring->size; i++) 783 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); 784 } 785 786 static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) 787 { 788 struct rocker *rocker = rocker_port->rocker; 789 int err; 790 791 err = rocker_dma_ring_create(rocker, 792 ROCKER_DMA_TX(rocker_port->port_number), 793 ROCKER_DMA_TX_DEFAULT_SIZE, 794 &rocker_port->tx_ring); 795 if (err) { 796 netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); 797 return err; 798 } 799 800 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, 801 DMA_TO_DEVICE, 802 ROCKER_DMA_TX_DESC_SIZE); 803 if (err) { 804 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); 805 goto err_dma_tx_ring_bufs_alloc; 806 } 807 808 err = rocker_dma_ring_create(rocker, 809 ROCKER_DMA_RX(rocker_port->port_number), 810 ROCKER_DMA_RX_DEFAULT_SIZE, 811 &rocker_port->rx_ring); 812 if (err) { 813 netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); 814 goto err_dma_rx_ring_create; 815 } 816 817 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, 818 DMA_BIDIRECTIONAL, 819 ROCKER_DMA_RX_DESC_SIZE); 820 if (err) { 821 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); 822 goto err_dma_rx_ring_bufs_alloc; 823 } 824 825 err = rocker_dma_rx_ring_skbs_alloc(rocker_port); 826 if (err) { 827 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); 828 goto err_dma_rx_ring_skbs_alloc; 829 } 830 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); 831 832 return 0; 833 834 err_dma_rx_ring_skbs_alloc: 835 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, 836 DMA_BIDIRECTIONAL); 837 err_dma_rx_ring_bufs_alloc: 838 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); 839 err_dma_rx_ring_create: 840 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, 841 DMA_TO_DEVICE); 842 err_dma_tx_ring_bufs_alloc: 843 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); 844 return err; 845 } 846 847 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) 848 { 849 struct rocker *rocker = rocker_port->rocker; 850 851 rocker_dma_rx_ring_skbs_free(rocker_port); 852 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, 853 DMA_BIDIRECTIONAL); 854 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); 855 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, 856 DMA_TO_DEVICE); 857 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); 858 } 859 860 static void rocker_port_set_enable(const struct rocker_port *rocker_port, 861 bool enable) 862 { 863 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); 864 865 if (enable) 866 val |= 1ULL << rocker_port->pport; 867 else 868 val &= ~(1ULL << rocker_port->pport); 869 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); 870 } 871 872 /******************************** 873 * Interrupt handler and helpers 874 ********************************/ 875 876 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) 877 { 878 struct rocker *rocker = dev_id; 879 const struct rocker_desc_info *desc_info; 880 struct rocker_wait *wait; 881 u32 credits = 0; 882 883 spin_lock(&rocker->cmd_ring_lock); 884 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { 885 wait = rocker_desc_cookie_ptr_get(desc_info); 886 if (wait->nowait) { 887 rocker_desc_gen_clear(desc_info); 888 } else { 889 rocker_wait_wake_up(wait); 890 } 891 credits++; 892 } 893 spin_unlock(&rocker->cmd_ring_lock); 894 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); 895 896 return IRQ_HANDLED; 897 } 898 899 static void rocker_port_link_up(const struct rocker_port *rocker_port) 900 { 901 netif_carrier_on(rocker_port->dev); 902 netdev_info(rocker_port->dev, "Link is up\n"); 903 } 904 905 static void rocker_port_link_down(const struct rocker_port *rocker_port) 906 { 907 netif_carrier_off(rocker_port->dev); 908 netdev_info(rocker_port->dev, "Link is down\n"); 909 } 910 911 static int rocker_event_link_change(const struct rocker *rocker, 912 const struct rocker_tlv *info) 913 { 914 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; 915 unsigned int port_number; 916 bool link_up; 917 struct rocker_port *rocker_port; 918 919 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); 920 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || 921 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) 922 return -EIO; 923 port_number = 924 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; 925 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); 926 927 if (port_number >= rocker->port_count) 928 return -EINVAL; 929 930 rocker_port = rocker->ports[port_number]; 931 if (netif_carrier_ok(rocker_port->dev) != link_up) { 932 if (link_up) 933 rocker_port_link_up(rocker_port); 934 else 935 rocker_port_link_down(rocker_port); 936 } 937 938 return 0; 939 } 940 941 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, 942 const unsigned char *addr, 943 __be16 vlan_id); 944 945 static int rocker_event_mac_vlan_seen(const struct rocker *rocker, 946 const struct rocker_tlv *info) 947 { 948 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; 949 unsigned int port_number; 950 struct rocker_port *rocker_port; 951 const unsigned char *addr; 952 __be16 vlan_id; 953 954 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); 955 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || 956 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || 957 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) 958 return -EIO; 959 port_number = 960 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; 961 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); 962 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); 963 964 if (port_number >= rocker->port_count) 965 return -EINVAL; 966 967 rocker_port = rocker->ports[port_number]; 968 return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); 969 } 970 971 static int rocker_event_process(const struct rocker *rocker, 972 const struct rocker_desc_info *desc_info) 973 { 974 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; 975 const struct rocker_tlv *info; 976 u16 type; 977 978 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); 979 if (!attrs[ROCKER_TLV_EVENT_TYPE] || 980 !attrs[ROCKER_TLV_EVENT_INFO]) 981 return -EIO; 982 983 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); 984 info = attrs[ROCKER_TLV_EVENT_INFO]; 985 986 switch (type) { 987 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: 988 return rocker_event_link_change(rocker, info); 989 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: 990 return rocker_event_mac_vlan_seen(rocker, info); 991 } 992 993 return -EOPNOTSUPP; 994 } 995 996 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) 997 { 998 struct rocker *rocker = dev_id; 999 const struct pci_dev *pdev = rocker->pdev; 1000 const struct rocker_desc_info *desc_info; 1001 u32 credits = 0; 1002 int err; 1003 1004 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { 1005 err = rocker_desc_err(desc_info); 1006 if (err) { 1007 dev_err(&pdev->dev, "event desc received with err %d\n", 1008 err); 1009 } else { 1010 err = rocker_event_process(rocker, desc_info); 1011 if (err) 1012 dev_err(&pdev->dev, "event processing failed with err %d\n", 1013 err); 1014 } 1015 rocker_desc_gen_clear(desc_info); 1016 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); 1017 credits++; 1018 } 1019 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); 1020 1021 return IRQ_HANDLED; 1022 } 1023 1024 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id) 1025 { 1026 struct rocker_port *rocker_port = dev_id; 1027 1028 napi_schedule(&rocker_port->napi_tx); 1029 return IRQ_HANDLED; 1030 } 1031 1032 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) 1033 { 1034 struct rocker_port *rocker_port = dev_id; 1035 1036 napi_schedule(&rocker_port->napi_rx); 1037 return IRQ_HANDLED; 1038 } 1039 1040 /******************** 1041 * Command interface 1042 ********************/ 1043 1044 int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, 1045 rocker_cmd_prep_cb_t prepare, void *prepare_priv, 1046 rocker_cmd_proc_cb_t process, void *process_priv) 1047 { 1048 struct rocker *rocker = rocker_port->rocker; 1049 struct rocker_desc_info *desc_info; 1050 struct rocker_wait *wait; 1051 unsigned long lock_flags; 1052 int err; 1053 1054 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); 1055 1056 desc_info = rocker_desc_head_get(&rocker->cmd_ring); 1057 if (!desc_info) { 1058 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); 1059 return -EAGAIN; 1060 } 1061 1062 wait = rocker_desc_cookie_ptr_get(desc_info); 1063 rocker_wait_init(wait); 1064 wait->nowait = nowait; 1065 1066 err = prepare(rocker_port, desc_info, prepare_priv); 1067 if (err) { 1068 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); 1069 return err; 1070 } 1071 1072 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); 1073 1074 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); 1075 1076 if (nowait) 1077 return 0; 1078 1079 if (!rocker_wait_event_timeout(wait, HZ / 10)) 1080 return -EIO; 1081 1082 err = rocker_desc_err(desc_info); 1083 if (err) 1084 return err; 1085 1086 if (process) 1087 err = process(rocker_port, desc_info, process_priv); 1088 1089 rocker_desc_gen_clear(desc_info); 1090 return err; 1091 } 1092 1093 static int 1094 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, 1095 struct rocker_desc_info *desc_info, 1096 void *priv) 1097 { 1098 struct rocker_tlv *cmd_info; 1099 1100 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, 1101 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) 1102 return -EMSGSIZE; 1103 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 1104 if (!cmd_info) 1105 return -EMSGSIZE; 1106 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, 1107 rocker_port->pport)) 1108 return -EMSGSIZE; 1109 rocker_tlv_nest_end(desc_info, cmd_info); 1110 return 0; 1111 } 1112 1113 static int 1114 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, 1115 const struct rocker_desc_info *desc_info, 1116 void *priv) 1117 { 1118 struct ethtool_link_ksettings *ecmd = priv; 1119 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; 1120 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; 1121 u32 speed; 1122 u8 duplex; 1123 u8 autoneg; 1124 1125 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); 1126 if (!attrs[ROCKER_TLV_CMD_INFO]) 1127 return -EIO; 1128 1129 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, 1130 attrs[ROCKER_TLV_CMD_INFO]); 1131 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || 1132 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || 1133 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) 1134 return -EIO; 1135 1136 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); 1137 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); 1138 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); 1139 1140 ethtool_link_ksettings_zero_link_mode(ecmd, supported); 1141 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); 1142 1143 ecmd->base.phy_address = 0xff; 1144 ecmd->base.port = PORT_TP; 1145 ecmd->base.speed = speed; 1146 ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; 1147 ecmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 1148 1149 return 0; 1150 } 1151 1152 static int 1153 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, 1154 const struct rocker_desc_info *desc_info, 1155 void *priv) 1156 { 1157 unsigned char *macaddr = priv; 1158 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; 1159 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; 1160 const struct rocker_tlv *attr; 1161 1162 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); 1163 if (!attrs[ROCKER_TLV_CMD_INFO]) 1164 return -EIO; 1165 1166 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, 1167 attrs[ROCKER_TLV_CMD_INFO]); 1168 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; 1169 if (!attr) 1170 return -EIO; 1171 1172 if (rocker_tlv_len(attr) != ETH_ALEN) 1173 return -EINVAL; 1174 1175 ether_addr_copy(macaddr, rocker_tlv_data(attr)); 1176 return 0; 1177 } 1178 1179 static int 1180 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port, 1181 const struct rocker_desc_info *desc_info, 1182 void *priv) 1183 { 1184 u8 *p_mode = priv; 1185 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; 1186 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; 1187 const struct rocker_tlv *attr; 1188 1189 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); 1190 if (!attrs[ROCKER_TLV_CMD_INFO]) 1191 return -EIO; 1192 1193 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, 1194 attrs[ROCKER_TLV_CMD_INFO]); 1195 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]; 1196 if (!attr) 1197 return -EIO; 1198 1199 *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]); 1200 return 0; 1201 } 1202 1203 struct port_name { 1204 char *buf; 1205 size_t len; 1206 }; 1207 1208 static int 1209 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, 1210 const struct rocker_desc_info *desc_info, 1211 void *priv) 1212 { 1213 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; 1214 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; 1215 struct port_name *name = priv; 1216 const struct rocker_tlv *attr; 1217 size_t i, j, len; 1218 const char *str; 1219 1220 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); 1221 if (!attrs[ROCKER_TLV_CMD_INFO]) 1222 return -EIO; 1223 1224 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, 1225 attrs[ROCKER_TLV_CMD_INFO]); 1226 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; 1227 if (!attr) 1228 return -EIO; 1229 1230 len = min_t(size_t, rocker_tlv_len(attr), name->len); 1231 str = rocker_tlv_data(attr); 1232 1233 /* make sure name only contains alphanumeric characters */ 1234 for (i = j = 0; i < len; ++i) { 1235 if (isalnum(str[i])) { 1236 name->buf[j] = str[i]; 1237 j++; 1238 } 1239 } 1240 1241 if (j == 0) 1242 return -EIO; 1243 1244 name->buf[j] = '\0'; 1245 1246 return 0; 1247 } 1248 1249 static int 1250 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, 1251 struct rocker_desc_info *desc_info, 1252 void *priv) 1253 { 1254 struct ethtool_link_ksettings *ecmd = priv; 1255 struct rocker_tlv *cmd_info; 1256 1257 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, 1258 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) 1259 return -EMSGSIZE; 1260 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 1261 if (!cmd_info) 1262 return -EMSGSIZE; 1263 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, 1264 rocker_port->pport)) 1265 return -EMSGSIZE; 1266 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, 1267 ecmd->base.speed)) 1268 return -EMSGSIZE; 1269 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, 1270 ecmd->base.duplex)) 1271 return -EMSGSIZE; 1272 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, 1273 ecmd->base.autoneg)) 1274 return -EMSGSIZE; 1275 rocker_tlv_nest_end(desc_info, cmd_info); 1276 return 0; 1277 } 1278 1279 static int 1280 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, 1281 struct rocker_desc_info *desc_info, 1282 void *priv) 1283 { 1284 const unsigned char *macaddr = priv; 1285 struct rocker_tlv *cmd_info; 1286 1287 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, 1288 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) 1289 return -EMSGSIZE; 1290 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 1291 if (!cmd_info) 1292 return -EMSGSIZE; 1293 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, 1294 rocker_port->pport)) 1295 return -EMSGSIZE; 1296 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, 1297 ETH_ALEN, macaddr)) 1298 return -EMSGSIZE; 1299 rocker_tlv_nest_end(desc_info, cmd_info); 1300 return 0; 1301 } 1302 1303 static int 1304 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, 1305 struct rocker_desc_info *desc_info, 1306 void *priv) 1307 { 1308 int mtu = *(int *)priv; 1309 struct rocker_tlv *cmd_info; 1310 1311 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, 1312 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) 1313 return -EMSGSIZE; 1314 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 1315 if (!cmd_info) 1316 return -EMSGSIZE; 1317 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, 1318 rocker_port->pport)) 1319 return -EMSGSIZE; 1320 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, 1321 mtu)) 1322 return -EMSGSIZE; 1323 rocker_tlv_nest_end(desc_info, cmd_info); 1324 return 0; 1325 } 1326 1327 static int 1328 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, 1329 struct rocker_desc_info *desc_info, 1330 void *priv) 1331 { 1332 bool learning = *(bool *)priv; 1333 struct rocker_tlv *cmd_info; 1334 1335 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, 1336 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) 1337 return -EMSGSIZE; 1338 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 1339 if (!cmd_info) 1340 return -EMSGSIZE; 1341 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, 1342 rocker_port->pport)) 1343 return -EMSGSIZE; 1344 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, 1345 learning)) 1346 return -EMSGSIZE; 1347 rocker_tlv_nest_end(desc_info, cmd_info); 1348 return 0; 1349 } 1350 1351 static int 1352 rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, 1353 struct ethtool_link_ksettings *ecmd) 1354 { 1355 return rocker_cmd_exec(rocker_port, false, 1356 rocker_cmd_get_port_settings_prep, NULL, 1357 rocker_cmd_get_port_settings_ethtool_proc, 1358 ecmd); 1359 } 1360 1361 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, 1362 unsigned char *macaddr) 1363 { 1364 return rocker_cmd_exec(rocker_port, false, 1365 rocker_cmd_get_port_settings_prep, NULL, 1366 rocker_cmd_get_port_settings_macaddr_proc, 1367 macaddr); 1368 } 1369 1370 static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, 1371 u8 *p_mode) 1372 { 1373 return rocker_cmd_exec(rocker_port, false, 1374 rocker_cmd_get_port_settings_prep, NULL, 1375 rocker_cmd_get_port_settings_mode_proc, p_mode); 1376 } 1377 1378 static int 1379 rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, 1380 const struct ethtool_link_ksettings *ecmd) 1381 { 1382 struct ethtool_link_ksettings copy_ecmd; 1383 1384 memcpy(©_ecmd, ecmd, sizeof(copy_ecmd)); 1385 1386 return rocker_cmd_exec(rocker_port, false, 1387 rocker_cmd_set_port_settings_ethtool_prep, 1388 ©_ecmd, NULL, NULL); 1389 } 1390 1391 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, 1392 unsigned char *macaddr) 1393 { 1394 return rocker_cmd_exec(rocker_port, false, 1395 rocker_cmd_set_port_settings_macaddr_prep, 1396 macaddr, NULL, NULL); 1397 } 1398 1399 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, 1400 int mtu) 1401 { 1402 return rocker_cmd_exec(rocker_port, false, 1403 rocker_cmd_set_port_settings_mtu_prep, 1404 &mtu, NULL, NULL); 1405 } 1406 1407 int rocker_port_set_learning(struct rocker_port *rocker_port, 1408 bool learning) 1409 { 1410 return rocker_cmd_exec(rocker_port, false, 1411 rocker_cmd_set_port_learning_prep, 1412 &learning, NULL, NULL); 1413 } 1414 1415 /********************** 1416 * Worlds manipulation 1417 **********************/ 1418 1419 static struct rocker_world_ops *rocker_world_ops[] = { 1420 &rocker_ofdpa_ops, 1421 }; 1422 1423 #define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops) 1424 1425 static struct rocker_world_ops *rocker_world_ops_find(u8 mode) 1426 { 1427 int i; 1428 1429 for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++) 1430 if (rocker_world_ops[i]->mode == mode) 1431 return rocker_world_ops[i]; 1432 return NULL; 1433 } 1434 1435 static int rocker_world_init(struct rocker *rocker, u8 mode) 1436 { 1437 struct rocker_world_ops *wops; 1438 int err; 1439 1440 wops = rocker_world_ops_find(mode); 1441 if (!wops) { 1442 dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n", 1443 mode); 1444 return -EINVAL; 1445 } 1446 rocker->wops = wops; 1447 rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL); 1448 if (!rocker->wpriv) 1449 return -ENOMEM; 1450 if (!wops->init) 1451 return 0; 1452 err = wops->init(rocker); 1453 if (err) 1454 kfree(rocker->wpriv); 1455 return err; 1456 } 1457 1458 static void rocker_world_fini(struct rocker *rocker) 1459 { 1460 struct rocker_world_ops *wops = rocker->wops; 1461 1462 if (!wops || !wops->fini) 1463 return; 1464 wops->fini(rocker); 1465 kfree(rocker->wpriv); 1466 } 1467 1468 static int rocker_world_check_init(struct rocker_port *rocker_port) 1469 { 1470 struct rocker *rocker = rocker_port->rocker; 1471 u8 mode; 1472 int err; 1473 1474 err = rocker_cmd_get_port_settings_mode(rocker_port, &mode); 1475 if (err) { 1476 dev_err(&rocker->pdev->dev, "failed to get port mode\n"); 1477 return err; 1478 } 1479 if (rocker->wops) { 1480 if (rocker->wops->mode != mode) { 1481 dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); 1482 return -EINVAL; 1483 } 1484 return 0; 1485 } 1486 return rocker_world_init(rocker, mode); 1487 } 1488 1489 static int rocker_world_port_pre_init(struct rocker_port *rocker_port) 1490 { 1491 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1492 int err; 1493 1494 rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL); 1495 if (!rocker_port->wpriv) 1496 return -ENOMEM; 1497 if (!wops->port_pre_init) 1498 return 0; 1499 err = wops->port_pre_init(rocker_port); 1500 if (err) 1501 kfree(rocker_port->wpriv); 1502 return 0; 1503 } 1504 1505 static int rocker_world_port_init(struct rocker_port *rocker_port) 1506 { 1507 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1508 1509 if (!wops->port_init) 1510 return 0; 1511 return wops->port_init(rocker_port); 1512 } 1513 1514 static void rocker_world_port_fini(struct rocker_port *rocker_port) 1515 { 1516 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1517 1518 if (!wops->port_fini) 1519 return; 1520 wops->port_fini(rocker_port); 1521 } 1522 1523 static void rocker_world_port_post_fini(struct rocker_port *rocker_port) 1524 { 1525 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1526 1527 if (!wops->port_post_fini) 1528 return; 1529 wops->port_post_fini(rocker_port); 1530 kfree(rocker_port->wpriv); 1531 } 1532 1533 static int rocker_world_port_open(struct rocker_port *rocker_port) 1534 { 1535 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1536 1537 if (!wops->port_open) 1538 return 0; 1539 return wops->port_open(rocker_port); 1540 } 1541 1542 static void rocker_world_port_stop(struct rocker_port *rocker_port) 1543 { 1544 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1545 1546 if (!wops->port_stop) 1547 return; 1548 wops->port_stop(rocker_port); 1549 } 1550 1551 static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port, 1552 u8 state) 1553 { 1554 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1555 1556 if (!wops->port_attr_stp_state_set) 1557 return -EOPNOTSUPP; 1558 1559 return wops->port_attr_stp_state_set(rocker_port, state); 1560 } 1561 1562 static int 1563 rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port * 1564 rocker_port, 1565 unsigned long * 1566 p_brport_flags_support) 1567 { 1568 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1569 1570 if (!wops->port_attr_bridge_flags_support_get) 1571 return -EOPNOTSUPP; 1572 return wops->port_attr_bridge_flags_support_get(rocker_port, 1573 p_brport_flags_support); 1574 } 1575 1576 static int 1577 rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port, 1578 struct switchdev_brport_flags flags) 1579 { 1580 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1581 unsigned long brport_flags_s; 1582 int err; 1583 1584 if (!wops->port_attr_bridge_flags_set) 1585 return -EOPNOTSUPP; 1586 1587 err = rocker_world_port_attr_bridge_flags_support_get(rocker_port, 1588 &brport_flags_s); 1589 if (err) 1590 return err; 1591 1592 if (flags.mask & ~brport_flags_s) 1593 return -EINVAL; 1594 1595 return 0; 1596 } 1597 1598 static int 1599 rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, 1600 struct switchdev_brport_flags flags) 1601 { 1602 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1603 1604 if (!wops->port_attr_bridge_flags_set) 1605 return -EOPNOTSUPP; 1606 1607 return wops->port_attr_bridge_flags_set(rocker_port, flags.val); 1608 } 1609 1610 static int 1611 rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, 1612 u32 ageing_time) 1613 { 1614 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1615 1616 if (!wops->port_attr_bridge_ageing_time_set) 1617 return -EOPNOTSUPP; 1618 1619 return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time); 1620 } 1621 1622 static int 1623 rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, 1624 const struct switchdev_obj_port_vlan *vlan) 1625 { 1626 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1627 1628 if (!wops->port_obj_vlan_add) 1629 return -EOPNOTSUPP; 1630 1631 return wops->port_obj_vlan_add(rocker_port, vlan); 1632 } 1633 1634 static int 1635 rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, 1636 const struct switchdev_obj_port_vlan *vlan) 1637 { 1638 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1639 1640 if (netif_is_bridge_master(vlan->obj.orig_dev)) 1641 return -EOPNOTSUPP; 1642 1643 if (!wops->port_obj_vlan_del) 1644 return -EOPNOTSUPP; 1645 return wops->port_obj_vlan_del(rocker_port, vlan); 1646 } 1647 1648 static int 1649 rocker_world_port_fdb_add(struct rocker_port *rocker_port, 1650 struct switchdev_notifier_fdb_info *info) 1651 { 1652 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1653 1654 if (!wops->port_obj_fdb_add) 1655 return -EOPNOTSUPP; 1656 1657 return wops->port_obj_fdb_add(rocker_port, info->vid, info->addr); 1658 } 1659 1660 static int 1661 rocker_world_port_fdb_del(struct rocker_port *rocker_port, 1662 struct switchdev_notifier_fdb_info *info) 1663 { 1664 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1665 1666 if (!wops->port_obj_fdb_del) 1667 return -EOPNOTSUPP; 1668 return wops->port_obj_fdb_del(rocker_port, info->vid, info->addr); 1669 } 1670 1671 static int rocker_world_port_master_linked(struct rocker_port *rocker_port, 1672 struct net_device *master, 1673 struct netlink_ext_ack *extack) 1674 { 1675 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1676 1677 if (!wops->port_master_linked) 1678 return -EOPNOTSUPP; 1679 return wops->port_master_linked(rocker_port, master, extack); 1680 } 1681 1682 static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port, 1683 struct net_device *master) 1684 { 1685 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1686 1687 if (!wops->port_master_unlinked) 1688 return -EOPNOTSUPP; 1689 return wops->port_master_unlinked(rocker_port, master); 1690 } 1691 1692 static int rocker_world_port_neigh_update(struct rocker_port *rocker_port, 1693 struct neighbour *n) 1694 { 1695 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1696 1697 if (!wops->port_neigh_update) 1698 return -EOPNOTSUPP; 1699 return wops->port_neigh_update(rocker_port, n); 1700 } 1701 1702 static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port, 1703 struct neighbour *n) 1704 { 1705 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1706 1707 if (!wops->port_neigh_destroy) 1708 return -EOPNOTSUPP; 1709 return wops->port_neigh_destroy(rocker_port, n); 1710 } 1711 1712 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, 1713 const unsigned char *addr, 1714 __be16 vlan_id) 1715 { 1716 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1717 1718 if (!wops->port_ev_mac_vlan_seen) 1719 return -EOPNOTSUPP; 1720 return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); 1721 } 1722 1723 static int rocker_world_fib4_add(struct rocker *rocker, 1724 const struct fib_entry_notifier_info *fen_info) 1725 { 1726 struct rocker_world_ops *wops = rocker->wops; 1727 1728 if (!wops->fib4_add) 1729 return 0; 1730 return wops->fib4_add(rocker, fen_info); 1731 } 1732 1733 static int rocker_world_fib4_del(struct rocker *rocker, 1734 const struct fib_entry_notifier_info *fen_info) 1735 { 1736 struct rocker_world_ops *wops = rocker->wops; 1737 1738 if (!wops->fib4_del) 1739 return 0; 1740 return wops->fib4_del(rocker, fen_info); 1741 } 1742 1743 static void rocker_world_fib4_abort(struct rocker *rocker) 1744 { 1745 struct rocker_world_ops *wops = rocker->wops; 1746 1747 if (wops->fib4_abort) 1748 wops->fib4_abort(rocker); 1749 } 1750 1751 /***************** 1752 * Net device ops 1753 *****************/ 1754 1755 static int rocker_port_open(struct net_device *dev) 1756 { 1757 struct rocker_port *rocker_port = netdev_priv(dev); 1758 int err; 1759 1760 err = rocker_port_dma_rings_init(rocker_port); 1761 if (err) 1762 return err; 1763 1764 err = request_irq(rocker_msix_tx_vector(rocker_port), 1765 rocker_tx_irq_handler, 0, 1766 rocker_driver_name, rocker_port); 1767 if (err) { 1768 netdev_err(rocker_port->dev, "cannot assign tx irq\n"); 1769 goto err_request_tx_irq; 1770 } 1771 1772 err = request_irq(rocker_msix_rx_vector(rocker_port), 1773 rocker_rx_irq_handler, 0, 1774 rocker_driver_name, rocker_port); 1775 if (err) { 1776 netdev_err(rocker_port->dev, "cannot assign rx irq\n"); 1777 goto err_request_rx_irq; 1778 } 1779 1780 err = rocker_world_port_open(rocker_port); 1781 if (err) { 1782 netdev_err(rocker_port->dev, "cannot open port in world\n"); 1783 goto err_world_port_open; 1784 } 1785 1786 napi_enable(&rocker_port->napi_tx); 1787 napi_enable(&rocker_port->napi_rx); 1788 if (!dev->proto_down) 1789 rocker_port_set_enable(rocker_port, true); 1790 netif_start_queue(dev); 1791 return 0; 1792 1793 err_world_port_open: 1794 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); 1795 err_request_rx_irq: 1796 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); 1797 err_request_tx_irq: 1798 rocker_port_dma_rings_fini(rocker_port); 1799 return err; 1800 } 1801 1802 static int rocker_port_stop(struct net_device *dev) 1803 { 1804 struct rocker_port *rocker_port = netdev_priv(dev); 1805 1806 netif_stop_queue(dev); 1807 rocker_port_set_enable(rocker_port, false); 1808 napi_disable(&rocker_port->napi_rx); 1809 napi_disable(&rocker_port->napi_tx); 1810 rocker_world_port_stop(rocker_port); 1811 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); 1812 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); 1813 rocker_port_dma_rings_fini(rocker_port); 1814 1815 return 0; 1816 } 1817 1818 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, 1819 const struct rocker_desc_info *desc_info) 1820 { 1821 const struct rocker *rocker = rocker_port->rocker; 1822 struct pci_dev *pdev = rocker->pdev; 1823 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; 1824 struct rocker_tlv *attr; 1825 int rem; 1826 1827 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); 1828 if (!attrs[ROCKER_TLV_TX_FRAGS]) 1829 return; 1830 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { 1831 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; 1832 dma_addr_t dma_handle; 1833 size_t len; 1834 1835 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) 1836 continue; 1837 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, 1838 attr); 1839 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || 1840 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) 1841 continue; 1842 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); 1843 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); 1844 dma_unmap_single(&pdev->dev, dma_handle, len, DMA_TO_DEVICE); 1845 } 1846 } 1847 1848 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, 1849 struct rocker_desc_info *desc_info, 1850 char *buf, size_t buf_len) 1851 { 1852 const struct rocker *rocker = rocker_port->rocker; 1853 struct pci_dev *pdev = rocker->pdev; 1854 dma_addr_t dma_handle; 1855 struct rocker_tlv *frag; 1856 1857 dma_handle = dma_map_single(&pdev->dev, buf, buf_len, DMA_TO_DEVICE); 1858 if (unlikely(dma_mapping_error(&pdev->dev, dma_handle))) { 1859 if (net_ratelimit()) 1860 netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); 1861 return -EIO; 1862 } 1863 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); 1864 if (!frag) 1865 goto unmap_frag; 1866 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, 1867 dma_handle)) 1868 goto nest_cancel; 1869 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, 1870 buf_len)) 1871 goto nest_cancel; 1872 rocker_tlv_nest_end(desc_info, frag); 1873 return 0; 1874 1875 nest_cancel: 1876 rocker_tlv_nest_cancel(desc_info, frag); 1877 unmap_frag: 1878 dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_TO_DEVICE); 1879 return -EMSGSIZE; 1880 } 1881 1882 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) 1883 { 1884 struct rocker_port *rocker_port = netdev_priv(dev); 1885 struct rocker *rocker = rocker_port->rocker; 1886 struct rocker_desc_info *desc_info; 1887 struct rocker_tlv *frags; 1888 int i; 1889 int err; 1890 1891 desc_info = rocker_desc_head_get(&rocker_port->tx_ring); 1892 if (unlikely(!desc_info)) { 1893 if (net_ratelimit()) 1894 netdev_err(dev, "tx ring full when queue awake\n"); 1895 return NETDEV_TX_BUSY; 1896 } 1897 1898 rocker_desc_cookie_ptr_set(desc_info, skb); 1899 1900 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); 1901 if (!frags) 1902 goto out; 1903 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, 1904 skb->data, skb_headlen(skb)); 1905 if (err) 1906 goto nest_cancel; 1907 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { 1908 err = skb_linearize(skb); 1909 if (err) 1910 goto unmap_frags; 1911 } 1912 1913 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1914 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1915 1916 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, 1917 skb_frag_address(frag), 1918 skb_frag_size(frag)); 1919 if (err) 1920 goto unmap_frags; 1921 } 1922 rocker_tlv_nest_end(desc_info, frags); 1923 1924 rocker_desc_gen_clear(desc_info); 1925 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); 1926 1927 desc_info = rocker_desc_head_get(&rocker_port->tx_ring); 1928 if (!desc_info) 1929 netif_stop_queue(dev); 1930 1931 return NETDEV_TX_OK; 1932 1933 unmap_frags: 1934 rocker_tx_desc_frags_unmap(rocker_port, desc_info); 1935 nest_cancel: 1936 rocker_tlv_nest_cancel(desc_info, frags); 1937 out: 1938 dev_kfree_skb(skb); 1939 dev->stats.tx_dropped++; 1940 1941 return NETDEV_TX_OK; 1942 } 1943 1944 static int rocker_port_set_mac_address(struct net_device *dev, void *p) 1945 { 1946 struct sockaddr *addr = p; 1947 struct rocker_port *rocker_port = netdev_priv(dev); 1948 int err; 1949 1950 if (!is_valid_ether_addr(addr->sa_data)) 1951 return -EADDRNOTAVAIL; 1952 1953 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); 1954 if (err) 1955 return err; 1956 eth_hw_addr_set(dev, addr->sa_data); 1957 return 0; 1958 } 1959 1960 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) 1961 { 1962 struct rocker_port *rocker_port = netdev_priv(dev); 1963 int running = netif_running(dev); 1964 int err; 1965 1966 if (running) 1967 rocker_port_stop(dev); 1968 1969 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); 1970 WRITE_ONCE(dev->mtu, new_mtu); 1971 1972 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); 1973 if (err) 1974 return err; 1975 1976 if (running) 1977 err = rocker_port_open(dev); 1978 1979 return err; 1980 } 1981 1982 static int rocker_port_get_phys_port_name(struct net_device *dev, 1983 char *buf, size_t len) 1984 { 1985 struct rocker_port *rocker_port = netdev_priv(dev); 1986 struct port_name name = { .buf = buf, .len = len }; 1987 int err; 1988 1989 err = rocker_cmd_exec(rocker_port, false, 1990 rocker_cmd_get_port_settings_prep, NULL, 1991 rocker_cmd_get_port_settings_phys_name_proc, 1992 &name); 1993 1994 return err ? -EOPNOTSUPP : 0; 1995 } 1996 1997 static void rocker_port_neigh_destroy(struct net_device *dev, 1998 struct neighbour *n) 1999 { 2000 struct rocker_port *rocker_port = netdev_priv(n->dev); 2001 int err; 2002 2003 err = rocker_world_port_neigh_destroy(rocker_port, n); 2004 if (err) 2005 netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n", 2006 err); 2007 } 2008 2009 static int rocker_port_get_port_parent_id(struct net_device *dev, 2010 struct netdev_phys_item_id *ppid) 2011 { 2012 const struct rocker_port *rocker_port = netdev_priv(dev); 2013 const struct rocker *rocker = rocker_port->rocker; 2014 2015 ppid->id_len = sizeof(rocker->hw.id); 2016 memcpy(&ppid->id, &rocker->hw.id, ppid->id_len); 2017 2018 return 0; 2019 } 2020 2021 static const struct net_device_ops rocker_port_netdev_ops = { 2022 .ndo_open = rocker_port_open, 2023 .ndo_stop = rocker_port_stop, 2024 .ndo_start_xmit = rocker_port_xmit, 2025 .ndo_set_mac_address = rocker_port_set_mac_address, 2026 .ndo_change_mtu = rocker_port_change_mtu, 2027 .ndo_get_phys_port_name = rocker_port_get_phys_port_name, 2028 .ndo_neigh_destroy = rocker_port_neigh_destroy, 2029 .ndo_get_port_parent_id = rocker_port_get_port_parent_id, 2030 }; 2031 2032 /******************** 2033 * swdev interface 2034 ********************/ 2035 2036 static int rocker_port_attr_set(struct net_device *dev, 2037 const struct switchdev_attr *attr) 2038 { 2039 struct rocker_port *rocker_port = netdev_priv(dev); 2040 int err = 0; 2041 2042 switch (attr->id) { 2043 case SWITCHDEV_ATTR_ID_PORT_STP_STATE: 2044 err = rocker_world_port_attr_stp_state_set(rocker_port, 2045 attr->u.stp_state); 2046 break; 2047 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: 2048 err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port, 2049 attr->u.brport_flags); 2050 break; 2051 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 2052 err = rocker_world_port_attr_bridge_flags_set(rocker_port, 2053 attr->u.brport_flags); 2054 break; 2055 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: 2056 err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port, 2057 attr->u.ageing_time); 2058 break; 2059 default: 2060 err = -EOPNOTSUPP; 2061 break; 2062 } 2063 2064 return err; 2065 } 2066 2067 static int rocker_port_obj_add(struct net_device *dev, 2068 const struct switchdev_obj *obj) 2069 { 2070 struct rocker_port *rocker_port = netdev_priv(dev); 2071 int err = 0; 2072 2073 switch (obj->id) { 2074 case SWITCHDEV_OBJ_ID_PORT_VLAN: 2075 err = rocker_world_port_obj_vlan_add(rocker_port, 2076 SWITCHDEV_OBJ_PORT_VLAN(obj)); 2077 break; 2078 default: 2079 err = -EOPNOTSUPP; 2080 break; 2081 } 2082 2083 return err; 2084 } 2085 2086 static int rocker_port_obj_del(struct net_device *dev, 2087 const struct switchdev_obj *obj) 2088 { 2089 struct rocker_port *rocker_port = netdev_priv(dev); 2090 int err = 0; 2091 2092 switch (obj->id) { 2093 case SWITCHDEV_OBJ_ID_PORT_VLAN: 2094 err = rocker_world_port_obj_vlan_del(rocker_port, 2095 SWITCHDEV_OBJ_PORT_VLAN(obj)); 2096 break; 2097 default: 2098 err = -EOPNOTSUPP; 2099 break; 2100 } 2101 2102 return err; 2103 } 2104 2105 struct rocker_fib_event_work { 2106 struct work_struct work; 2107 union { 2108 struct fib_entry_notifier_info fen_info; 2109 struct fib_rule_notifier_info fr_info; 2110 }; 2111 struct rocker *rocker; 2112 unsigned long event; 2113 }; 2114 2115 static void rocker_router_fib_event_work(struct work_struct *work) 2116 { 2117 struct rocker_fib_event_work *fib_work = 2118 container_of(work, struct rocker_fib_event_work, work); 2119 struct rocker *rocker = fib_work->rocker; 2120 struct fib_rule *rule; 2121 int err; 2122 2123 /* Protect internal structures from changes */ 2124 rtnl_lock(); 2125 switch (fib_work->event) { 2126 case FIB_EVENT_ENTRY_REPLACE: 2127 err = rocker_world_fib4_add(rocker, &fib_work->fen_info); 2128 if (err) 2129 rocker_world_fib4_abort(rocker); 2130 fib_info_put(fib_work->fen_info.fi); 2131 break; 2132 case FIB_EVENT_ENTRY_DEL: 2133 rocker_world_fib4_del(rocker, &fib_work->fen_info); 2134 fib_info_put(fib_work->fen_info.fi); 2135 break; 2136 case FIB_EVENT_RULE_ADD: 2137 case FIB_EVENT_RULE_DEL: 2138 rule = fib_work->fr_info.rule; 2139 if (!fib4_rule_default(rule)) 2140 rocker_world_fib4_abort(rocker); 2141 fib_rule_put(rule); 2142 break; 2143 } 2144 rtnl_unlock(); 2145 kfree(fib_work); 2146 } 2147 2148 /* Called with rcu_read_lock() */ 2149 static int rocker_router_fib_event(struct notifier_block *nb, 2150 unsigned long event, void *ptr) 2151 { 2152 struct rocker *rocker = container_of(nb, struct rocker, fib_nb); 2153 struct rocker_fib_event_work *fib_work; 2154 struct fib_notifier_info *info = ptr; 2155 2156 if (info->family != AF_INET) 2157 return NOTIFY_DONE; 2158 2159 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); 2160 if (WARN_ON(!fib_work)) 2161 return NOTIFY_BAD; 2162 2163 INIT_WORK(&fib_work->work, rocker_router_fib_event_work); 2164 fib_work->rocker = rocker; 2165 fib_work->event = event; 2166 2167 switch (event) { 2168 case FIB_EVENT_ENTRY_REPLACE: 2169 case FIB_EVENT_ENTRY_DEL: 2170 if (info->family == AF_INET) { 2171 struct fib_entry_notifier_info *fen_info = ptr; 2172 2173 if (fen_info->fi->fib_nh_is_v6) { 2174 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); 2175 kfree(fib_work); 2176 return notifier_from_errno(-EINVAL); 2177 } 2178 if (fen_info->fi->nh) { 2179 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); 2180 kfree(fib_work); 2181 return notifier_from_errno(-EINVAL); 2182 } 2183 } 2184 2185 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); 2186 /* Take referece on fib_info to prevent it from being 2187 * freed while work is queued. Release it afterwards. 2188 */ 2189 fib_info_hold(fib_work->fen_info.fi); 2190 break; 2191 case FIB_EVENT_RULE_ADD: 2192 case FIB_EVENT_RULE_DEL: 2193 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); 2194 fib_rule_get(fib_work->fr_info.rule); 2195 break; 2196 } 2197 2198 queue_work(rocker->rocker_owq, &fib_work->work); 2199 2200 return NOTIFY_DONE; 2201 } 2202 2203 /******************** 2204 * ethtool interface 2205 ********************/ 2206 2207 static int 2208 rocker_port_get_link_ksettings(struct net_device *dev, 2209 struct ethtool_link_ksettings *ecmd) 2210 { 2211 struct rocker_port *rocker_port = netdev_priv(dev); 2212 2213 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); 2214 } 2215 2216 static int 2217 rocker_port_set_link_ksettings(struct net_device *dev, 2218 const struct ethtool_link_ksettings *ecmd) 2219 { 2220 struct rocker_port *rocker_port = netdev_priv(dev); 2221 2222 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); 2223 } 2224 2225 static void rocker_port_get_drvinfo(struct net_device *dev, 2226 struct ethtool_drvinfo *drvinfo) 2227 { 2228 strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); 2229 } 2230 2231 static struct rocker_port_stats { 2232 char str[ETH_GSTRING_LEN]; 2233 int type; 2234 } rocker_port_stats[] = { 2235 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, 2236 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, 2237 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, 2238 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, 2239 2240 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, 2241 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, 2242 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, 2243 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, 2244 }; 2245 2246 #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) 2247 2248 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, 2249 u8 *data) 2250 { 2251 u8 *p = data; 2252 int i; 2253 2254 switch (stringset) { 2255 case ETH_SS_STATS: 2256 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { 2257 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); 2258 p += ETH_GSTRING_LEN; 2259 } 2260 break; 2261 } 2262 } 2263 2264 static int 2265 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, 2266 struct rocker_desc_info *desc_info, 2267 void *priv) 2268 { 2269 struct rocker_tlv *cmd_stats; 2270 2271 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, 2272 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) 2273 return -EMSGSIZE; 2274 2275 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 2276 if (!cmd_stats) 2277 return -EMSGSIZE; 2278 2279 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, 2280 rocker_port->pport)) 2281 return -EMSGSIZE; 2282 2283 rocker_tlv_nest_end(desc_info, cmd_stats); 2284 2285 return 0; 2286 } 2287 2288 static int 2289 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, 2290 const struct rocker_desc_info *desc_info, 2291 void *priv) 2292 { 2293 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; 2294 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; 2295 const struct rocker_tlv *pattr; 2296 u32 pport; 2297 u64 *data = priv; 2298 int i; 2299 2300 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); 2301 2302 if (!attrs[ROCKER_TLV_CMD_INFO]) 2303 return -EIO; 2304 2305 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, 2306 attrs[ROCKER_TLV_CMD_INFO]); 2307 2308 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) 2309 return -EIO; 2310 2311 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); 2312 if (pport != rocker_port->pport) 2313 return -EIO; 2314 2315 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { 2316 pattr = stats_attrs[rocker_port_stats[i].type]; 2317 if (!pattr) 2318 continue; 2319 2320 data[i] = rocker_tlv_get_u64(pattr); 2321 } 2322 2323 return 0; 2324 } 2325 2326 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, 2327 void *priv) 2328 { 2329 return rocker_cmd_exec(rocker_port, false, 2330 rocker_cmd_get_port_stats_prep, NULL, 2331 rocker_cmd_get_port_stats_ethtool_proc, 2332 priv); 2333 } 2334 2335 static void rocker_port_get_stats(struct net_device *dev, 2336 struct ethtool_stats *stats, u64 *data) 2337 { 2338 struct rocker_port *rocker_port = netdev_priv(dev); 2339 2340 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { 2341 int i; 2342 2343 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) 2344 data[i] = 0; 2345 } 2346 } 2347 2348 static int rocker_port_get_sset_count(struct net_device *netdev, int sset) 2349 { 2350 switch (sset) { 2351 case ETH_SS_STATS: 2352 return ROCKER_PORT_STATS_LEN; 2353 default: 2354 return -EOPNOTSUPP; 2355 } 2356 } 2357 2358 static const struct ethtool_ops rocker_port_ethtool_ops = { 2359 .get_drvinfo = rocker_port_get_drvinfo, 2360 .get_link = ethtool_op_get_link, 2361 .get_strings = rocker_port_get_strings, 2362 .get_ethtool_stats = rocker_port_get_stats, 2363 .get_sset_count = rocker_port_get_sset_count, 2364 .get_link_ksettings = rocker_port_get_link_ksettings, 2365 .set_link_ksettings = rocker_port_set_link_ksettings, 2366 }; 2367 2368 /***************** 2369 * NAPI interface 2370 *****************/ 2371 2372 static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) 2373 { 2374 return container_of(napi, struct rocker_port, napi_tx); 2375 } 2376 2377 static int rocker_port_poll_tx(struct napi_struct *napi, int budget) 2378 { 2379 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); 2380 const struct rocker *rocker = rocker_port->rocker; 2381 const struct rocker_desc_info *desc_info; 2382 u32 credits = 0; 2383 int err; 2384 2385 /* Cleanup tx descriptors */ 2386 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { 2387 struct sk_buff *skb; 2388 2389 err = rocker_desc_err(desc_info); 2390 if (err && net_ratelimit()) 2391 netdev_err(rocker_port->dev, "tx desc received with err %d\n", 2392 err); 2393 rocker_tx_desc_frags_unmap(rocker_port, desc_info); 2394 2395 skb = rocker_desc_cookie_ptr_get(desc_info); 2396 if (err == 0) { 2397 rocker_port->dev->stats.tx_packets++; 2398 rocker_port->dev->stats.tx_bytes += skb->len; 2399 } else { 2400 rocker_port->dev->stats.tx_errors++; 2401 } 2402 2403 dev_kfree_skb_any(skb); 2404 credits++; 2405 } 2406 2407 if (credits && netif_queue_stopped(rocker_port->dev)) 2408 netif_wake_queue(rocker_port->dev); 2409 2410 napi_complete(napi); 2411 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); 2412 2413 return 0; 2414 } 2415 2416 static int rocker_port_rx_proc(const struct rocker *rocker, 2417 const struct rocker_port *rocker_port, 2418 struct rocker_desc_info *desc_info) 2419 { 2420 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; 2421 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); 2422 size_t rx_len; 2423 u16 rx_flags = 0; 2424 2425 if (!skb) 2426 return -ENOENT; 2427 2428 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); 2429 if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) 2430 return -EINVAL; 2431 if (attrs[ROCKER_TLV_RX_FLAGS]) 2432 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); 2433 2434 rocker_dma_rx_ring_skb_unmap(rocker, attrs); 2435 2436 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); 2437 skb_put(skb, rx_len); 2438 skb->protocol = eth_type_trans(skb, rocker_port->dev); 2439 2440 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) 2441 skb->offload_fwd_mark = 1; 2442 2443 rocker_port->dev->stats.rx_packets++; 2444 rocker_port->dev->stats.rx_bytes += skb->len; 2445 2446 netif_receive_skb(skb); 2447 2448 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); 2449 } 2450 2451 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) 2452 { 2453 return container_of(napi, struct rocker_port, napi_rx); 2454 } 2455 2456 static int rocker_port_poll_rx(struct napi_struct *napi, int budget) 2457 { 2458 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); 2459 const struct rocker *rocker = rocker_port->rocker; 2460 struct rocker_desc_info *desc_info; 2461 u32 credits = 0; 2462 int err; 2463 2464 /* Process rx descriptors */ 2465 while (credits < budget && 2466 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { 2467 err = rocker_desc_err(desc_info); 2468 if (err) { 2469 if (net_ratelimit()) 2470 netdev_err(rocker_port->dev, "rx desc received with err %d\n", 2471 err); 2472 } else { 2473 err = rocker_port_rx_proc(rocker, rocker_port, 2474 desc_info); 2475 if (err && net_ratelimit()) 2476 netdev_err(rocker_port->dev, "rx processing failed with err %d\n", 2477 err); 2478 } 2479 if (err) 2480 rocker_port->dev->stats.rx_errors++; 2481 2482 rocker_desc_gen_clear(desc_info); 2483 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); 2484 credits++; 2485 } 2486 2487 if (credits < budget) 2488 napi_complete_done(napi, credits); 2489 2490 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); 2491 2492 return credits; 2493 } 2494 2495 /***************** 2496 * PCI driver ops 2497 *****************/ 2498 2499 static void rocker_carrier_init(const struct rocker_port *rocker_port) 2500 { 2501 const struct rocker *rocker = rocker_port->rocker; 2502 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); 2503 bool link_up; 2504 2505 link_up = link_status & (1 << rocker_port->pport); 2506 if (link_up) 2507 netif_carrier_on(rocker_port->dev); 2508 else 2509 netif_carrier_off(rocker_port->dev); 2510 } 2511 2512 static void rocker_remove_ports(struct rocker *rocker) 2513 { 2514 struct rocker_port *rocker_port; 2515 int i; 2516 2517 for (i = 0; i < rocker->port_count; i++) { 2518 rocker_port = rocker->ports[i]; 2519 if (!rocker_port) 2520 continue; 2521 rocker_world_port_fini(rocker_port); 2522 unregister_netdev(rocker_port->dev); 2523 rocker_world_port_post_fini(rocker_port); 2524 free_netdev(rocker_port->dev); 2525 } 2526 rocker_world_fini(rocker); 2527 kfree(rocker->ports); 2528 } 2529 2530 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) 2531 { 2532 const struct rocker *rocker = rocker_port->rocker; 2533 const struct pci_dev *pdev = rocker->pdev; 2534 u8 addr[ETH_ALEN]; 2535 int err; 2536 2537 err = rocker_cmd_get_port_settings_macaddr(rocker_port, addr); 2538 if (!err) { 2539 eth_hw_addr_set(rocker_port->dev, addr); 2540 } else { 2541 dev_warn(&pdev->dev, "failed to get mac address, using random\n"); 2542 eth_hw_addr_random(rocker_port->dev); 2543 } 2544 } 2545 2546 #define ROCKER_PORT_MIN_MTU ETH_MIN_MTU 2547 #define ROCKER_PORT_MAX_MTU 9000 2548 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) 2549 { 2550 struct pci_dev *pdev = rocker->pdev; 2551 struct rocker_port *rocker_port; 2552 struct net_device *dev; 2553 int err; 2554 2555 dev = alloc_etherdev(sizeof(struct rocker_port)); 2556 if (!dev) 2557 return -ENOMEM; 2558 SET_NETDEV_DEV(dev, &pdev->dev); 2559 rocker_port = netdev_priv(dev); 2560 rocker_port->dev = dev; 2561 rocker_port->rocker = rocker; 2562 rocker_port->port_number = port_number; 2563 rocker_port->pport = port_number + 1; 2564 2565 err = rocker_world_check_init(rocker_port); 2566 if (err) { 2567 dev_err(&pdev->dev, "world init failed\n"); 2568 goto err_world_check_init; 2569 } 2570 2571 rocker_port_dev_addr_init(rocker_port); 2572 dev->netdev_ops = &rocker_port_netdev_ops; 2573 dev->ethtool_ops = &rocker_port_ethtool_ops; 2574 netif_napi_add_tx(dev, &rocker_port->napi_tx, rocker_port_poll_tx); 2575 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx); 2576 rocker_carrier_init(rocker_port); 2577 2578 dev->features |= NETIF_F_SG; 2579 dev->netns_local = true; 2580 2581 /* MTU range: 68 - 9000 */ 2582 dev->min_mtu = ROCKER_PORT_MIN_MTU; 2583 dev->max_mtu = ROCKER_PORT_MAX_MTU; 2584 2585 err = rocker_world_port_pre_init(rocker_port); 2586 if (err) { 2587 dev_err(&pdev->dev, "port world pre-init failed\n"); 2588 goto err_world_port_pre_init; 2589 } 2590 err = register_netdev(dev); 2591 if (err) { 2592 dev_err(&pdev->dev, "register_netdev failed\n"); 2593 goto err_register_netdev; 2594 } 2595 rocker->ports[port_number] = rocker_port; 2596 2597 err = rocker_world_port_init(rocker_port); 2598 if (err) { 2599 dev_err(&pdev->dev, "port world init failed\n"); 2600 goto err_world_port_init; 2601 } 2602 2603 return 0; 2604 2605 err_world_port_init: 2606 rocker->ports[port_number] = NULL; 2607 unregister_netdev(dev); 2608 err_register_netdev: 2609 rocker_world_port_post_fini(rocker_port); 2610 err_world_port_pre_init: 2611 err_world_check_init: 2612 free_netdev(dev); 2613 return err; 2614 } 2615 2616 static int rocker_probe_ports(struct rocker *rocker) 2617 { 2618 int i; 2619 size_t alloc_size; 2620 int err; 2621 2622 alloc_size = sizeof(struct rocker_port *) * rocker->port_count; 2623 rocker->ports = kzalloc(alloc_size, GFP_KERNEL); 2624 if (!rocker->ports) 2625 return -ENOMEM; 2626 for (i = 0; i < rocker->port_count; i++) { 2627 err = rocker_probe_port(rocker, i); 2628 if (err) 2629 goto remove_ports; 2630 } 2631 return 0; 2632 2633 remove_ports: 2634 rocker_remove_ports(rocker); 2635 return err; 2636 } 2637 2638 static int rocker_msix_init(struct rocker *rocker) 2639 { 2640 struct pci_dev *pdev = rocker->pdev; 2641 int msix_entries; 2642 int i; 2643 int err; 2644 2645 msix_entries = pci_msix_vec_count(pdev); 2646 if (msix_entries < 0) 2647 return msix_entries; 2648 2649 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) 2650 return -EINVAL; 2651 2652 rocker->msix_entries = kmalloc_array(msix_entries, 2653 sizeof(struct msix_entry), 2654 GFP_KERNEL); 2655 if (!rocker->msix_entries) 2656 return -ENOMEM; 2657 2658 for (i = 0; i < msix_entries; i++) 2659 rocker->msix_entries[i].entry = i; 2660 2661 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); 2662 if (err < 0) 2663 goto err_enable_msix; 2664 2665 return 0; 2666 2667 err_enable_msix: 2668 kfree(rocker->msix_entries); 2669 return err; 2670 } 2671 2672 static void rocker_msix_fini(const struct rocker *rocker) 2673 { 2674 pci_disable_msix(rocker->pdev); 2675 kfree(rocker->msix_entries); 2676 } 2677 2678 static bool rocker_port_dev_check(const struct net_device *dev) 2679 { 2680 return dev->netdev_ops == &rocker_port_netdev_ops; 2681 } 2682 2683 static int 2684 rocker_switchdev_port_attr_set_event(struct net_device *netdev, 2685 struct switchdev_notifier_port_attr_info *port_attr_info) 2686 { 2687 int err; 2688 2689 err = rocker_port_attr_set(netdev, port_attr_info->attr); 2690 2691 port_attr_info->handled = true; 2692 return notifier_from_errno(err); 2693 } 2694 2695 struct rocker_switchdev_event_work { 2696 struct work_struct work; 2697 struct switchdev_notifier_fdb_info fdb_info; 2698 struct rocker_port *rocker_port; 2699 unsigned long event; 2700 }; 2701 2702 static void 2703 rocker_fdb_offload_notify(struct rocker_port *rocker_port, 2704 struct switchdev_notifier_fdb_info *recv_info) 2705 { 2706 struct switchdev_notifier_fdb_info info = {}; 2707 2708 info.addr = recv_info->addr; 2709 info.vid = recv_info->vid; 2710 info.offloaded = true; 2711 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, 2712 rocker_port->dev, &info.info, NULL); 2713 } 2714 2715 static void rocker_switchdev_event_work(struct work_struct *work) 2716 { 2717 struct rocker_switchdev_event_work *switchdev_work = 2718 container_of(work, struct rocker_switchdev_event_work, work); 2719 struct rocker_port *rocker_port = switchdev_work->rocker_port; 2720 struct switchdev_notifier_fdb_info *fdb_info; 2721 int err; 2722 2723 rtnl_lock(); 2724 switch (switchdev_work->event) { 2725 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2726 fdb_info = &switchdev_work->fdb_info; 2727 if (!fdb_info->added_by_user || fdb_info->is_local) 2728 break; 2729 err = rocker_world_port_fdb_add(rocker_port, fdb_info); 2730 if (err) { 2731 netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err); 2732 break; 2733 } 2734 rocker_fdb_offload_notify(rocker_port, fdb_info); 2735 break; 2736 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2737 fdb_info = &switchdev_work->fdb_info; 2738 if (!fdb_info->added_by_user || fdb_info->is_local) 2739 break; 2740 err = rocker_world_port_fdb_del(rocker_port, fdb_info); 2741 if (err) 2742 netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err); 2743 break; 2744 } 2745 rtnl_unlock(); 2746 2747 kfree(switchdev_work->fdb_info.addr); 2748 kfree(switchdev_work); 2749 dev_put(rocker_port->dev); 2750 } 2751 2752 /* called under rcu_read_lock() */ 2753 static int rocker_switchdev_event(struct notifier_block *unused, 2754 unsigned long event, void *ptr) 2755 { 2756 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2757 struct rocker_switchdev_event_work *switchdev_work; 2758 struct switchdev_notifier_fdb_info *fdb_info = ptr; 2759 struct rocker_port *rocker_port; 2760 2761 if (!rocker_port_dev_check(dev)) 2762 return NOTIFY_DONE; 2763 2764 if (event == SWITCHDEV_PORT_ATTR_SET) 2765 return rocker_switchdev_port_attr_set_event(dev, ptr); 2766 2767 rocker_port = netdev_priv(dev); 2768 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 2769 if (WARN_ON(!switchdev_work)) 2770 return NOTIFY_BAD; 2771 2772 INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work); 2773 switchdev_work->rocker_port = rocker_port; 2774 switchdev_work->event = event; 2775 2776 switch (event) { 2777 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2778 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2779 memcpy(&switchdev_work->fdb_info, ptr, 2780 sizeof(switchdev_work->fdb_info)); 2781 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2782 if (unlikely(!switchdev_work->fdb_info.addr)) { 2783 kfree(switchdev_work); 2784 return NOTIFY_BAD; 2785 } 2786 2787 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 2788 fdb_info->addr); 2789 /* Take a reference on the rocker device */ 2790 dev_hold(dev); 2791 break; 2792 default: 2793 kfree(switchdev_work); 2794 return NOTIFY_DONE; 2795 } 2796 2797 queue_work(rocker_port->rocker->rocker_owq, 2798 &switchdev_work->work); 2799 return NOTIFY_DONE; 2800 } 2801 2802 static int 2803 rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev, 2804 struct switchdev_notifier_port_obj_info *port_obj_info) 2805 { 2806 int err = -EOPNOTSUPP; 2807 2808 switch (event) { 2809 case SWITCHDEV_PORT_OBJ_ADD: 2810 err = rocker_port_obj_add(netdev, port_obj_info->obj); 2811 break; 2812 case SWITCHDEV_PORT_OBJ_DEL: 2813 err = rocker_port_obj_del(netdev, port_obj_info->obj); 2814 break; 2815 } 2816 2817 port_obj_info->handled = true; 2818 return notifier_from_errno(err); 2819 } 2820 2821 static int rocker_switchdev_blocking_event(struct notifier_block *unused, 2822 unsigned long event, void *ptr) 2823 { 2824 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2825 2826 if (!rocker_port_dev_check(dev)) 2827 return NOTIFY_DONE; 2828 2829 switch (event) { 2830 case SWITCHDEV_PORT_OBJ_ADD: 2831 case SWITCHDEV_PORT_OBJ_DEL: 2832 return rocker_switchdev_port_obj_event(event, dev, ptr); 2833 case SWITCHDEV_PORT_ATTR_SET: 2834 return rocker_switchdev_port_attr_set_event(dev, ptr); 2835 } 2836 2837 return NOTIFY_DONE; 2838 } 2839 2840 static struct notifier_block rocker_switchdev_notifier = { 2841 .notifier_call = rocker_switchdev_event, 2842 }; 2843 2844 static struct notifier_block rocker_switchdev_blocking_notifier = { 2845 .notifier_call = rocker_switchdev_blocking_event, 2846 }; 2847 2848 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2849 { 2850 struct notifier_block *nb; 2851 struct rocker *rocker; 2852 int err; 2853 2854 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); 2855 if (!rocker) 2856 return -ENOMEM; 2857 2858 err = pci_enable_device(pdev); 2859 if (err) { 2860 dev_err(&pdev->dev, "pci_enable_device failed\n"); 2861 goto err_pci_enable_device; 2862 } 2863 2864 err = pci_request_regions(pdev, rocker_driver_name); 2865 if (err) { 2866 dev_err(&pdev->dev, "pci_request_regions failed\n"); 2867 goto err_pci_request_regions; 2868 } 2869 2870 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2871 if (err) { 2872 dev_err(&pdev->dev, "dma_set_mask failed\n"); 2873 goto err_pci_set_dma_mask; 2874 } 2875 2876 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { 2877 dev_err(&pdev->dev, "invalid PCI region size\n"); 2878 err = -EINVAL; 2879 goto err_pci_resource_len_check; 2880 } 2881 2882 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), 2883 pci_resource_len(pdev, 0)); 2884 if (!rocker->hw_addr) { 2885 dev_err(&pdev->dev, "ioremap failed\n"); 2886 err = -EIO; 2887 goto err_ioremap; 2888 } 2889 pci_set_master(pdev); 2890 2891 rocker->pdev = pdev; 2892 pci_set_drvdata(pdev, rocker); 2893 2894 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); 2895 2896 err = rocker_msix_init(rocker); 2897 if (err) { 2898 dev_err(&pdev->dev, "MSI-X init failed\n"); 2899 goto err_msix_init; 2900 } 2901 2902 err = rocker_basic_hw_test(rocker); 2903 if (err) { 2904 dev_err(&pdev->dev, "basic hw test failed\n"); 2905 goto err_basic_hw_test; 2906 } 2907 2908 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); 2909 2910 err = rocker_dma_rings_init(rocker); 2911 if (err) 2912 goto err_dma_rings_init; 2913 2914 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), 2915 rocker_cmd_irq_handler, 0, 2916 rocker_driver_name, rocker); 2917 if (err) { 2918 dev_err(&pdev->dev, "cannot assign cmd irq\n"); 2919 goto err_request_cmd_irq; 2920 } 2921 2922 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), 2923 rocker_event_irq_handler, 0, 2924 rocker_driver_name, rocker); 2925 if (err) { 2926 dev_err(&pdev->dev, "cannot assign event irq\n"); 2927 goto err_request_event_irq; 2928 } 2929 2930 rocker->rocker_owq = alloc_ordered_workqueue(rocker_driver_name, 2931 WQ_MEM_RECLAIM); 2932 if (!rocker->rocker_owq) { 2933 err = -ENOMEM; 2934 goto err_alloc_ordered_workqueue; 2935 } 2936 2937 err = rocker_probe_ports(rocker); 2938 if (err) { 2939 dev_err(&pdev->dev, "failed to probe ports\n"); 2940 goto err_probe_ports; 2941 } 2942 2943 /* Only FIBs pointing to our own netdevs are programmed into 2944 * the device, so no need to pass a callback. 2945 */ 2946 rocker->fib_nb.notifier_call = rocker_router_fib_event; 2947 err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL); 2948 if (err) 2949 goto err_register_fib_notifier; 2950 2951 err = register_switchdev_notifier(&rocker_switchdev_notifier); 2952 if (err) { 2953 dev_err(&pdev->dev, "Failed to register switchdev notifier\n"); 2954 goto err_register_switchdev_notifier; 2955 } 2956 2957 nb = &rocker_switchdev_blocking_notifier; 2958 err = register_switchdev_blocking_notifier(nb); 2959 if (err) { 2960 dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n"); 2961 goto err_register_switchdev_blocking_notifier; 2962 } 2963 2964 rocker->hw.id = rocker_read64(rocker, SWITCH_ID); 2965 2966 dev_info(&pdev->dev, "Rocker switch with id %*phN\n", 2967 (int)sizeof(rocker->hw.id), &rocker->hw.id); 2968 2969 return 0; 2970 2971 err_register_switchdev_blocking_notifier: 2972 unregister_switchdev_notifier(&rocker_switchdev_notifier); 2973 err_register_switchdev_notifier: 2974 unregister_fib_notifier(&init_net, &rocker->fib_nb); 2975 err_register_fib_notifier: 2976 rocker_remove_ports(rocker); 2977 err_probe_ports: 2978 destroy_workqueue(rocker->rocker_owq); 2979 err_alloc_ordered_workqueue: 2980 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); 2981 err_request_event_irq: 2982 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); 2983 err_request_cmd_irq: 2984 rocker_dma_rings_fini(rocker); 2985 err_dma_rings_init: 2986 err_basic_hw_test: 2987 rocker_msix_fini(rocker); 2988 err_msix_init: 2989 iounmap(rocker->hw_addr); 2990 err_ioremap: 2991 err_pci_resource_len_check: 2992 err_pci_set_dma_mask: 2993 pci_release_regions(pdev); 2994 err_pci_request_regions: 2995 pci_disable_device(pdev); 2996 err_pci_enable_device: 2997 kfree(rocker); 2998 return err; 2999 } 3000 3001 static void rocker_remove(struct pci_dev *pdev) 3002 { 3003 struct rocker *rocker = pci_get_drvdata(pdev); 3004 struct notifier_block *nb; 3005 3006 nb = &rocker_switchdev_blocking_notifier; 3007 unregister_switchdev_blocking_notifier(nb); 3008 3009 unregister_switchdev_notifier(&rocker_switchdev_notifier); 3010 unregister_fib_notifier(&init_net, &rocker->fib_nb); 3011 rocker_remove_ports(rocker); 3012 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); 3013 destroy_workqueue(rocker->rocker_owq); 3014 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); 3015 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); 3016 rocker_dma_rings_fini(rocker); 3017 rocker_msix_fini(rocker); 3018 iounmap(rocker->hw_addr); 3019 pci_release_regions(rocker->pdev); 3020 pci_disable_device(rocker->pdev); 3021 kfree(rocker); 3022 } 3023 3024 static struct pci_driver rocker_pci_driver = { 3025 .name = rocker_driver_name, 3026 .id_table = rocker_pci_id_table, 3027 .probe = rocker_probe, 3028 .remove = rocker_remove, 3029 }; 3030 3031 /************************************ 3032 * Net device notifier event handler 3033 ************************************/ 3034 3035 static bool rocker_port_dev_check_under(const struct net_device *dev, 3036 struct rocker *rocker) 3037 { 3038 struct rocker_port *rocker_port; 3039 3040 if (!rocker_port_dev_check(dev)) 3041 return false; 3042 3043 rocker_port = netdev_priv(dev); 3044 if (rocker_port->rocker != rocker) 3045 return false; 3046 3047 return true; 3048 } 3049 3050 struct rocker_walk_data { 3051 struct rocker *rocker; 3052 struct rocker_port *port; 3053 }; 3054 3055 static int rocker_lower_dev_walk(struct net_device *lower_dev, 3056 struct netdev_nested_priv *priv) 3057 { 3058 struct rocker_walk_data *data = (struct rocker_walk_data *)priv->data; 3059 int ret = 0; 3060 3061 if (rocker_port_dev_check_under(lower_dev, data->rocker)) { 3062 data->port = netdev_priv(lower_dev); 3063 ret = 1; 3064 } 3065 3066 return ret; 3067 } 3068 3069 struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev, 3070 struct rocker *rocker) 3071 { 3072 struct netdev_nested_priv priv; 3073 struct rocker_walk_data data; 3074 3075 if (rocker_port_dev_check_under(dev, rocker)) 3076 return netdev_priv(dev); 3077 3078 data.rocker = rocker; 3079 data.port = NULL; 3080 priv.data = (void *)&data; 3081 netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &priv); 3082 3083 return data.port; 3084 } 3085 3086 static int rocker_netdevice_event(struct notifier_block *unused, 3087 unsigned long event, void *ptr) 3088 { 3089 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); 3090 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3091 struct netdev_notifier_changeupper_info *info; 3092 struct rocker_port *rocker_port; 3093 int err; 3094 3095 if (!rocker_port_dev_check(dev)) 3096 return NOTIFY_DONE; 3097 3098 switch (event) { 3099 case NETDEV_CHANGEUPPER: 3100 info = ptr; 3101 if (!info->master) 3102 goto out; 3103 rocker_port = netdev_priv(dev); 3104 if (info->linking) { 3105 err = rocker_world_port_master_linked(rocker_port, 3106 info->upper_dev, 3107 extack); 3108 if (err) 3109 netdev_warn(dev, "failed to reflect master linked (err %d)\n", 3110 err); 3111 } else { 3112 err = rocker_world_port_master_unlinked(rocker_port, 3113 info->upper_dev); 3114 if (err) 3115 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", 3116 err); 3117 } 3118 } 3119 out: 3120 return NOTIFY_DONE; 3121 } 3122 3123 static struct notifier_block rocker_netdevice_nb __read_mostly = { 3124 .notifier_call = rocker_netdevice_event, 3125 }; 3126 3127 /************************************ 3128 * Net event notifier event handler 3129 ************************************/ 3130 3131 static int rocker_netevent_event(struct notifier_block *unused, 3132 unsigned long event, void *ptr) 3133 { 3134 struct rocker_port *rocker_port; 3135 struct net_device *dev; 3136 struct neighbour *n = ptr; 3137 int err; 3138 3139 switch (event) { 3140 case NETEVENT_NEIGH_UPDATE: 3141 if (n->tbl != &arp_tbl) 3142 return NOTIFY_DONE; 3143 dev = n->dev; 3144 if (!rocker_port_dev_check(dev)) 3145 return NOTIFY_DONE; 3146 rocker_port = netdev_priv(dev); 3147 err = rocker_world_port_neigh_update(rocker_port, n); 3148 if (err) 3149 netdev_warn(dev, "failed to handle neigh update (err %d)\n", 3150 err); 3151 break; 3152 } 3153 3154 return NOTIFY_DONE; 3155 } 3156 3157 static struct notifier_block rocker_netevent_nb __read_mostly = { 3158 .notifier_call = rocker_netevent_event, 3159 }; 3160 3161 /*********************** 3162 * Module init and exit 3163 ***********************/ 3164 3165 static int __init rocker_module_init(void) 3166 { 3167 int err; 3168 3169 register_netdevice_notifier(&rocker_netdevice_nb); 3170 register_netevent_notifier(&rocker_netevent_nb); 3171 err = pci_register_driver(&rocker_pci_driver); 3172 if (err) 3173 goto err_pci_register_driver; 3174 return 0; 3175 3176 err_pci_register_driver: 3177 unregister_netevent_notifier(&rocker_netevent_nb); 3178 unregister_netdevice_notifier(&rocker_netdevice_nb); 3179 return err; 3180 } 3181 3182 static void __exit rocker_module_exit(void) 3183 { 3184 unregister_netevent_notifier(&rocker_netevent_nb); 3185 unregister_netdevice_notifier(&rocker_netdevice_nb); 3186 pci_unregister_driver(&rocker_pci_driver); 3187 } 3188 3189 module_init(rocker_module_init); 3190 module_exit(rocker_module_exit); 3191 3192 MODULE_LICENSE("GPL v2"); 3193 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); 3194 MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>"); 3195 MODULE_DESCRIPTION("Rocker switch device driver"); 3196 MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); 3197