// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "rocker_hw.h"
#include "rocker.h"
#include "rocker_tlv.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(void)
{
	struct rocker_wait *wait;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;
	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}
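/* Register access helpers: the macros below paste the ROCKER_ prefix onto
 * the register name, so callers pass the bare name from rocker_hw.h, e.g.
 * rocker_write32(rocker, TEST_REG, val) expands to a writel() at offset
 * ROCKER_TEST_REG. readq()/writeq() are provided on 32-bit builds by
 * io-64-nonatomic-lo-hi.h.
 */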
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = get_random_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = get_random_u32();
	rnd <<= 31;
	rnd |= get_random_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = dma_map_single(&pdev->dev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	get_random_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	dma_unmap_single(&pdev->dev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}
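/* Run the fill/clear/invert DMA tests at eight successive byte offsets
 * into the buffer, so that unaligned buffers are exercised as well as
 * the nicely aligned offset-0 case.
 */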
static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}
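/* Descriptor completion protocol: the comp_err field of a descriptor
 * carries a generation bit (ROCKER_DMA_DESC_COMP_ERR_GEN), set by the
 * device when it completes the descriptor, alongside a ROCKER_* error
 * code. rocker_desc_err() translates that code to a kernel errno; the
 * driver clears the generation bit once it has consumed the result.
 */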
static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = dma_alloc_coherent(&rocker->pdev->dev,
					info->size * sizeof(*info->desc),
					&info->mapaddr, GFP_KERNEL);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	dma_free_coherent(&rocker->pdev->dev,
			  info->size * sizeof(struct rocker_desc), info->desc,
			  info->mapaddr);
	kfree(info->desc_info);
}
static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = dma_map_single(&pdev->dev, buf, buf_size,
					    direction);
		if (dma_mapping_error(&pdev->dev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		dma_unmap_single(&pdev->dev,
				 dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		dma_unmap_single(&pdev->dev,
				 dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}
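/* Each command-ring descriptor permanently owns a struct rocker_wait,
 * stored as the descriptor cookie. rocker_cmd_exec() sleeps on it for
 * synchronous commands; for nowait commands the IRQ handler completes
 * the descriptor on its own by clearing the generation bit.
 */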
static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait;

	wait = rocker_wait_create();
	if (!wait)
		return -ENOMEM;
	rocker_desc_cookie_ptr_set(desc_info, wait);
	return 0;
}

static void
rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);

	rocker_wait_destroy(wait);
}

static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;
	int err;

	for (i = 0; i < cmd_ring->size; i++) {
		err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
	return err;
}

static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;

	for (i = 0; i < cmd_ring->size; i++)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_cmd_ring_waits_alloc(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
		goto err_dma_cmd_ring_waits_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 DMA_FROM_DEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	/* Unmap with the same direction the buffers were mapped with:
	 * the event ring is mapped DMA_FROM_DEVICE above.
	 */
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  DMA_FROM_DEVICE);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_cmd_ring_waits_free(rocker);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
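/* An rx descriptor hands a DMA-mapped skb data buffer to the device as
 * two TLVs: ROCKER_TLV_RX_FRAG_ADDR (the mapping) and
 * ROCKER_TLV_RX_FRAG_MAX_LEN (the buffer size). The skb itself is kept
 * in the descriptor cookie for the completion path.
 */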
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = dma_map_single(&pdev->dev, skb->data, buf_len,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_FROM_DEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	dma_unmap_single(&pdev->dev, dma_handle, len, DMA_FROM_DEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}
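/* Per-port datapath rings: each port gets its own tx and rx descriptor
 * rings, set up when the netdev is opened and torn down when it is
 * stopped. The rx ring is handed over to the device as producer right
 * after its buffers are posted.
 */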
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 DMA_TO_DEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  DMA_TO_DEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  DMA_TO_DEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}
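/* Event descriptors identify ports by physical port number (pport),
 * which is 1-based, hence the "- 1" conversion to the 0-based index
 * into rocker->ports[] below.
 */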
static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}

static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/
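/* Execute a command on the command ring. @prepare fills the descriptor
 * with TLVs under the ring lock; for synchronous commands (!nowait) the
 * caller then sleeps until the IRQ handler signals completion, after
 * which the optional @process callback parses the response TLVs.
 * Returns -EAGAIN if the ring is full.
 */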
int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait,
		    rocker_cmd_prep_cb_t prepare, void *prepare_priv,
		    rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	unsigned long lock_flags;
	int err;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return -EAGAIN;
	}

	wait = rocker_desc_cookie_ptr_get(desc_info);
	rocker_wait_init(wait);
	wait->nowait = nowait;

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return err;
	}

	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (!rocker_wait_event_timeout(wait, HZ / 10))
		return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
	return err;
}
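/* Command prepare/process callbacks speak the TLV format: a prepare
 * callback emits ROCKER_TLV_CMD_TYPE plus a nested ROCKER_TLV_CMD_INFO
 * block (rocker_tlv_nest_start()/rocker_tlv_nest_end()), returning
 * -EMSGSIZE if the descriptor buffer runs out; a process callback
 * parses the same nesting back out of the response descriptor.
 */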
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_link_ksettings *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);

	ecmd->base.phy_address = 0xff;
	ecmd->base.port = PORT_TP;
	ecmd->base.speed = speed;
	ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}

static int
rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	u8 *p_mode = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
	if (!attr)
		return -EIO;

	*p_mode = rocker_tlv_get_u8(attr);
	return 0;
}

struct port_name {
	char *buf;
	size_t len;
};

static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
					    void *priv)
{
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	size_t i, j, len;
	const char *str;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	if (!attr)
		return -EIO;

	/* leave room for the trailing '\0' */
	len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
	str = rocker_tlv_data(attr);

	/* make sure name only contains alphanumeric characters */
	for (i = j = 0; i < len; ++i) {
		if (isalnum(str[i])) {
			name->buf[j] = str[i];
			j++;
		}
	}

	if (j == 0)
		return -EIO;

	name->buf[j] = '\0';

	return 0;
}
static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_link_ksettings *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ecmd->base.speed))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->base.duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->base.autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      void *priv)
{
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
			       mtu))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	bool learning = *(bool *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      learning))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
				     struct ethtool_link_ksettings *ecmd)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr);
}
static int
rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
				  u8 *p_mode)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_mode_proc, p_mode);
}

static int
rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
				     const struct ethtool_link_ksettings *ecmd)
{
	struct ethtool_link_ksettings copy_ecmd;

	memcpy(&copy_ecmd, ecmd, sizeof(copy_ecmd));

	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       &copy_ecmd, NULL, NULL);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);
}

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
					    int mtu)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_set_port_settings_mtu_prep,
			       &mtu, NULL, NULL);
}

int rocker_port_set_learning(struct rocker_port *rocker_port,
			     bool learning)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_set_port_learning_prep,
			       &learning, NULL, NULL);
}

/**********************
 * Worlds manipulation
 **********************/
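/* A "world" is a complete switch pipeline implementation behind a common
 * ops interface; OF-DPA is currently the only one. The port mode reported
 * by the device selects which world is used, and all ports must be in the
 * same world.
 */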
static struct rocker_world_ops *rocker_world_ops[] = {
	&rocker_ofdpa_ops,
};

#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)

static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
{
	int i;

	for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
		if (rocker_world_ops[i]->mode == mode)
			return rocker_world_ops[i];
	return NULL;
}

static int rocker_world_init(struct rocker *rocker, u8 mode)
{
	struct rocker_world_ops *wops;
	int err;

	wops = rocker_world_ops_find(mode);
	if (!wops) {
		dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
			mode);
		return -EINVAL;
	}
	rocker->wops = wops;
	rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
	if (!rocker->wpriv)
		return -ENOMEM;
	if (!wops->init)
		return 0;
	err = wops->init(rocker);
	if (err)
		kfree(rocker->wpriv);
	return err;
}

static void rocker_world_fini(struct rocker *rocker)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (!wops || !wops->fini)
		return;
	wops->fini(rocker);
	kfree(rocker->wpriv);
}

static int rocker_world_check_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	u8 mode;
	int err;

	err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
	if (err) {
		dev_err(&rocker->pdev->dev, "failed to get port mode\n");
		return err;
	}
	if (rocker->wops) {
		if (rocker->wops->mode != mode) {
			dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
			return -EINVAL;
		}
		return 0;
	}
	return rocker_world_init(rocker, mode);
}

static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	int err;

	rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
	if (!rocker_port->wpriv)
		return -ENOMEM;
	if (!wops->port_pre_init)
		return 0;
	err = wops->port_pre_init(rocker_port);
	if (err)
		kfree(rocker_port->wpriv);
	return err;
}

static int rocker_world_port_init(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_init)
		return 0;
	return wops->port_init(rocker_port);
}

static void rocker_world_port_fini(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_fini)
		return;
	wops->port_fini(rocker_port);
}

static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_post_fini)
		return;
	wops->port_post_fini(rocker_port);
	kfree(rocker_port->wpriv);
}

static int rocker_world_port_open(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_open)
		return 0;
	return wops->port_open(rocker_port);
}

static void rocker_world_port_stop(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_stop)
		return;
	wops->port_stop(rocker_port);
}

static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
						u8 state)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_stp_state_set)
		return -EOPNOTSUPP;

	return wops->port_attr_stp_state_set(rocker_port, state);
}

static int
rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
						rocker_port,
						unsigned long *
						p_brport_flags_support)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_flags_support_get)
		return -EOPNOTSUPP;
	return wops->port_attr_bridge_flags_support_get(rocker_port,
							p_brport_flags_support);
}
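/* Bridge port flags are set in two switchdev phases: the "pre" hook
 * below only verifies that every requested flag is in the mask the
 * world supports, so the subsequent set hook cannot fail halfway.
 */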
static int
rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
					    struct switchdev_brport_flags flags)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	unsigned long brport_flags_s;
	int err;

	if (!wops->port_attr_bridge_flags_set)
		return -EOPNOTSUPP;

	err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
							      &brport_flags_s);
	if (err)
		return err;

	if (flags.mask & ~brport_flags_s)
		return -EINVAL;

	return 0;
}

static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					struct switchdev_brport_flags flags)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_flags_set)
		return -EOPNOTSUPP;

	return wops->port_attr_bridge_flags_set(rocker_port, flags.val);
}

static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
					      u32 ageing_time)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_ageing_time_set)
		return -EOPNOTSUPP;

	return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time);
}

static int
rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_add)
		return -EOPNOTSUPP;

	return wops->port_obj_vlan_add(rocker_port, vlan);
}

static int
rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	if (!wops->port_obj_vlan_del)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_del(rocker_port, vlan);
}

static int
rocker_world_port_fdb_add(struct rocker_port *rocker_port,
			  struct switchdev_notifier_fdb_info *info)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_add)
		return -EOPNOTSUPP;

	return wops->port_obj_fdb_add(rocker_port, info->vid, info->addr);
}

static int
rocker_world_port_fdb_del(struct rocker_port *rocker_port,
			  struct switchdev_notifier_fdb_info *info)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_del)
		return -EOPNOTSUPP;
	return wops->port_obj_fdb_del(rocker_port, info->vid, info->addr);
}

static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
					   struct net_device *master,
					   struct netlink_ext_ack *extack)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_master_linked)
		return -EOPNOTSUPP;
	return wops->port_master_linked(rocker_port, master, extack);
}

static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
					     struct net_device *master)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_master_unlinked)
		return -EOPNOTSUPP;
	return wops->port_master_unlinked(rocker_port, master);
}

static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
					  struct neighbour *n)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_neigh_update)
		return -EOPNOTSUPP;
	return wops->port_neigh_update(rocker_port, n);
}

static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
					   struct neighbour *n)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_neigh_destroy)
		return -EOPNOTSUPP;
	return wops->port_neigh_destroy(rocker_port, n);
}

static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_ev_mac_vlan_seen)
		return -EOPNOTSUPP;
	return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}
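/* FIB hooks deliberately return 0 when a world does not implement them:
 * the route is then simply not offloaded rather than rejected.
 */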
static int rocker_world_fib4_add(struct rocker *rocker,
				 const struct fib_entry_notifier_info *fen_info)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (!wops->fib4_add)
		return 0;
	return wops->fib4_add(rocker, fen_info);
}

static int rocker_world_fib4_del(struct rocker *rocker,
				 const struct fib_entry_notifier_info *fen_info)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (!wops->fib4_del)
		return 0;
	return wops->fib4_del(rocker, fen_info);
}

static void rocker_world_fib4_abort(struct rocker *rocker)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (wops->fib4_abort)
		wops->fib4_abort(rocker);
}

/*****************
 * Net device ops
 *****************/

static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_world_port_open(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot open port in world\n");
		goto err_world_port_open;
	}

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_world_port_open:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_world_port_stop(rocker_port);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}
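/* A tx descriptor carries the packet as a nested ROCKER_TLV_TX_FRAGS
 * list; each ROCKER_TLV_TX_FRAG holds the DMA address and length of one
 * fragment (the skb head, then each page fragment).
 */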
static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		dma_unmap_single(&pdev->dev, dma_handle, len, DMA_TO_DEVICE);
	}
}

static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = dma_map_single(&pdev->dev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&pdev->dev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}

static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}
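/* Changing the MTU resizes the rx buffers (see rocker_port_rx_buf_len()),
 * so a running port is stopped and reopened around the MTU update to
 * rebuild its rings with correctly sized buffers.
 */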
static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	WRITE_ONCE(dev->mtu, new_mtu);

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}

static int rocker_port_get_phys_port_name(struct net_device *dev,
					  char *buf, size_t len)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct port_name name = { .buf = buf, .len = len };
	int err;

	err = rocker_cmd_exec(rocker_port, false,
			      rocker_cmd_get_port_settings_prep, NULL,
			      rocker_cmd_get_port_settings_phys_name_proc,
			      &name);

	return err ? -EOPNOTSUPP : 0;
}

static void rocker_port_neigh_destroy(struct net_device *dev,
				      struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(n->dev);
	int err;

	err = rocker_world_port_neigh_destroy(rocker_port, n);
	if (err)
		netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
			    err);
}

static int rocker_port_get_port_parent_id(struct net_device *dev,
					  struct netdev_phys_item_id *ppid)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	const struct rocker *rocker = rocker_port->rocker;

	ppid->id_len = sizeof(rocker->hw.id);
	memcpy(&ppid->id, &rocker->hw.id, ppid->id_len);

	return 0;
}

static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open = rocker_port_open,
	.ndo_stop = rocker_port_stop,
	.ndo_start_xmit = rocker_port_xmit,
	.ndo_set_mac_address = rocker_port_set_mac_address,
	.ndo_change_mtu = rocker_port_change_mtu,
	.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
	.ndo_neigh_destroy = rocker_port_neigh_destroy,
	.ndo_get_port_parent_id = rocker_port_get_port_parent_id,
};

/********************
 * swdev interface
 ********************/

static int rocker_port_attr_set(struct net_device *dev,
				const struct switchdev_attr *attr)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = rocker_world_port_attr_stp_state_set(rocker_port,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
								  attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = rocker_world_port_attr_bridge_flags_set(rocker_port,
							      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
								    attr->u.ageing_time);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_obj_add(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_world_port_obj_vlan_add(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_obj_del(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_world_port_obj_vlan_del(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

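/* FIB events are delivered in atomic context, so the actual programming
 * of the device is deferred to the driver's ordered workqueue.  The
 * notifier copies the notifier info into a rocker_fib_event_work and
 * takes a reference on the fib_info (or fib_rule) so it cannot be freed
 * before the work item runs.
 */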
struct rocker_fib_event_work {
	struct work_struct work;
	union {
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
	};
	struct rocker *rocker;
	unsigned long event;
};

static void rocker_router_fib_event_work(struct work_struct *work)
{
	struct rocker_fib_event_work *fib_work =
		container_of(work, struct rocker_fib_event_work, work);
	struct rocker *rocker = fib_work->rocker;
	struct fib_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
		if (err)
			rocker_world_fib4_abort(rocker);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		rocker_world_fib4_del(rocker, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule))
			rocker_world_fib4_abort(rocker);
		fib_rule_put(rule);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}

/* Called with rcu_read_lock() */
static int rocker_router_fib_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
	struct rocker_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	if (info->family != AF_INET)
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, rocker_router_fib_event_work);
	fib_work->rocker = rocker;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		if (info->family == AF_INET) {
			struct fib_entry_notifier_info *fen_info = ptr;

			if (fen_info->fi->fib_nh_is_v6) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
				kfree(fib_work);
				return notifier_from_errno(-EINVAL);
			}
			if (fen_info->fi->nh) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
				kfree(fib_work);
				return notifier_from_errno(-EINVAL);
			}
		}

		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	}

	queue_work(rocker->rocker_owq, &fib_work->work);

	return NOTIFY_DONE;
}

/********************
 * ethtool interface
 ********************/

static int
rocker_port_get_link_ksettings(struct net_device *dev,
			       struct ethtool_link_ksettings *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int
rocker_port_set_link_ksettings(struct net_device *dev,
			       const struct ethtool_link_ksettings *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
}

static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
	{ "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
	{ "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },

	{ "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
	{ "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
	{ "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
};

#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)

static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

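/* Port statistics are fetched with a GET_PORT_STATS command descriptor.
 * The request carries the pport of interest,
 *
 *   ROCKER_TLV_CMD_TYPE = ROCKER_TLV_CMD_TYPE_GET_PORT_STATS
 *   ROCKER_TLV_CMD_INFO
 *     ROCKER_TLV_CMD_PORT_STATS_PPORT (u32)
 *
 * and the reply is expected to echo the pport back along with one u64
 * counter attribute per entry in rocker_port_stats[] above.
 */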
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}

static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}

static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}

static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_drvinfo = rocker_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = rocker_port_get_strings,
	.get_ethtool_stats = rocker_port_get_stats,
	.get_sset_count = rocker_port_get_sset_count,
	.get_link_ksettings = rocker_port_get_link_ksettings,
	.set_link_ksettings = rocker_port_set_link_ksettings,
};

/*****************
 * NAPI interface
 *****************/

static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}

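/* Tx NAPI only reclaims completed descriptors; it does not cap the work
 * at @budget.  Each reclaimed descriptor is returned to the device as a
 * "credit" via rocker_dma_ring_credits_set(), and the queue is woken if
 * it was stopped for lack of descriptors.
 */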
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}

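/* One rx descriptor carries one received frame.  The skb was attached
 * to the descriptor as its cookie when the ring was filled; here it is
 * unmapped, trimmed to ROCKER_TLV_RX_FRAG_LEN and handed to the stack,
 * and a fresh skb is allocated back into the descriptor.  Frames the
 * device already forwarded in hardware arrive with
 * ROCKER_RX_FLAGS_FWD_OFFLOAD set, which is reflected in
 * skb->offload_fwd_mark so the bridge does not forward them a second
 * time in software.
 */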
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = 1;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}

static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete_done(napi, credits);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}

/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		if (!rocker_port)
			continue;
		rocker_world_port_fini(rocker_port);
		unregister_netdev(rocker_port->dev);
		rocker_world_port_post_fini(rocker_port);
		free_netdev(rocker_port->dev);
	}
	rocker_world_fini(rocker);
	kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	u8 addr[ETH_ALEN];
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port, addr);
	if (!err) {
		eth_hw_addr_set(rocker_port->dev, addr);
	} else {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}

#define ROCKER_PORT_MIN_MTU ETH_MIN_MTU
#define ROCKER_PORT_MAX_MTU 9000
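/* Ports are numbered 0..port_count-1 internally, while the device
 * addresses physical ports (pport) starting at 1; hence
 * pport = port_number + 1 below.  Each port gets its own netdev with
 * the rocker_port as its private area, plus dedicated tx/rx NAPI
 * contexts.
 */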
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;

	err = rocker_world_check_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "world init failed\n");
		goto err_world_check_init;
	}

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	netif_napi_add_tx(dev, &rocker_port->napi_tx, rocker_port_poll_tx);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	/* MTU range: 68 - 9000 */
	dev->min_mtu = ROCKER_PORT_MIN_MTU;
	dev->max_mtu = ROCKER_PORT_MAX_MTU;

	err = rocker_world_port_pre_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world pre-init failed\n");
		goto err_world_port_pre_init;
	}
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	err = rocker_world_port_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world init failed\n");
		goto err_world_port_init;
	}

	return 0;

err_world_port_init:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
	free_netdev(dev);
	return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}

static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}

static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}

static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}

static int
rocker_switchdev_port_attr_set_event(struct net_device *netdev,
		struct switchdev_notifier_port_attr_info *port_attr_info)
{
	int err;

	err = rocker_port_attr_set(netdev, port_attr_info->attr);

	port_attr_info->handled = true;
	return notifier_from_errno(err);
}

struct rocker_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct rocker_port *rocker_port;
	unsigned long event;
};

static void
rocker_fdb_offload_notify(struct rocker_port *rocker_port,
			  struct switchdev_notifier_fdb_info *recv_info)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = recv_info->addr;
	info.vid = recv_info->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 rocker_port->dev, &info.info, NULL);
}

static void rocker_switchdev_event_work(struct work_struct *work)
{
	struct rocker_switchdev_event_work *switchdev_work =
		container_of(work, struct rocker_switchdev_event_work, work);
	struct rocker_port *rocker_port = switchdev_work->rocker_port;
	struct switchdev_notifier_fdb_info *fdb_info;
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = rocker_world_port_fdb_add(rocker_port, fdb_info);
		if (err) {
			netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
			break;
		}
		rocker_fdb_offload_notify(rocker_port, fdb_info);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = rocker_world_port_fdb_del(rocker_port, fdb_info);
		if (err)
			netdev_dbg(rocker_port->dev, "fdb del failed err=%d\n", err);
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(rocker_port->dev);
}

/* called under rcu_read_lock() */
static int rocker_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct rocker_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	struct rocker_port *rocker_port;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	if (event == SWITCHDEV_PORT_ATTR_SET)
		return rocker_switchdev_port_attr_set_event(dev, ptr);

	rocker_port = netdev_priv(dev);
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (WARN_ON(!switchdev_work))
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work);
	switchdev_work->rocker_port = rocker_port;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (unlikely(!switchdev_work->fdb_info.addr)) {
			kfree(switchdev_work);
			return NOTIFY_BAD;
		}

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the rocker device */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(rocker_port->rocker->rocker_owq,
		   &switchdev_work->work);
	return NOTIFY_DONE;
}

static int
rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
		struct switchdev_notifier_port_obj_info *port_obj_info)
{
	int err = -EOPNOTSUPP;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = rocker_port_obj_add(netdev, port_obj_info->obj);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = rocker_port_obj_del(netdev, port_obj_info->obj);
		break;
	}

	port_obj_info->handled = true;
	return notifier_from_errno(err);
}

static int rocker_switchdev_blocking_event(struct notifier_block *unused,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		return rocker_switchdev_port_obj_event(event, dev, ptr);
	case SWITCHDEV_PORT_ATTR_SET:
		return rocker_switchdev_port_attr_set_event(dev, ptr);
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_switchdev_notifier = {
	.notifier_call = rocker_switchdev_event,
};

static struct notifier_block rocker_switchdev_blocking_notifier = {
	.notifier_call = rocker_switchdev_blocking_event,
};

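/* Device bring-up order: enable the PCI device and map BAR0, set up
 * MSI-X vectors, reset and test the hardware, initialize the DMA rings,
 * hook the cmd/event interrupts, create the ordered workqueue, probe
 * the ports, and finally register the FIB and switchdev notifiers.  The
 * error path below unwinds these steps in exactly the reverse order.
 */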
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct notifier_block *nb;
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "dma_set_mask failed\n");
		goto err_pci_set_dma_mask;
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->rocker_owq = alloc_ordered_workqueue(rocker_driver_name,
						     WQ_MEM_RECLAIM);
	if (!rocker->rocker_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	/* Only FIBs pointing to our own netdevs are programmed into
	 * the device, so no need to pass a callback.
	 */
	rocker->fib_nb.notifier_call = rocker_router_fib_event;
	err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL);
	if (err)
		goto err_register_fib_notifier;

	err = register_switchdev_notifier(&rocker_switchdev_notifier);
	if (err) {
		dev_err(&pdev->dev, "Failed to register switchdev notifier\n");
		goto err_register_switchdev_notifier;
	}

	nb = &rocker_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err) {
		dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n");
		goto err_register_switchdev_blocking_notifier;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_register_switchdev_blocking_notifier:
	unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
	unregister_fib_notifier(&init_net, &rocker->fib_nb);
err_register_fib_notifier:
	rocker_remove_ports(rocker);
err_probe_ports:
	destroy_workqueue(rocker->rocker_owq);
err_alloc_ordered_workqueue:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}

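/* Teardown mirrors the tail of rocker_probe(): the notifiers come off
 * first so no new work can be queued, the ports are removed, the
 * hardware is reset, and only then are the workqueue, interrupts,
 * rings and PCI resources released.
 */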
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);
	struct notifier_block *nb;

	nb = &rocker_switchdev_blocking_notifier;
	unregister_switchdev_blocking_notifier(nb);

	unregister_switchdev_notifier(&rocker_switchdev_notifier);
	unregister_fib_notifier(&init_net, &rocker->fib_nb);
	rocker_remove_ports(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	destroy_workqueue(rocker->rocker_owq);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check_under(const struct net_device *dev,
					struct rocker *rocker)
{
	struct rocker_port *rocker_port;

	if (!rocker_port_dev_check(dev))
		return false;

	rocker_port = netdev_priv(dev);
	if (rocker_port->rocker != rocker)
		return false;

	return true;
}

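/* rocker_port_dev_lower_find() resolves an upper device (e.g. a bridge
 * or bond on top of rocker ports) to one of this switch's ports by
 * walking its lower devices; the first port belonging to @rocker wins.
 */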
struct rocker_walk_data {
	struct rocker *rocker;
	struct rocker_port *port;
};

static int rocker_lower_dev_walk(struct net_device *lower_dev,
				 struct netdev_nested_priv *priv)
{
	struct rocker_walk_data *data = (struct rocker_walk_data *)priv->data;
	int ret = 0;

	if (rocker_port_dev_check_under(lower_dev, data->rocker)) {
		data->port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
					       struct rocker *rocker)
{
	struct netdev_nested_priv priv;
	struct rocker_walk_data data;

	if (rocker_port_dev_check_under(dev, rocker))
		return netdev_priv(dev);

	data.rocker = rocker;
	data.port = NULL;
	priv.data = (void *)&data;
	netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &priv);

	return data.port;
}

static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_world_port_master_linked(rocker_port,
							      info->upper_dev,
							      extack);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_world_port_master_unlinked(rocker_port,
								info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		rocker_port = netdev_priv(dev);
		err = rocker_world_port_neigh_update(rocker_port, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

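/* The netdevice and netevent notifiers are global and are registered
 * once at module load, before the PCI driver itself; their handlers
 * bail out early for netdevs that are not rocker ports.
 */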
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);