1 /* 2 * Copyright Gavin Shan, IBM Corporation 2016. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 */ 9 10 #include <linux/module.h> 11 #include <linux/kernel.h> 12 #include <linux/init.h> 13 #include <linux/netdevice.h> 14 #include <linux/skbuff.h> 15 16 #include <net/ncsi.h> 17 #include <net/net_namespace.h> 18 #include <net/sock.h> 19 #include <net/addrconf.h> 20 #include <net/ipv6.h> 21 #include <net/if_inet6.h> 22 #include <net/genetlink.h> 23 24 #include "internal.h" 25 #include "ncsi-pkt.h" 26 #include "ncsi-netlink.h" 27 28 LIST_HEAD(ncsi_dev_list); 29 DEFINE_SPINLOCK(ncsi_dev_lock); 30 31 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down) 32 { 33 struct ncsi_dev *nd = &ndp->ndev; 34 struct ncsi_package *np; 35 struct ncsi_channel *nc; 36 unsigned long flags; 37 38 nd->state = ncsi_dev_state_functional; 39 if (force_down) { 40 nd->link_up = 0; 41 goto report; 42 } 43 44 nd->link_up = 0; 45 NCSI_FOR_EACH_PACKAGE(ndp, np) { 46 NCSI_FOR_EACH_CHANNEL(np, nc) { 47 spin_lock_irqsave(&nc->lock, flags); 48 49 if (!list_empty(&nc->link) || 50 nc->state != NCSI_CHANNEL_ACTIVE) { 51 spin_unlock_irqrestore(&nc->lock, flags); 52 continue; 53 } 54 55 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) { 56 spin_unlock_irqrestore(&nc->lock, flags); 57 nd->link_up = 1; 58 goto report; 59 } 60 61 spin_unlock_irqrestore(&nc->lock, flags); 62 } 63 } 64 65 report: 66 nd->handler(nd); 67 } 68 69 static void ncsi_channel_monitor(struct timer_list *t) 70 { 71 struct ncsi_channel *nc = from_timer(nc, t, monitor.timer); 72 struct ncsi_package *np = nc->package; 73 struct ncsi_dev_priv *ndp = np->ndp; 74 struct ncsi_channel_mode *ncm; 75 struct ncsi_cmd_arg nca; 76 bool enabled, chained; 77 unsigned int monitor_state; 78 unsigned long flags; 79 int state, 
ret; 80 81 spin_lock_irqsave(&nc->lock, flags); 82 state = nc->state; 83 chained = !list_empty(&nc->link); 84 enabled = nc->monitor.enabled; 85 monitor_state = nc->monitor.state; 86 spin_unlock_irqrestore(&nc->lock, flags); 87 88 if (!enabled || chained) { 89 ncsi_stop_channel_monitor(nc); 90 return; 91 } 92 if (state != NCSI_CHANNEL_INACTIVE && 93 state != NCSI_CHANNEL_ACTIVE) { 94 ncsi_stop_channel_monitor(nc); 95 return; 96 } 97 98 switch (monitor_state) { 99 case NCSI_CHANNEL_MONITOR_START: 100 case NCSI_CHANNEL_MONITOR_RETRY: 101 nca.ndp = ndp; 102 nca.package = np->id; 103 nca.channel = nc->id; 104 nca.type = NCSI_PKT_CMD_GLS; 105 nca.req_flags = 0; 106 ret = ncsi_xmit_cmd(&nca); 107 if (ret) 108 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n", 109 ret); 110 break; 111 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX: 112 break; 113 default: 114 netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n", 115 nc->id); 116 if (!(ndp->flags & NCSI_DEV_HWA)) { 117 ncsi_report_link(ndp, true); 118 ndp->flags |= NCSI_DEV_RESHUFFLE; 119 } 120 121 ncsi_stop_channel_monitor(nc); 122 123 ncm = &nc->modes[NCSI_MODE_LINK]; 124 spin_lock_irqsave(&nc->lock, flags); 125 nc->state = NCSI_CHANNEL_INVISIBLE; 126 ncm->data[2] &= ~0x1; 127 spin_unlock_irqrestore(&nc->lock, flags); 128 129 spin_lock_irqsave(&ndp->lock, flags); 130 nc->state = NCSI_CHANNEL_ACTIVE; 131 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 132 spin_unlock_irqrestore(&ndp->lock, flags); 133 ncsi_process_next_channel(ndp); 134 return; 135 } 136 137 spin_lock_irqsave(&nc->lock, flags); 138 nc->monitor.state++; 139 spin_unlock_irqrestore(&nc->lock, flags); 140 mod_timer(&nc->monitor.timer, jiffies + HZ); 141 } 142 143 void ncsi_start_channel_monitor(struct ncsi_channel *nc) 144 { 145 unsigned long flags; 146 147 spin_lock_irqsave(&nc->lock, flags); 148 WARN_ON_ONCE(nc->monitor.enabled); 149 nc->monitor.enabled = true; 150 nc->monitor.state = NCSI_CHANNEL_MONITOR_START; 151 
spin_unlock_irqrestore(&nc->lock, flags); 152 153 mod_timer(&nc->monitor.timer, jiffies + HZ); 154 } 155 156 void ncsi_stop_channel_monitor(struct ncsi_channel *nc) 157 { 158 unsigned long flags; 159 160 spin_lock_irqsave(&nc->lock, flags); 161 if (!nc->monitor.enabled) { 162 spin_unlock_irqrestore(&nc->lock, flags); 163 return; 164 } 165 nc->monitor.enabled = false; 166 spin_unlock_irqrestore(&nc->lock, flags); 167 168 del_timer_sync(&nc->monitor.timer); 169 } 170 171 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np, 172 unsigned char id) 173 { 174 struct ncsi_channel *nc; 175 176 NCSI_FOR_EACH_CHANNEL(np, nc) { 177 if (nc->id == id) 178 return nc; 179 } 180 181 return NULL; 182 } 183 184 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id) 185 { 186 struct ncsi_channel *nc, *tmp; 187 int index; 188 unsigned long flags; 189 190 nc = kzalloc(sizeof(*nc), GFP_ATOMIC); 191 if (!nc) 192 return NULL; 193 194 nc->id = id; 195 nc->package = np; 196 nc->state = NCSI_CHANNEL_INACTIVE; 197 nc->monitor.enabled = false; 198 timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0); 199 spin_lock_init(&nc->lock); 200 INIT_LIST_HEAD(&nc->link); 201 for (index = 0; index < NCSI_CAP_MAX; index++) 202 nc->caps[index].index = index; 203 for (index = 0; index < NCSI_MODE_MAX; index++) 204 nc->modes[index].index = index; 205 206 spin_lock_irqsave(&np->lock, flags); 207 tmp = ncsi_find_channel(np, id); 208 if (tmp) { 209 spin_unlock_irqrestore(&np->lock, flags); 210 kfree(nc); 211 return tmp; 212 } 213 214 list_add_tail_rcu(&nc->node, &np->channels); 215 np->channel_num++; 216 spin_unlock_irqrestore(&np->lock, flags); 217 218 return nc; 219 } 220 221 static void ncsi_remove_channel(struct ncsi_channel *nc) 222 { 223 struct ncsi_package *np = nc->package; 224 unsigned long flags; 225 226 spin_lock_irqsave(&nc->lock, flags); 227 228 /* Release filters */ 229 kfree(nc->mac_filter.addrs); 230 kfree(nc->vlan_filter.vids); 231 232 nc->state = 
NCSI_CHANNEL_INACTIVE; 233 spin_unlock_irqrestore(&nc->lock, flags); 234 ncsi_stop_channel_monitor(nc); 235 236 /* Remove and free channel */ 237 spin_lock_irqsave(&np->lock, flags); 238 list_del_rcu(&nc->node); 239 np->channel_num--; 240 spin_unlock_irqrestore(&np->lock, flags); 241 242 kfree(nc); 243 } 244 245 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp, 246 unsigned char id) 247 { 248 struct ncsi_package *np; 249 250 NCSI_FOR_EACH_PACKAGE(ndp, np) { 251 if (np->id == id) 252 return np; 253 } 254 255 return NULL; 256 } 257 258 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp, 259 unsigned char id) 260 { 261 struct ncsi_package *np, *tmp; 262 unsigned long flags; 263 264 np = kzalloc(sizeof(*np), GFP_ATOMIC); 265 if (!np) 266 return NULL; 267 268 np->id = id; 269 np->ndp = ndp; 270 spin_lock_init(&np->lock); 271 INIT_LIST_HEAD(&np->channels); 272 273 spin_lock_irqsave(&ndp->lock, flags); 274 tmp = ncsi_find_package(ndp, id); 275 if (tmp) { 276 spin_unlock_irqrestore(&ndp->lock, flags); 277 kfree(np); 278 return tmp; 279 } 280 281 list_add_tail_rcu(&np->node, &ndp->packages); 282 ndp->package_num++; 283 spin_unlock_irqrestore(&ndp->lock, flags); 284 285 return np; 286 } 287 288 void ncsi_remove_package(struct ncsi_package *np) 289 { 290 struct ncsi_dev_priv *ndp = np->ndp; 291 struct ncsi_channel *nc, *tmp; 292 unsigned long flags; 293 294 /* Release all child channels */ 295 list_for_each_entry_safe(nc, tmp, &np->channels, node) 296 ncsi_remove_channel(nc); 297 298 /* Remove and free package */ 299 spin_lock_irqsave(&ndp->lock, flags); 300 list_del_rcu(&np->node); 301 ndp->package_num--; 302 spin_unlock_irqrestore(&ndp->lock, flags); 303 304 kfree(np); 305 } 306 307 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp, 308 unsigned char id, 309 struct ncsi_package **np, 310 struct ncsi_channel **nc) 311 { 312 struct ncsi_package *p; 313 struct ncsi_channel *c; 314 315 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id)); 
316 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL; 317 318 if (np) 319 *np = p; 320 if (nc) 321 *nc = c; 322 } 323 324 /* For two consecutive NCSI commands, the packet IDs shouldn't 325 * be same. Otherwise, the bogus response might be replied. So 326 * the available IDs are allocated in round-robin fashion. 327 */ 328 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, 329 unsigned int req_flags) 330 { 331 struct ncsi_request *nr = NULL; 332 int i, limit = ARRAY_SIZE(ndp->requests); 333 unsigned long flags; 334 335 /* Check if there is one available request until the ceiling */ 336 spin_lock_irqsave(&ndp->lock, flags); 337 for (i = ndp->request_id; i < limit; i++) { 338 if (ndp->requests[i].used) 339 continue; 340 341 nr = &ndp->requests[i]; 342 nr->used = true; 343 nr->flags = req_flags; 344 ndp->request_id = i + 1; 345 goto found; 346 } 347 348 /* Fail back to check from the starting cursor */ 349 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) { 350 if (ndp->requests[i].used) 351 continue; 352 353 nr = &ndp->requests[i]; 354 nr->used = true; 355 nr->flags = req_flags; 356 ndp->request_id = i + 1; 357 goto found; 358 } 359 360 found: 361 spin_unlock_irqrestore(&ndp->lock, flags); 362 return nr; 363 } 364 365 void ncsi_free_request(struct ncsi_request *nr) 366 { 367 struct ncsi_dev_priv *ndp = nr->ndp; 368 struct sk_buff *cmd, *rsp; 369 unsigned long flags; 370 bool driven; 371 372 if (nr->enabled) { 373 nr->enabled = false; 374 del_timer_sync(&nr->timer); 375 } 376 377 spin_lock_irqsave(&ndp->lock, flags); 378 cmd = nr->cmd; 379 rsp = nr->rsp; 380 nr->cmd = NULL; 381 nr->rsp = NULL; 382 nr->used = false; 383 driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN); 384 spin_unlock_irqrestore(&ndp->lock, flags); 385 386 if (driven && cmd && --ndp->pending_req_num == 0) 387 schedule_work(&ndp->work); 388 389 /* Release command and response */ 390 consume_skb(cmd); 391 consume_skb(rsp); 392 } 393 394 struct ncsi_dev 
*ncsi_find_dev(struct net_device *dev) 395 { 396 struct ncsi_dev_priv *ndp; 397 398 NCSI_FOR_EACH_DEV(ndp) { 399 if (ndp->ndev.dev == dev) 400 return &ndp->ndev; 401 } 402 403 return NULL; 404 } 405 406 static void ncsi_request_timeout(struct timer_list *t) 407 { 408 struct ncsi_request *nr = from_timer(nr, t, timer); 409 struct ncsi_dev_priv *ndp = nr->ndp; 410 struct ncsi_cmd_pkt *cmd; 411 struct ncsi_package *np; 412 struct ncsi_channel *nc; 413 unsigned long flags; 414 415 /* If the request already had associated response, 416 * let the response handler to release it. 417 */ 418 spin_lock_irqsave(&ndp->lock, flags); 419 nr->enabled = false; 420 if (nr->rsp || !nr->cmd) { 421 spin_unlock_irqrestore(&ndp->lock, flags); 422 return; 423 } 424 spin_unlock_irqrestore(&ndp->lock, flags); 425 426 if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) { 427 if (nr->cmd) { 428 /* Find the package */ 429 cmd = (struct ncsi_cmd_pkt *) 430 skb_network_header(nr->cmd); 431 ncsi_find_package_and_channel(ndp, 432 cmd->cmd.common.channel, 433 &np, &nc); 434 ncsi_send_netlink_timeout(nr, np, nc); 435 } 436 } 437 438 /* Release the request */ 439 ncsi_free_request(nr); 440 } 441 442 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp) 443 { 444 struct ncsi_dev *nd = &ndp->ndev; 445 struct ncsi_package *np = ndp->active_package; 446 struct ncsi_channel *nc = ndp->active_channel; 447 struct ncsi_cmd_arg nca; 448 unsigned long flags; 449 int ret; 450 451 nca.ndp = ndp; 452 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN; 453 switch (nd->state) { 454 case ncsi_dev_state_suspend: 455 nd->state = ncsi_dev_state_suspend_select; 456 /* Fall through */ 457 case ncsi_dev_state_suspend_select: 458 ndp->pending_req_num = 1; 459 460 nca.type = NCSI_PKT_CMD_SP; 461 nca.package = np->id; 462 nca.channel = NCSI_RESERVED_CHANNEL; 463 if (ndp->flags & NCSI_DEV_HWA) 464 nca.bytes[0] = 0; 465 else 466 nca.bytes[0] = 1; 467 468 /* To retrieve the last link states of channels in current 469 * package when 
current active channel needs fail over to 470 * another one. It means we will possibly select another 471 * channel as next active one. The link states of channels 472 * are most important factor of the selection. So we need 473 * accurate link states. Unfortunately, the link states on 474 * inactive channels can't be updated with LSC AEN in time. 475 */ 476 if (ndp->flags & NCSI_DEV_RESHUFFLE) 477 nd->state = ncsi_dev_state_suspend_gls; 478 else 479 nd->state = ncsi_dev_state_suspend_dcnt; 480 ret = ncsi_xmit_cmd(&nca); 481 if (ret) 482 goto error; 483 484 break; 485 case ncsi_dev_state_suspend_gls: 486 ndp->pending_req_num = np->channel_num; 487 488 nca.type = NCSI_PKT_CMD_GLS; 489 nca.package = np->id; 490 491 nd->state = ncsi_dev_state_suspend_dcnt; 492 NCSI_FOR_EACH_CHANNEL(np, nc) { 493 nca.channel = nc->id; 494 ret = ncsi_xmit_cmd(&nca); 495 if (ret) 496 goto error; 497 } 498 499 break; 500 case ncsi_dev_state_suspend_dcnt: 501 ndp->pending_req_num = 1; 502 503 nca.type = NCSI_PKT_CMD_DCNT; 504 nca.package = np->id; 505 nca.channel = nc->id; 506 507 nd->state = ncsi_dev_state_suspend_dc; 508 ret = ncsi_xmit_cmd(&nca); 509 if (ret) 510 goto error; 511 512 break; 513 case ncsi_dev_state_suspend_dc: 514 ndp->pending_req_num = 1; 515 516 nca.type = NCSI_PKT_CMD_DC; 517 nca.package = np->id; 518 nca.channel = nc->id; 519 nca.bytes[0] = 1; 520 521 nd->state = ncsi_dev_state_suspend_deselect; 522 ret = ncsi_xmit_cmd(&nca); 523 if (ret) 524 goto error; 525 526 break; 527 case ncsi_dev_state_suspend_deselect: 528 ndp->pending_req_num = 1; 529 530 nca.type = NCSI_PKT_CMD_DP; 531 nca.package = np->id; 532 nca.channel = NCSI_RESERVED_CHANNEL; 533 534 nd->state = ncsi_dev_state_suspend_done; 535 ret = ncsi_xmit_cmd(&nca); 536 if (ret) 537 goto error; 538 539 break; 540 case ncsi_dev_state_suspend_done: 541 spin_lock_irqsave(&nc->lock, flags); 542 nc->state = NCSI_CHANNEL_INACTIVE; 543 spin_unlock_irqrestore(&nc->lock, flags); 544 ncsi_process_next_channel(ndp); 545 546 
break; 547 default: 548 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n", 549 nd->state); 550 } 551 552 return; 553 error: 554 nd->state = ncsi_dev_state_functional; 555 } 556 557 /* Check the VLAN filter bitmap for a set filter, and construct a 558 * "Set VLAN Filter - Disable" packet if found. 559 */ 560 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc, 561 struct ncsi_cmd_arg *nca) 562 { 563 struct ncsi_channel_vlan_filter *ncf; 564 unsigned long flags; 565 void *bitmap; 566 int index; 567 u16 vid; 568 569 ncf = &nc->vlan_filter; 570 bitmap = &ncf->bitmap; 571 572 spin_lock_irqsave(&nc->lock, flags); 573 index = find_next_bit(bitmap, ncf->n_vids, 0); 574 if (index >= ncf->n_vids) { 575 spin_unlock_irqrestore(&nc->lock, flags); 576 return -1; 577 } 578 vid = ncf->vids[index]; 579 580 clear_bit(index, bitmap); 581 ncf->vids[index] = 0; 582 spin_unlock_irqrestore(&nc->lock, flags); 583 584 nca->type = NCSI_PKT_CMD_SVF; 585 nca->words[1] = vid; 586 /* HW filter index starts at 1 */ 587 nca->bytes[6] = index + 1; 588 nca->bytes[7] = 0x00; 589 return 0; 590 } 591 592 /* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable" 593 * packet. 
594 */ 595 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc, 596 struct ncsi_cmd_arg *nca) 597 { 598 struct ncsi_channel_vlan_filter *ncf; 599 struct vlan_vid *vlan = NULL; 600 unsigned long flags; 601 int i, index; 602 void *bitmap; 603 u16 vid; 604 605 if (list_empty(&ndp->vlan_vids)) 606 return -1; 607 608 ncf = &nc->vlan_filter; 609 bitmap = &ncf->bitmap; 610 611 spin_lock_irqsave(&nc->lock, flags); 612 613 rcu_read_lock(); 614 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { 615 vid = vlan->vid; 616 for (i = 0; i < ncf->n_vids; i++) 617 if (ncf->vids[i] == vid) { 618 vid = 0; 619 break; 620 } 621 if (vid) 622 break; 623 } 624 rcu_read_unlock(); 625 626 if (!vid) { 627 /* No VLAN ID is not set */ 628 spin_unlock_irqrestore(&nc->lock, flags); 629 return -1; 630 } 631 632 index = find_next_zero_bit(bitmap, ncf->n_vids, 0); 633 if (index < 0 || index >= ncf->n_vids) { 634 netdev_err(ndp->ndev.dev, 635 "Channel %u already has all VLAN filters set\n", 636 nc->id); 637 spin_unlock_irqrestore(&nc->lock, flags); 638 return -1; 639 } 640 641 ncf->vids[index] = vid; 642 set_bit(index, bitmap); 643 spin_unlock_irqrestore(&nc->lock, flags); 644 645 nca->type = NCSI_PKT_CMD_SVF; 646 nca->words[1] = vid; 647 /* HW filter index starts at 1 */ 648 nca->bytes[6] = index + 1; 649 nca->bytes[7] = 0x01; 650 651 return 0; 652 } 653 654 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) 655 { 656 struct ncsi_dev *nd = &ndp->ndev; 657 struct net_device *dev = nd->dev; 658 struct ncsi_package *np = ndp->active_package; 659 struct ncsi_channel *nc = ndp->active_channel; 660 struct ncsi_channel *hot_nc = NULL; 661 struct ncsi_cmd_arg nca; 662 unsigned char index; 663 unsigned long flags; 664 int ret; 665 666 nca.ndp = ndp; 667 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN; 668 switch (nd->state) { 669 case ncsi_dev_state_config: 670 case ncsi_dev_state_config_sp: 671 ndp->pending_req_num = 1; 672 673 /* Select the specific package */ 674 nca.type = 
NCSI_PKT_CMD_SP; 675 if (ndp->flags & NCSI_DEV_HWA) 676 nca.bytes[0] = 0; 677 else 678 nca.bytes[0] = 1; 679 nca.package = np->id; 680 nca.channel = NCSI_RESERVED_CHANNEL; 681 ret = ncsi_xmit_cmd(&nca); 682 if (ret) { 683 netdev_err(ndp->ndev.dev, 684 "NCSI: Failed to transmit CMD_SP\n"); 685 goto error; 686 } 687 688 nd->state = ncsi_dev_state_config_cis; 689 break; 690 case ncsi_dev_state_config_cis: 691 ndp->pending_req_num = 1; 692 693 /* Clear initial state */ 694 nca.type = NCSI_PKT_CMD_CIS; 695 nca.package = np->id; 696 nca.channel = nc->id; 697 ret = ncsi_xmit_cmd(&nca); 698 if (ret) { 699 netdev_err(ndp->ndev.dev, 700 "NCSI: Failed to transmit CMD_CIS\n"); 701 goto error; 702 } 703 704 nd->state = ncsi_dev_state_config_clear_vids; 705 break; 706 case ncsi_dev_state_config_clear_vids: 707 case ncsi_dev_state_config_svf: 708 case ncsi_dev_state_config_ev: 709 case ncsi_dev_state_config_sma: 710 case ncsi_dev_state_config_ebf: 711 #if IS_ENABLED(CONFIG_IPV6) 712 case ncsi_dev_state_config_egmf: 713 #endif 714 case ncsi_dev_state_config_ecnt: 715 case ncsi_dev_state_config_ec: 716 case ncsi_dev_state_config_ae: 717 case ncsi_dev_state_config_gls: 718 ndp->pending_req_num = 1; 719 720 nca.package = np->id; 721 nca.channel = nc->id; 722 723 /* Clear any active filters on the channel before setting */ 724 if (nd->state == ncsi_dev_state_config_clear_vids) { 725 ret = clear_one_vid(ndp, nc, &nca); 726 if (ret) { 727 nd->state = ncsi_dev_state_config_svf; 728 schedule_work(&ndp->work); 729 break; 730 } 731 /* Repeat */ 732 nd->state = ncsi_dev_state_config_clear_vids; 733 /* Add known VLAN tags to the filter */ 734 } else if (nd->state == ncsi_dev_state_config_svf) { 735 ret = set_one_vid(ndp, nc, &nca); 736 if (ret) { 737 nd->state = ncsi_dev_state_config_ev; 738 schedule_work(&ndp->work); 739 break; 740 } 741 /* Repeat */ 742 nd->state = ncsi_dev_state_config_svf; 743 /* Enable/Disable the VLAN filter */ 744 } else if (nd->state == ncsi_dev_state_config_ev) { 745 
if (list_empty(&ndp->vlan_vids)) { 746 nca.type = NCSI_PKT_CMD_DV; 747 } else { 748 nca.type = NCSI_PKT_CMD_EV; 749 nca.bytes[3] = NCSI_CAP_VLAN_NO; 750 } 751 nd->state = ncsi_dev_state_config_sma; 752 } else if (nd->state == ncsi_dev_state_config_sma) { 753 /* Use first entry in unicast filter table. Note that 754 * the MAC filter table starts from entry 1 instead of 755 * 0. 756 */ 757 nca.type = NCSI_PKT_CMD_SMA; 758 for (index = 0; index < 6; index++) 759 nca.bytes[index] = dev->dev_addr[index]; 760 nca.bytes[6] = 0x1; 761 nca.bytes[7] = 0x1; 762 nd->state = ncsi_dev_state_config_ebf; 763 } else if (nd->state == ncsi_dev_state_config_ebf) { 764 nca.type = NCSI_PKT_CMD_EBF; 765 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap; 766 nd->state = ncsi_dev_state_config_ecnt; 767 #if IS_ENABLED(CONFIG_IPV6) 768 if (ndp->inet6_addr_num > 0 && 769 (nc->caps[NCSI_CAP_GENERIC].cap & 770 NCSI_CAP_GENERIC_MC)) 771 nd->state = ncsi_dev_state_config_egmf; 772 else 773 nd->state = ncsi_dev_state_config_ecnt; 774 } else if (nd->state == ncsi_dev_state_config_egmf) { 775 nca.type = NCSI_PKT_CMD_EGMF; 776 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap; 777 nd->state = ncsi_dev_state_config_ecnt; 778 #endif /* CONFIG_IPV6 */ 779 } else if (nd->state == ncsi_dev_state_config_ecnt) { 780 nca.type = NCSI_PKT_CMD_ECNT; 781 nd->state = ncsi_dev_state_config_ec; 782 } else if (nd->state == ncsi_dev_state_config_ec) { 783 /* Enable AEN if it's supported */ 784 nca.type = NCSI_PKT_CMD_EC; 785 nd->state = ncsi_dev_state_config_ae; 786 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK)) 787 nd->state = ncsi_dev_state_config_gls; 788 } else if (nd->state == ncsi_dev_state_config_ae) { 789 nca.type = NCSI_PKT_CMD_AE; 790 nca.bytes[0] = 0; 791 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap; 792 nd->state = ncsi_dev_state_config_gls; 793 } else if (nd->state == ncsi_dev_state_config_gls) { 794 nca.type = NCSI_PKT_CMD_GLS; 795 nd->state = ncsi_dev_state_config_done; 796 } 797 798 ret = ncsi_xmit_cmd(&nca); 
799 if (ret) { 800 netdev_err(ndp->ndev.dev, 801 "NCSI: Failed to transmit CMD %x\n", 802 nca.type); 803 goto error; 804 } 805 break; 806 case ncsi_dev_state_config_done: 807 netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n", 808 nc->id); 809 spin_lock_irqsave(&nc->lock, flags); 810 if (nc->reconfigure_needed) { 811 /* This channel's configuration has been updated 812 * part-way during the config state - start the 813 * channel configuration over 814 */ 815 nc->reconfigure_needed = false; 816 nc->state = NCSI_CHANNEL_INACTIVE; 817 spin_unlock_irqrestore(&nc->lock, flags); 818 819 spin_lock_irqsave(&ndp->lock, flags); 820 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 821 spin_unlock_irqrestore(&ndp->lock, flags); 822 823 netdev_dbg(dev, "Dirty NCSI channel state reset\n"); 824 ncsi_process_next_channel(ndp); 825 break; 826 } 827 828 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) { 829 hot_nc = nc; 830 nc->state = NCSI_CHANNEL_ACTIVE; 831 } else { 832 hot_nc = NULL; 833 nc->state = NCSI_CHANNEL_INACTIVE; 834 netdev_dbg(ndp->ndev.dev, 835 "NCSI: channel %u link down after config\n", 836 nc->id); 837 } 838 spin_unlock_irqrestore(&nc->lock, flags); 839 840 /* Update the hot channel */ 841 spin_lock_irqsave(&ndp->lock, flags); 842 ndp->hot_channel = hot_nc; 843 spin_unlock_irqrestore(&ndp->lock, flags); 844 845 ncsi_start_channel_monitor(nc); 846 ncsi_process_next_channel(ndp); 847 break; 848 default: 849 netdev_alert(dev, "Wrong NCSI state 0x%x in config\n", 850 nd->state); 851 } 852 853 return; 854 855 error: 856 ncsi_report_link(ndp, true); 857 } 858 859 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp) 860 { 861 struct ncsi_package *np, *force_package; 862 struct ncsi_channel *nc, *found, *hot_nc, *force_channel; 863 struct ncsi_channel_mode *ncm; 864 unsigned long flags; 865 866 spin_lock_irqsave(&ndp->lock, flags); 867 hot_nc = ndp->hot_channel; 868 force_channel = ndp->force_channel; 869 force_package = ndp->force_package; 870 
spin_unlock_irqrestore(&ndp->lock, flags); 871 872 /* Force a specific channel whether or not it has link if we have been 873 * configured to do so 874 */ 875 if (force_package && force_channel) { 876 found = force_channel; 877 ncm = &found->modes[NCSI_MODE_LINK]; 878 if (!(ncm->data[2] & 0x1)) 879 netdev_info(ndp->ndev.dev, 880 "NCSI: Channel %u forced, but it is link down\n", 881 found->id); 882 goto out; 883 } 884 885 /* The search is done once an inactive channel with up 886 * link is found. 887 */ 888 found = NULL; 889 NCSI_FOR_EACH_PACKAGE(ndp, np) { 890 if (ndp->force_package && np != ndp->force_package) 891 continue; 892 NCSI_FOR_EACH_CHANNEL(np, nc) { 893 spin_lock_irqsave(&nc->lock, flags); 894 895 if (!list_empty(&nc->link) || 896 nc->state != NCSI_CHANNEL_INACTIVE) { 897 spin_unlock_irqrestore(&nc->lock, flags); 898 continue; 899 } 900 901 if (!found) 902 found = nc; 903 904 if (nc == hot_nc) 905 found = nc; 906 907 ncm = &nc->modes[NCSI_MODE_LINK]; 908 if (ncm->data[2] & 0x1) { 909 spin_unlock_irqrestore(&nc->lock, flags); 910 found = nc; 911 goto out; 912 } 913 914 spin_unlock_irqrestore(&nc->lock, flags); 915 } 916 } 917 918 if (!found) { 919 netdev_warn(ndp->ndev.dev, 920 "NCSI: No channel found with link\n"); 921 ncsi_report_link(ndp, true); 922 return -ENODEV; 923 } 924 925 ncm = &found->modes[NCSI_MODE_LINK]; 926 netdev_dbg(ndp->ndev.dev, 927 "NCSI: Channel %u added to queue (link %s)\n", 928 found->id, ncm->data[2] & 0x1 ? "up" : "down"); 929 930 out: 931 spin_lock_irqsave(&ndp->lock, flags); 932 list_add_tail_rcu(&found->link, &ndp->channel_queue); 933 spin_unlock_irqrestore(&ndp->lock, flags); 934 935 return ncsi_process_next_channel(ndp); 936 } 937 938 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp) 939 { 940 struct ncsi_package *np; 941 struct ncsi_channel *nc; 942 unsigned int cap; 943 bool has_channel = false; 944 945 /* The hardware arbitration is disabled if any one channel 946 * doesn't support explicitly. 
947 */ 948 NCSI_FOR_EACH_PACKAGE(ndp, np) { 949 NCSI_FOR_EACH_CHANNEL(np, nc) { 950 has_channel = true; 951 952 cap = nc->caps[NCSI_CAP_GENERIC].cap; 953 if (!(cap & NCSI_CAP_GENERIC_HWA) || 954 (cap & NCSI_CAP_GENERIC_HWA_MASK) != 955 NCSI_CAP_GENERIC_HWA_SUPPORT) { 956 ndp->flags &= ~NCSI_DEV_HWA; 957 return false; 958 } 959 } 960 } 961 962 if (has_channel) { 963 ndp->flags |= NCSI_DEV_HWA; 964 return true; 965 } 966 967 ndp->flags &= ~NCSI_DEV_HWA; 968 return false; 969 } 970 971 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp) 972 { 973 struct ncsi_package *np; 974 struct ncsi_channel *nc; 975 unsigned long flags; 976 977 /* Move all available channels to processing queue */ 978 spin_lock_irqsave(&ndp->lock, flags); 979 NCSI_FOR_EACH_PACKAGE(ndp, np) { 980 NCSI_FOR_EACH_CHANNEL(np, nc) { 981 WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE || 982 !list_empty(&nc->link)); 983 ncsi_stop_channel_monitor(nc); 984 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 985 } 986 } 987 spin_unlock_irqrestore(&ndp->lock, flags); 988 989 /* We can have no channels in extremely case */ 990 if (list_empty(&ndp->channel_queue)) { 991 netdev_err(ndp->ndev.dev, 992 "NCSI: No available channels for HWA\n"); 993 ncsi_report_link(ndp, false); 994 return -ENOENT; 995 } 996 997 return ncsi_process_next_channel(ndp); 998 } 999 1000 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) 1001 { 1002 struct ncsi_dev *nd = &ndp->ndev; 1003 struct ncsi_package *np; 1004 struct ncsi_channel *nc; 1005 struct ncsi_cmd_arg nca; 1006 unsigned char index; 1007 int ret; 1008 1009 nca.ndp = ndp; 1010 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN; 1011 switch (nd->state) { 1012 case ncsi_dev_state_probe: 1013 nd->state = ncsi_dev_state_probe_deselect; 1014 /* Fall through */ 1015 case ncsi_dev_state_probe_deselect: 1016 ndp->pending_req_num = 8; 1017 1018 /* Deselect all possible packages */ 1019 nca.type = NCSI_PKT_CMD_DP; 1020 nca.channel = NCSI_RESERVED_CHANNEL; 1021 for (index = 0; index < 
8; index++) { 1022 nca.package = index; 1023 ret = ncsi_xmit_cmd(&nca); 1024 if (ret) 1025 goto error; 1026 } 1027 1028 nd->state = ncsi_dev_state_probe_package; 1029 break; 1030 case ncsi_dev_state_probe_package: 1031 ndp->pending_req_num = 16; 1032 1033 /* Select all possible packages */ 1034 nca.type = NCSI_PKT_CMD_SP; 1035 nca.bytes[0] = 1; 1036 nca.channel = NCSI_RESERVED_CHANNEL; 1037 for (index = 0; index < 8; index++) { 1038 nca.package = index; 1039 ret = ncsi_xmit_cmd(&nca); 1040 if (ret) 1041 goto error; 1042 } 1043 1044 /* Disable all possible packages */ 1045 nca.type = NCSI_PKT_CMD_DP; 1046 for (index = 0; index < 8; index++) { 1047 nca.package = index; 1048 ret = ncsi_xmit_cmd(&nca); 1049 if (ret) 1050 goto error; 1051 } 1052 1053 nd->state = ncsi_dev_state_probe_channel; 1054 break; 1055 case ncsi_dev_state_probe_channel: 1056 if (!ndp->active_package) 1057 ndp->active_package = list_first_or_null_rcu( 1058 &ndp->packages, struct ncsi_package, node); 1059 else if (list_is_last(&ndp->active_package->node, 1060 &ndp->packages)) 1061 ndp->active_package = NULL; 1062 else 1063 ndp->active_package = list_next_entry( 1064 ndp->active_package, node); 1065 1066 /* All available packages and channels are enumerated. The 1067 * enumeration happens for once when the NCSI interface is 1068 * started. So we need continue to start the interface after 1069 * the enumeration. 1070 * 1071 * We have to choose an active channel before configuring it. 1072 * Note that we possibly don't have active channel in extreme 1073 * situation. 
		 */
		if (!ndp->active_package) {
			/* All packages have been walked: mark probing as
			 * complete and either enable hardware arbitration
			 * or pick a single active channel.
			 */
			ndp->flags |= NCSI_DEV_PROBED;
			if (ncsi_check_hwa(ndp))
				ncsi_enable_hwa(ndp);
			else
				ncsi_choose_active_channel(ndp);
			return;
		}

		/* Select the active package */
		ndp->pending_req_num = 1;
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->active_package->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_cis:
		/* One CIS request is sent per possible channel index */
		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = ndp->active_package->id;
		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
			nca.channel = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = np->channel_num;

		/* Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		/* The three query states run in sequence: GVI -> GC -> GLS */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nd->state = ncsi_dev_state_probe_gc;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nd->state = ncsi_dev_state_probe_gls;
		else
			nd->state = ncsi_dev_state_probe_dp;
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the active package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->active_package->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Scan channels in next package */
		nd->state = ncsi_dev_state_probe_channel;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}

/* Work handler: dispatch to the state machine matching the major state
 * of the NCSI device (probe, suspend or configure).
 */
static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
						 struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}

/* Pop the next channel off ndp->channel_queue and start the matching
 * state machine on it: configuration for an INACTIVE channel,
 * suspension for an ACTIVE one.  Returns -ENODEV when the queue is
 * empty and no reshuffle was requested.
 */
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Park the channel in the INVISIBLE state while it's processed */
	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
			   nc->id);
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
			   nc->id);
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   old_state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	ncsi_report_link(ndp, false);
	return -ENODEV;
}

#if IS_ENABLED(CONFIG_IPV6)
/* inet6 address notifier: transmits an EGMF command when the first
 * IPv6 address comes up on the device and a DGMF command when the
 * last one is removed, tracked via ndp->inet6_addr_num.
 */
static int ncsi_inet6addr_event(struct notifier_block *this,
				unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *dev = ifa->idev->dev;
	struct ncsi_dev *nd = ncsi_find_dev(dev);
	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	bool action;
	int ret;

	/* Ignore non-NCSI devices as well as link-local and loopback
	 * addresses.
	 */
	if (!ndp || (ipv6_addr_type(&ifa->addr) &
	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UP:
		/* Only the first address added triggers a command */
		action = (++ndp->inet6_addr_num) == 1;
		nca.type = NCSI_PKT_CMD_EGMF;
		break;
	case NETDEV_DOWN:
		/* Only the last address removed triggers a command */
		action = (--ndp->inet6_addr_num == 0);
		nca.type = NCSI_PKT_CMD_DGMF;
		break;
	default:
		return NOTIFY_OK;
	}

	/* We might not have active channel or packages. The IPv6
	 * required multicast will be enabled when active channel
	 * or packages are chosen.
	 */
	np = ndp->active_package;
	nc = ndp->active_channel;
	if (!action || !np || !nc)
		return NOTIFY_OK;

	/* We needn't enable or disable it if the function isn't supported */
	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
		return NOTIFY_OK;

	nca.ndp = ndp;
	nca.req_flags = 0;
	nca.package = np->id;
	nca.channel = nc->id;
	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
	ret = ncsi_xmit_cmd(&nca);
	if (ret) {
		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
			    (event == NETDEV_UP) ? "enable" : "disable", ret);
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block ncsi_inet6addr_notifier = {
	.notifier_call = ncsi_inet6addr_event,
};
#endif /* CONFIG_IPV6 */

/* Force the channels through reconfiguration: every ACTIVE channel is
 * moved to INACTIVE and queued on ndp->channel_queue; channels that
 * are busy are only marked with reconfigure_needed.  Returns the
 * number of channels queued.
 */
static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	unsigned long flags;
	unsigned int n = 0;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels may be busy, mark dirty instead of
			 * kicking if:
			 * a) not ACTIVE (configured)
			 * b) in the channel_queue (to be configured)
			 * c) its ndev is in the config state
			 */
			if (nc->state != NCSI_CHANNEL_ACTIVE) {
				if ((ndp->ndev.state & 0xff00) ==
				    ncsi_dev_state_config ||
				    !list_empty(&nc->link)) {
					netdev_dbg(nd->dev,
						   "NCSI: channel %p marked dirty\n",
						   nc);
					nc->reconfigure_needed = true;
				}
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			spin_unlock_irqrestore(&nc->lock, flags);

			ncsi_stop_channel_monitor(nc);
			spin_lock_irqsave(&nc->lock, flags);
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
			n++;
		}
	}

	return n;
}

/* Register a VLAN id for filtering.  The id is recorded on the
 * internal vlan_vids list (bounded by NCSI_MAX_VLAN_VIDS) and the
 * channels are kicked so the new filter gets programmed.
 */
int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ncsi_dev_priv *ndp;
	unsigned int n_vids = 0;
	struct vlan_vid *vlan;
	struct ncsi_dev *nd;
	bool found = false;

	/* No filtering is done for vid 0 */
	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: No net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Add the VLAN id to our internal list */
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		n_vids++;
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u already registered\n",
				   vid);
			return 0;
		}
	}
	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
		netdev_warn(dev,
			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
			    vid, NCSI_MAX_VLAN_VIDS);
		return -ENOSPC;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->proto = proto;
	vlan->vid = vid;
	list_add_rcu(&vlan->list, &ndp->vlan_vids);

	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);

	/* Reconfigure the channels so the new filter takes effect */
	found = ncsi_kick_channels(ndp) != 0;

	return found ?
ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);

/* Unregister a VLAN id: drop it from the internal vlan_vids list and
 * kick the channels so the filters are reprogrammed.  Returns -EINVAL
 * if the id was never registered.
 */
int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	/* vid 0 is never registered, so there is nothing to remove */
	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	/* Reconfigure the channels so the stale filter is dropped */
	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);

/* Create and register an NCSI device for the given network device, or
 * return the already-registered one.  Initializes the request table,
 * installs the NCSI packet Rx handler and the netlink interface; the
 * first registered device also installs the inet6 address notifier
 * when IPv6 is enabled.
 */
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
	}

	spin_lock_irqsave(&ncsi_dev_lock, flags);
#if IS_ENABLED(CONFIG_IPV6)
	ndp->inet6_addr_num = 0;
	/* list_empty() before the add below means this is the first
	 * NCSI device; it owns the inet6 notifier registration.
	 */
	if (list_empty(&ncsi_dev_list))
		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	/* Set up generic netlink interface */
	ncsi_init_netlink(dev);

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);

/* Start the NCSI device: begin probing on first start; afterwards
 * either re-enable hardware arbitration or choose an active channel.
 */
int ncsi_start_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	int ret;

	if (nd->state != ncsi_dev_state_registered &&
	    nd->state != ncsi_dev_state_functional)
		return -ENOTTY;

	if (!(ndp->flags & NCSI_DEV_PROBED)) {
		/* Topology not discovered yet: run the probe state
		 * machine from the workqueue.
		 */
		nd->state = ncsi_dev_state_probe;
		schedule_work(&ndp->work);
		return 0;
	}

	if (ndp->flags & NCSI_DEV_HWA) {
		netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n");
		ret = ncsi_enable_hwa(ndp);
	} else {
		ret = ncsi_choose_active_channel(ndp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);

/* Stop the NCSI device: stop every channel monitor, force every
 * channel to INACTIVE and report the link as down.
 */
void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor and reset channel's state */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state =
nc->state;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			/* A channel still queued or mid-transition here
			 * indicates a state-machine bug.
			 */
			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
	ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);

/* Tear down the NCSI device: remove the Rx packet handler, free all
 * packages, drop the device from the global list (the last device
 * also unregisters the inet6 notifier when IPv6 is enabled), release
 * the netlink interface and free the private structure.
 */
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
#if IS_ENABLED(CONFIG_IPV6)
	if (list_empty(&ncsi_dev_list))
		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	ncsi_unregister_netlink(nd->dev);

	/* NOTE(review): ndp->work is not cancelled before freeing ndp;
	 * presumably callers guarantee the work item has completed —
	 * confirm against the drivers using this API.
	 */
	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);