1 /* 2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, write to the Free Software Foundation, Inc., 15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 17 * Maintained at www.Open-FCoE.org 18 */ 19 20 #include <linux/module.h> 21 #include <linux/version.h> 22 #include <linux/spinlock.h> 23 #include <linux/netdevice.h> 24 #include <linux/etherdevice.h> 25 #include <linux/ethtool.h> 26 #include <linux/if_ether.h> 27 #include <linux/if_vlan.h> 28 #include <linux/crc32.h> 29 #include <linux/slab.h> 30 #include <linux/cpu.h> 31 #include <linux/fs.h> 32 #include <linux/sysfs.h> 33 #include <linux/ctype.h> 34 #include <scsi/scsi_tcq.h> 35 #include <scsi/scsicam.h> 36 #include <scsi/scsi_transport.h> 37 #include <scsi/scsi_transport_fc.h> 38 #include <net/rtnetlink.h> 39 40 #include <scsi/fc/fc_encaps.h> 41 #include <scsi/fc/fc_fip.h> 42 43 #include <scsi/libfc.h> 44 #include <scsi/fc_frame.h> 45 #include <scsi/libfcoe.h> 46 47 #include "fcoe.h" 48 49 MODULE_AUTHOR("Open-FCoE.org"); 50 MODULE_DESCRIPTION("FCoE"); 51 MODULE_LICENSE("GPL v2"); 52 53 /* Performance tuning parameters for fcoe */ 54 static unsigned int fcoe_ddp_min; 55 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR); 56 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ 57 "Direct Data Placement (DDP)."); 58 59 DEFINE_MUTEX(fcoe_config_mutex); 60 61 /* fcoe_percpu_clean completion. 
Waiter protected by fcoe_create_mutex */ 62 static DECLARE_COMPLETION(fcoe_flush_completion); 63 64 /* fcoe host list */ 65 /* must only by accessed under the RTNL mutex */ 66 LIST_HEAD(fcoe_hostlist); 67 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 68 69 /* Function Prototypes */ 70 static int fcoe_reset(struct Scsi_Host *); 71 static int fcoe_xmit(struct fc_lport *, struct fc_frame *); 72 static int fcoe_rcv(struct sk_buff *, struct net_device *, 73 struct packet_type *, struct net_device *); 74 static int fcoe_percpu_receive_thread(void *); 75 static void fcoe_clean_pending_queue(struct fc_lport *); 76 static void fcoe_percpu_clean(struct fc_lport *); 77 static int fcoe_link_speed_update(struct fc_lport *); 78 static int fcoe_link_ok(struct fc_lport *); 79 80 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); 81 static int fcoe_hostlist_add(const struct fc_lport *); 82 83 static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); 84 static int fcoe_device_notification(struct notifier_block *, ulong, void *); 85 static void fcoe_dev_setup(void); 86 static void fcoe_dev_cleanup(void); 87 static struct fcoe_interface 88 *fcoe_hostlist_lookup_port(const struct net_device *); 89 90 static int fcoe_fip_recv(struct sk_buff *, struct net_device *, 91 struct packet_type *, struct net_device *); 92 93 static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *); 94 static void fcoe_update_src_mac(struct fc_lport *, u8 *); 95 static u8 *fcoe_get_src_mac(struct fc_lport *); 96 static void fcoe_destroy_work(struct work_struct *); 97 98 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, 99 unsigned int); 100 static int fcoe_ddp_done(struct fc_lport *, u16); 101 102 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); 103 104 static int fcoe_create(const char *, struct kernel_param *); 105 static int fcoe_destroy(const char *, struct kernel_param *); 106 static int fcoe_enable(const char *, struct kernel_param *); 107 static int fcoe_disable(const char *, struct kernel_param *); 108 109 static struct fc_seq *fcoe_elsct_send(struct fc_lport *, 110 u32 did, struct fc_frame *, 111 unsigned int op, 112 void (*resp)(struct fc_seq *, 113 struct fc_frame *, 114 void *), 115 void *, u32 timeout); 116 static void fcoe_recv_frame(struct sk_buff *skb); 117 118 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); 119 120 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); 121 __MODULE_PARM_TYPE(create, "string"); 122 MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); 123 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); 124 __MODULE_PARM_TYPE(destroy, "string"); 125 MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface"); 126 module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR); 127 __MODULE_PARM_TYPE(enable, "string"); 128 MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface."); 129 module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR); 130 __MODULE_PARM_TYPE(disable, "string"); 131 MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface."); 132 133 /* notification function for packets from net device */ 134 static struct notifier_block fcoe_notifier = { 135 .notifier_call = fcoe_device_notification, 136 }; 137 138 /* notification function for CPU hotplug events */ 139 static struct notifier_block fcoe_cpu_notifier = { 140 .notifier_call = fcoe_cpu_callback, 141 }; 142 143 static struct 
scsi_transport_template *fcoe_transport_template; 144 static struct scsi_transport_template *fcoe_vport_transport_template; 145 146 static int fcoe_vport_destroy(struct fc_vport *); 147 static int fcoe_vport_create(struct fc_vport *, bool disabled); 148 static int fcoe_vport_disable(struct fc_vport *, bool disable); 149 static void fcoe_set_vport_symbolic_name(struct fc_vport *); 150 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); 151 152 static struct libfc_function_template fcoe_libfc_fcn_templ = { 153 .frame_send = fcoe_xmit, 154 .ddp_setup = fcoe_ddp_setup, 155 .ddp_done = fcoe_ddp_done, 156 .elsct_send = fcoe_elsct_send, 157 .get_lesb = fcoe_get_lesb, 158 .lport_set_port_id = fcoe_set_port_id, 159 }; 160 161 struct fc_function_template fcoe_transport_function = { 162 .show_host_node_name = 1, 163 .show_host_port_name = 1, 164 .show_host_supported_classes = 1, 165 .show_host_supported_fc4s = 1, 166 .show_host_active_fc4s = 1, 167 .show_host_maxframe_size = 1, 168 169 .show_host_port_id = 1, 170 .show_host_supported_speeds = 1, 171 .get_host_speed = fc_get_host_speed, 172 .show_host_speed = 1, 173 .show_host_port_type = 1, 174 .get_host_port_state = fc_get_host_port_state, 175 .show_host_port_state = 1, 176 .show_host_symbolic_name = 1, 177 178 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), 179 .show_rport_maxframe_size = 1, 180 .show_rport_supported_classes = 1, 181 182 .show_host_fabric_name = 1, 183 .show_starget_node_name = 1, 184 .show_starget_port_name = 1, 185 .show_starget_port_id = 1, 186 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, 187 .show_rport_dev_loss_tmo = 1, 188 .get_fc_host_stats = fc_get_host_stats, 189 .issue_fc_host_lip = fcoe_reset, 190 191 .terminate_rport_io = fc_rport_terminate_io, 192 193 .vport_create = fcoe_vport_create, 194 .vport_delete = fcoe_vport_destroy, 195 .vport_disable = fcoe_vport_disable, 196 .set_vport_symbolic_name = fcoe_set_vport_symbolic_name, 197 198 .bsg_request = fc_lport_bsg_request, 199 }; 200 201 struct fc_function_template fcoe_vport_transport_function = { 202 .show_host_node_name = 1, 203 .show_host_port_name = 1, 204 .show_host_supported_classes = 1, 205 .show_host_supported_fc4s = 1, 206 .show_host_active_fc4s = 1, 207 .show_host_maxframe_size = 1, 208 209 .show_host_port_id = 1, 210 .show_host_supported_speeds = 1, 211 .get_host_speed = fc_get_host_speed, 212 .show_host_speed = 1, 213 .show_host_port_type = 1, 214 .get_host_port_state = fc_get_host_port_state, 215 .show_host_port_state = 1, 216 .show_host_symbolic_name = 1, 217 218 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), 219 .show_rport_maxframe_size = 1, 220 .show_rport_supported_classes = 1, 221 222 .show_host_fabric_name = 1, 223 .show_starget_node_name = 1, 224 .show_starget_port_name = 1, 225 .show_starget_port_id = 1, 226 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, 227 .show_rport_dev_loss_tmo = 1, 228 .get_fc_host_stats = fc_get_host_stats, 229 .issue_fc_host_lip = fcoe_reset, 230 231 .terminate_rport_io = fc_rport_terminate_io, 232 233 .bsg_request = fc_lport_bsg_request, 234 }; 235 236 static struct scsi_host_template fcoe_shost_template = { 237 .module = THIS_MODULE, 238 .name = "FCoE Driver", 239 .proc_name = FCOE_NAME, 240 .queuecommand = fc_queuecommand, 241 .eh_abort_handler = fc_eh_abort, 242 .eh_device_reset_handler = fc_eh_device_reset, 243 .eh_host_reset_handler = fc_eh_host_reset, 244 .slave_alloc = fc_slave_alloc, 245 .change_queue_depth = fc_change_queue_depth, 246 .change_queue_type = 
fc_change_queue_type, 247 .this_id = -1, 248 .cmd_per_lun = 3, 249 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, 250 .use_clustering = ENABLE_CLUSTERING, 251 .sg_tablesize = SG_ALL, 252 .max_sectors = 0xffff, 253 }; 254 255 /** 256 * fcoe_interface_setup() - Setup a FCoE interface 257 * @fcoe: The new FCoE interface 258 * @netdev: The net device that the fcoe interface is on 259 * 260 * Returns : 0 for success 261 * Locking: must be called with the RTNL mutex held 262 */ 263 static int fcoe_interface_setup(struct fcoe_interface *fcoe, 264 struct net_device *netdev) 265 { 266 struct fcoe_ctlr *fip = &fcoe->ctlr; 267 struct netdev_hw_addr *ha; 268 struct net_device *real_dev; 269 u8 flogi_maddr[ETH_ALEN]; 270 const struct net_device_ops *ops; 271 272 fcoe->netdev = netdev; 273 274 /* Let LLD initialize for FCoE */ 275 ops = netdev->netdev_ops; 276 if (ops->ndo_fcoe_enable) { 277 if (ops->ndo_fcoe_enable(netdev)) 278 FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE" 279 " specific feature for LLD.\n"); 280 } 281 282 /* Do not support for bonding device */ 283 if ((netdev->priv_flags & IFF_MASTER_ALB) || 284 (netdev->priv_flags & IFF_SLAVE_INACTIVE) || 285 (netdev->priv_flags & IFF_MASTER_8023AD)) { 286 FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n"); 287 return -EOPNOTSUPP; 288 } 289 290 /* look for SAN MAC address, if multiple SAN MACs exist, only 291 * use the first one for SPMA */ 292 real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? 293 vlan_dev_real_dev(netdev) : netdev; 294 rcu_read_lock(); 295 for_each_dev_addr(real_dev, ha) { 296 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 297 (is_valid_ether_addr(ha->addr))) { 298 memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); 299 fip->spma = 1; 300 break; 301 } 302 } 303 rcu_read_unlock(); 304 305 /* setup Source Mac Address */ 306 if (!fip->spma) 307 memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len); 308 309 /* 310 * Add FCoE MAC address as second unicast MAC address 311 * or enter promiscuous mode if not capable of listening 312 * for multiple unicast MACs. 313 */ 314 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); 315 dev_uc_add(netdev, flogi_maddr); 316 if (fip->spma) 317 dev_uc_add(netdev, fip->ctl_src_addr); 318 dev_mc_add(netdev, FIP_ALL_ENODE_MACS); 319 320 /* 321 * setup the receive function from ethernet driver 322 * on the ethertype for the given device 323 */ 324 fcoe->fcoe_packet_type.func = fcoe_rcv; 325 fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); 326 fcoe->fcoe_packet_type.dev = netdev; 327 dev_add_pack(&fcoe->fcoe_packet_type); 328 329 fcoe->fip_packet_type.func = fcoe_fip_recv; 330 fcoe->fip_packet_type.type = htons(ETH_P_FIP); 331 fcoe->fip_packet_type.dev = netdev; 332 dev_add_pack(&fcoe->fip_packet_type); 333 334 return 0; 335 } 336 337 /** 338 * fcoe_interface_create() - Create a FCoE interface on a net device 339 * @netdev: The net device to create the FCoE interface on 340 * 341 * Returns: pointer to a struct fcoe_interface or NULL on error 342 */ 343 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) 344 { 345 struct fcoe_interface *fcoe; 346 int err; 347 348 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); 349 if (!fcoe) { 350 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); 351 return NULL; 352 } 353 354 dev_hold(netdev); 355 kref_init(&fcoe->kref); 356 357 /* 358 * Initialize FIP. 
 */
	fcoe_ctlr_init(&fcoe->ctlr);
	fcoe->ctlr.send = fcoe_fip_send;
	fcoe->ctlr.update_mac = fcoe_update_src_mac;
	fcoe->ctlr.get_src_addr = fcoe_get_src_mac;

	err = fcoe_interface_setup(fcoe, netdev);
	if (err) {
		fcoe_ctlr_destroy(&fcoe->ctlr);
		kfree(fcoe);
		dev_put(netdev);
		return NULL;
	}

	return fcoe;
}

/**
 * fcoe_interface_cleanup() - Clean up a FCoE interface
 * @fcoe: The FCoE interface to be cleaned up
 *
 * Caller must be holding the RTNL mutex
 */
void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
	struct net_device *netdev = fcoe->netdev;
	struct fcoe_ctlr *fip = &fcoe->ctlr;
	u8 flogi_maddr[ETH_ALEN];
	const struct net_device_ops *ops;

	/*
	 * Don't listen for Ethernet packets anymore.
	 * synchronize_net() ensures that the packet handlers are not running
	 * on another CPU. dev_remove_pack() would do that, this calls the
	 * unsynchronized version __dev_remove_pack() to avoid multiple delays.
	 */
	__dev_remove_pack(&fcoe->fcoe_packet_type);
	__dev_remove_pack(&fcoe->fip_packet_type);
	synchronize_net();

	/* Delete secondary MAC addresses */
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_uc_del(netdev, flogi_maddr);
	if (fip->spma)
		dev_uc_del(netdev, fip->ctl_src_addr);
	dev_mc_del(netdev, FIP_ALL_ENODE_MACS);

	/* Tell the LLD we are done w/ FCoE */
	ops = netdev->netdev_ops;
	if (ops->ndo_fcoe_disable) {
		if (ops->ndo_fcoe_disable(netdev))
			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
					" specific feature for LLD.\n");
	}
}

/**
 * fcoe_interface_release() - fcoe_interface kref release function
 * @kref: Embedded reference count in an fcoe_interface struct
 */
static void fcoe_interface_release(struct kref *kref)
{
	struct fcoe_interface *fcoe;
	struct net_device *netdev;

	fcoe = container_of(kref, struct fcoe_interface, kref);
	netdev = fcoe->netdev;
	/* tear-down the FCoE controller */
	fcoe_ctlr_destroy(&fcoe->ctlr);
	kfree(fcoe);
	dev_put(netdev);
}

/**
 * fcoe_interface_get() - Get a reference to a FCoE interface
 * @fcoe: The FCoE interface to be held
 */
static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
{
	kref_get(&fcoe->kref);
}

/**
 * fcoe_interface_put() - Put a reference to a FCoE interface
 * @fcoe: The FCoE interface to be released
 */
static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
{
	kref_put(&fcoe->kref, fcoe_interface_release);
}

/**
 * fcoe_fip_recv() - Handler for received FIP frames
 * @skb: The receive skb
 * @netdev: The associated net device
 * @ptype: The packet_type structure which was used to register this handler
 * @orig_dev: The original net_device that the skb was received on.
456 * (in case dev is a bond) 457 * 458 * Returns: 0 for success 459 */ 460 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, 461 struct packet_type *ptype, 462 struct net_device *orig_dev) 463 { 464 struct fcoe_interface *fcoe; 465 466 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type); 467 fcoe_ctlr_recv(&fcoe->ctlr, skb); 468 return 0; 469 } 470 471 /** 472 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame 473 * @fip: The FCoE controller 474 * @skb: The FIP packet to be sent 475 */ 476 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 477 { 478 skb->dev = fcoe_from_ctlr(fip)->netdev; 479 dev_queue_xmit(skb); 480 } 481 482 /** 483 * fcoe_update_src_mac() - Update the Ethernet MAC filters 484 * @lport: The local port to update the source MAC on 485 * @addr: Unicast MAC address to add 486 * 487 * Remove any previously-set unicast MAC filter. 488 * Add secondary FCoE MAC address filter for our OUI. 489 */ 490 static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr) 491 { 492 struct fcoe_port *port = lport_priv(lport); 493 struct fcoe_interface *fcoe = port->fcoe; 494 495 rtnl_lock(); 496 if (!is_zero_ether_addr(port->data_src_addr)) 497 dev_uc_del(fcoe->netdev, port->data_src_addr); 498 if (!is_zero_ether_addr(addr)) 499 dev_uc_add(fcoe->netdev, addr); 500 memcpy(port->data_src_addr, addr, ETH_ALEN); 501 rtnl_unlock(); 502 } 503 504 /** 505 * fcoe_get_src_mac() - return the Ethernet source address for an lport 506 * @lport: libfc lport 507 */ 508 static u8 *fcoe_get_src_mac(struct fc_lport *lport) 509 { 510 struct fcoe_port *port = lport_priv(lport); 511 512 return port->data_src_addr; 513 } 514 515 /** 516 * fcoe_lport_config() - Set up a local port 517 * @lport: The local port to be setup 518 * 519 * Returns: 0 for success 520 */ 521 static int fcoe_lport_config(struct fc_lport *lport) 522 { 523 lport->link_up = 0; 524 lport->qfull = 0; 525 lport->max_retry_count = 3; 526 lport->max_rport_retry_count = 3; 527 lport->e_d_tov = 2 * 1000; /* FC-FS default */ 528 lport->r_a_tov = 2 * 2 * 1000; 529 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 530 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); 531 lport->does_npiv = 1; 532 533 fc_lport_init_stats(lport); 534 535 /* lport fc_lport related configuration */ 536 fc_lport_config(lport); 537 538 /* offload related configuration */ 539 lport->crc_offload = 0; 540 lport->seq_offload = 0; 541 lport->lro_enabled = 0; 542 lport->lro_xid = 0; 543 lport->lso_max = 0; 544 545 return 0; 546 } 547 548 /** 549 * fcoe_queue_timer() - The fcoe queue timer 550 * @lport: The local port 551 * 552 * Calls fcoe_check_wait_queue on timeout 553 */ 554 static void fcoe_queue_timer(ulong lport) 555 { 556 fcoe_check_wait_queue((struct fc_lport *)lport, NULL); 557 } 558 559 /** 560 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it 561 * @netdev: the associated net device 562 * @wwn: the output WWN 563 * @type: the type of WWN (WWPN or WWNN) 564 * 565 * Returns: 0 for success 566 */ 567 static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) 568 { 569 const struct net_device_ops *ops = netdev->netdev_ops; 570 571 if (ops->ndo_fcoe_get_wwn) 572 return ops->ndo_fcoe_get_wwn(netdev, wwn, type); 573 return -EINVAL; 574 } 575 576 /** 577 * fcoe_netdev_config() - Set up net devive for SW FCoE 578 * @lport: The local port that is associated with the net device 579 * @netdev: The associated net device 580 * 581 * Must be called after fcoe_lport_config() as 
it will use local port mutex 582 * 583 * Returns: 0 for success 584 */ 585 static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) 586 { 587 u32 mfs; 588 u64 wwnn, wwpn; 589 struct fcoe_interface *fcoe; 590 struct fcoe_port *port; 591 int vid = 0; 592 593 /* Setup lport private data to point to fcoe softc */ 594 port = lport_priv(lport); 595 fcoe = port->fcoe; 596 597 /* 598 * Determine max frame size based on underlying device and optional 599 * user-configured limit. If the MFS is too low, fcoe_link_ok() 600 * will return 0, so do this first. 601 */ 602 mfs = netdev->mtu; 603 if (netdev->features & NETIF_F_FCOE_MTU) { 604 mfs = FCOE_MTU; 605 FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs); 606 } 607 mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); 608 if (fc_set_mfs(lport, mfs)) 609 return -EINVAL; 610 611 /* offload features support */ 612 if (netdev->features & NETIF_F_SG) 613 lport->sg_supp = 1; 614 615 if (netdev->features & NETIF_F_FCOE_CRC) { 616 lport->crc_offload = 1; 617 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); 618 } 619 if (netdev->features & NETIF_F_FSO) { 620 lport->seq_offload = 1; 621 lport->lso_max = netdev->gso_max_size; 622 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", 623 lport->lso_max); 624 } 625 if (netdev->fcoe_ddp_xid) { 626 lport->lro_enabled = 1; 627 lport->lro_xid = netdev->fcoe_ddp_xid; 628 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", 629 lport->lro_xid); 630 } 631 skb_queue_head_init(&port->fcoe_pending_queue); 632 port->fcoe_pending_queue_active = 0; 633 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); 634 635 fcoe_link_speed_update(lport); 636 637 if (!lport->vport) { 638 /* 639 * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN: 640 * For WWNN, we use NAA 1 w/ bit 27-16 of word 0 as 0. 
641 * For WWPN, we use NAA 2 w/ bit 27-16 of word 0 from VLAN ID 642 */ 643 if (netdev->priv_flags & IFF_802_1Q_VLAN) 644 vid = vlan_dev_vlan_id(netdev); 645 646 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 647 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 648 fc_set_wwnn(lport, wwnn); 649 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 650 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 651 2, vid); 652 fc_set_wwpn(lport, wwpn); 653 } 654 655 return 0; 656 } 657 658 /** 659 * fcoe_shost_config() - Set up the SCSI host associated with a local port 660 * @lport: The local port 661 * @dev: The device associated with the SCSI host 662 * 663 * Must be called after fcoe_lport_config() and fcoe_netdev_config() 664 * 665 * Returns: 0 for success 666 */ 667 static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) 668 { 669 int rc = 0; 670 671 /* lport scsi host config */ 672 lport->host->max_lun = FCOE_MAX_LUN; 673 lport->host->max_id = FCOE_MAX_FCP_TARGET; 674 lport->host->max_channel = 0; 675 lport->host->max_cmd_len = FCOE_MAX_CMD_LEN; 676 677 if (lport->vport) 678 lport->host->transportt = fcoe_vport_transport_template; 679 else 680 lport->host->transportt = fcoe_transport_template; 681 682 /* add the new host to the SCSI-ml */ 683 rc = scsi_add_host(lport->host, dev); 684 if (rc) { 685 FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: " 686 "error on scsi_add_host\n"); 687 return rc; 688 } 689 690 if (!lport->vport) 691 fc_host_max_npiv_vports(lport->host) = USHRT_MAX; 692 693 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, 694 "%s v%s over %s", FCOE_NAME, FCOE_VERSION, 695 fcoe_netdev(lport)->name); 696 697 return 0; 698 } 699 700 /** 701 * fcoe_oem_match() - The match routine for the offloaded exchange manager 702 * @fp: The I/O frame 703 * 704 * This routine will be associated with an exchange manager (EM). When 705 * the libfc exchange handling code is looking for an EM to use it will 706 * call this routine and pass it the frame that it wishes to send. This 707 * routine will return True if the associated EM is to be used and False 708 * if the echange code should continue looking for an EM. 709 * 710 * The offload EM that this routine is associated with will handle any 711 * packets that are for SCSI read requests. 712 * 713 * Returns: True for read types I/O, otherwise returns false. 714 */ 715 bool fcoe_oem_match(struct fc_frame *fp) 716 { 717 return fc_fcp_is_read(fr_fsp(fp)) && 718 (fr_fsp(fp)->data_len > fcoe_ddp_min); 719 } 720 721 /** 722 * fcoe_em_config() - Allocate and configure an exchange manager 723 * @lport: The local port that the new EM will be associated with 724 * 725 * Returns: 0 on success 726 */ 727 static inline int fcoe_em_config(struct fc_lport *lport) 728 { 729 struct fcoe_port *port = lport_priv(lport); 730 struct fcoe_interface *fcoe = port->fcoe; 731 struct fcoe_interface *oldfcoe = NULL; 732 struct net_device *old_real_dev, *cur_real_dev; 733 u16 min_xid = FCOE_MIN_XID; 734 u16 max_xid = FCOE_MAX_XID; 735 736 /* 737 * Check if need to allocate an em instance for 738 * offload exchange ids to be shared across all VN_PORTs/lport. 
739 */ 740 if (!lport->lro_enabled || !lport->lro_xid || 741 (lport->lro_xid >= max_xid)) { 742 lport->lro_xid = 0; 743 goto skip_oem; 744 } 745 746 /* 747 * Reuse existing offload em instance in case 748 * it is already allocated on real eth device 749 */ 750 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN) 751 cur_real_dev = vlan_dev_real_dev(fcoe->netdev); 752 else 753 cur_real_dev = fcoe->netdev; 754 755 list_for_each_entry(oldfcoe, &fcoe_hostlist, list) { 756 if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN) 757 old_real_dev = vlan_dev_real_dev(oldfcoe->netdev); 758 else 759 old_real_dev = oldfcoe->netdev; 760 761 if (cur_real_dev == old_real_dev) { 762 fcoe->oem = oldfcoe->oem; 763 break; 764 } 765 } 766 767 if (fcoe->oem) { 768 if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) { 769 printk(KERN_ERR "fcoe_em_config: failed to add " 770 "offload em:%p on interface:%s\n", 771 fcoe->oem, fcoe->netdev->name); 772 return -ENOMEM; 773 } 774 } else { 775 fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3, 776 FCOE_MIN_XID, lport->lro_xid, 777 fcoe_oem_match); 778 if (!fcoe->oem) { 779 printk(KERN_ERR "fcoe_em_config: failed to allocate " 780 "em for offload exches on interface:%s\n", 781 fcoe->netdev->name); 782 return -ENOMEM; 783 } 784 } 785 786 /* 787 * Exclude offload EM xid range from next EM xid range. 788 */ 789 min_xid += lport->lro_xid + 1; 790 791 skip_oem: 792 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) { 793 printk(KERN_ERR "fcoe_em_config: failed to " 794 "allocate em on interface %s\n", fcoe->netdev->name); 795 return -ENOMEM; 796 } 797 798 return 0; 799 } 800 801 /** 802 * fcoe_if_destroy() - Tear down a SW FCoE instance 803 * @lport: The local port to be destroyed 804 * 805 * Locking: must be called with the RTNL mutex held and RTNL mutex 806 * needed to be dropped by this function since not dropping RTNL 807 * would cause circular locking warning on synchronous fip worker 808 * cancelling thru fcoe_interface_put invoked by this function. 
809 * 810 */ 811 static void fcoe_if_destroy(struct fc_lport *lport) 812 { 813 struct fcoe_port *port = lport_priv(lport); 814 struct fcoe_interface *fcoe = port->fcoe; 815 struct net_device *netdev = fcoe->netdev; 816 817 FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); 818 819 /* Logout of the fabric */ 820 fc_fabric_logoff(lport); 821 822 /* Cleanup the fc_lport */ 823 fc_lport_destroy(lport); 824 fc_fcp_destroy(lport); 825 826 /* Stop the transmit retry timer */ 827 del_timer_sync(&port->timer); 828 829 /* Free existing transmit skbs */ 830 fcoe_clean_pending_queue(lport); 831 832 if (!is_zero_ether_addr(port->data_src_addr)) 833 dev_uc_del(netdev, port->data_src_addr); 834 rtnl_unlock(); 835 836 /* receives may not be stopped until after this */ 837 fcoe_interface_put(fcoe); 838 839 /* Free queued packets for the per-CPU receive threads */ 840 fcoe_percpu_clean(lport); 841 842 /* Detach from the scsi-ml */ 843 fc_remove_host(lport->host); 844 scsi_remove_host(lport->host); 845 846 /* There are no more rports or I/O, free the EM */ 847 fc_exch_mgr_free(lport); 848 849 /* Free memory used by statistical counters */ 850 fc_lport_free_stats(lport); 851 852 /* Release the Scsi_Host */ 853 scsi_host_put(lport->host); 854 module_put(THIS_MODULE); 855 } 856 857 /** 858 * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device 859 * @lport: The local port to setup DDP for 860 * @xid: The exchange ID for this DDP transfer 861 * @sgl: The scatterlist describing this transfer 862 * @sgc: The number of sg items 863 * 864 * Returns: 0 if the DDP context was not configured 865 */ 866 static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid, 867 struct scatterlist *sgl, unsigned int sgc) 868 { 869 struct net_device *netdev = fcoe_netdev(lport); 870 871 if (netdev->netdev_ops->ndo_fcoe_ddp_setup) 872 return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, 873 xid, sgl, 874 sgc); 875 876 return 0; 877 } 878 879 /** 880 * fcoe_ddp_done() - Call a LLD's ddp_done through the net device 881 * @lport: The local port to complete DDP on 882 * @xid: The exchange ID for this DDP transfer 883 * 884 * Returns: the length of data that have been completed by DDP 885 */ 886 static int fcoe_ddp_done(struct fc_lport *lport, u16 xid) 887 { 888 struct net_device *netdev = fcoe_netdev(lport); 889 890 if (netdev->netdev_ops->ndo_fcoe_ddp_done) 891 return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid); 892 return 0; 893 } 894 895 /** 896 * fcoe_if_create() - Create a FCoE instance on an interface 897 * @fcoe: The FCoE interface to create a local port on 898 * @parent: The device pointer to be the parent in sysfs for the SCSI host 899 * @npiv: Indicates if the port is a vport or not 900 * 901 * Creates a fc_lport instance and a Scsi_Host instance and configure them. 
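 *
 * Roughly, the setup sequence below is: fcoe_lport_config() for libfc
 * defaults, fcoe_netdev_config() to derive MFS, offload capabilities and
 * WWNs from the net device, fcoe_shost_config() to register the Scsi_Host,
 * fcoe_libfc_config() to attach the libfc function template, and, for
 * non-NPIV ports only, fcoe_em_config() to set up the exchange manager(s).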
902 * 903 * Returns: The allocated fc_lport or an error pointer 904 */ 905 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, 906 struct device *parent, int npiv) 907 { 908 struct net_device *netdev = fcoe->netdev; 909 struct fc_lport *lport = NULL; 910 struct fcoe_port *port; 911 int rc; 912 /* 913 * parent is only a vport if npiv is 1, 914 * but we'll only use vport in that case so go ahead and set it 915 */ 916 struct fc_vport *vport = dev_to_vport(parent); 917 918 FCOE_NETDEV_DBG(netdev, "Create Interface\n"); 919 920 if (!npiv) { 921 lport = libfc_host_alloc(&fcoe_shost_template, 922 sizeof(struct fcoe_port)); 923 } else { 924 lport = libfc_vport_create(vport, 925 sizeof(struct fcoe_port)); 926 } 927 if (!lport) { 928 FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); 929 rc = -ENOMEM; 930 goto out; 931 } 932 port = lport_priv(lport); 933 port->lport = lport; 934 port->fcoe = fcoe; 935 INIT_WORK(&port->destroy_work, fcoe_destroy_work); 936 937 /* configure a fc_lport including the exchange manager */ 938 rc = fcoe_lport_config(lport); 939 if (rc) { 940 FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " 941 "interface\n"); 942 goto out_host_put; 943 } 944 945 if (npiv) { 946 FCOE_NETDEV_DBG(netdev, "Setting vport names, " 947 "%16.16llx %16.16llx\n", 948 vport->node_name, vport->port_name); 949 fc_set_wwnn(lport, vport->node_name); 950 fc_set_wwpn(lport, vport->port_name); 951 } 952 953 /* configure lport network properties */ 954 rc = fcoe_netdev_config(lport, netdev); 955 if (rc) { 956 FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the " 957 "interface\n"); 958 goto out_lp_destroy; 959 } 960 961 /* configure lport scsi host properties */ 962 rc = fcoe_shost_config(lport, parent); 963 if (rc) { 964 FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " 965 "interface\n"); 966 goto out_lp_destroy; 967 } 968 969 /* Initialize the library */ 970 rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ); 971 if (rc) { 972 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " 973 "interface\n"); 974 goto out_lp_destroy; 975 } 976 977 if (!npiv) { 978 /* 979 * fcoe_em_alloc() and fcoe_hostlist_add() both 980 * need to be atomic with respect to other changes to the 981 * hostlist since fcoe_em_alloc() looks for an existing EM 982 * instance on host list updated by fcoe_hostlist_add(). 983 * 984 * This is currently handled through the fcoe_config_mutex 985 * begin held. 
986 */ 987 988 /* lport exch manager allocation */ 989 rc = fcoe_em_config(lport); 990 if (rc) { 991 FCOE_NETDEV_DBG(netdev, "Could not configure the EM " 992 "for the interface\n"); 993 goto out_lp_destroy; 994 } 995 } 996 997 fcoe_interface_get(fcoe); 998 return lport; 999 1000 out_lp_destroy: 1001 fc_exch_mgr_free(lport); 1002 out_host_put: 1003 scsi_host_put(lport->host); 1004 out: 1005 return ERR_PTR(rc); 1006 } 1007 1008 /** 1009 * fcoe_if_init() - Initialization routine for fcoe.ko 1010 * 1011 * Attaches the SW FCoE transport to the FC transport 1012 * 1013 * Returns: 0 on success 1014 */ 1015 static int __init fcoe_if_init(void) 1016 { 1017 /* attach to scsi transport */ 1018 fcoe_transport_template = fc_attach_transport(&fcoe_transport_function); 1019 fcoe_vport_transport_template = 1020 fc_attach_transport(&fcoe_vport_transport_function); 1021 1022 if (!fcoe_transport_template) { 1023 printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); 1024 return -ENODEV; 1025 } 1026 1027 return 0; 1028 } 1029 1030 /** 1031 * fcoe_if_exit() - Tear down fcoe.ko 1032 * 1033 * Detaches the SW FCoE transport from the FC transport 1034 * 1035 * Returns: 0 on success 1036 */ 1037 int __exit fcoe_if_exit(void) 1038 { 1039 fc_release_transport(fcoe_transport_template); 1040 fc_release_transport(fcoe_vport_transport_template); 1041 fcoe_transport_template = NULL; 1042 fcoe_vport_transport_template = NULL; 1043 return 0; 1044 } 1045 1046 /** 1047 * fcoe_percpu_thread_create() - Create a receive thread for an online CPU 1048 * @cpu: The CPU index of the CPU to create a receive thread for 1049 */ 1050 static void fcoe_percpu_thread_create(unsigned int cpu) 1051 { 1052 struct fcoe_percpu_s *p; 1053 struct task_struct *thread; 1054 1055 p = &per_cpu(fcoe_percpu, cpu); 1056 1057 thread = kthread_create(fcoe_percpu_receive_thread, 1058 (void *)p, "fcoethread/%d", cpu); 1059 1060 if (likely(!IS_ERR(thread))) { 1061 kthread_bind(thread, cpu); 1062 wake_up_process(thread); 1063 1064 spin_lock_bh(&p->fcoe_rx_list.lock); 1065 p->thread = thread; 1066 spin_unlock_bh(&p->fcoe_rx_list.lock); 1067 } 1068 } 1069 1070 /** 1071 * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU 1072 * @cpu: The CPU index of the CPU whose receive thread is to be destroyed 1073 * 1074 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the 1075 * current CPU's Rx thread. If the thread being destroyed is bound to 1076 * the CPU processing this context the skbs will be freed. 1077 */ 1078 static void fcoe_percpu_thread_destroy(unsigned int cpu) 1079 { 1080 struct fcoe_percpu_s *p; 1081 struct task_struct *thread; 1082 struct page *crc_eof; 1083 struct sk_buff *skb; 1084 #ifdef CONFIG_SMP 1085 struct fcoe_percpu_s *p0; 1086 unsigned targ_cpu = get_cpu(); 1087 #endif /* CONFIG_SMP */ 1088 1089 FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); 1090 1091 /* Prevent any new skbs from being queued for this CPU. */ 1092 p = &per_cpu(fcoe_percpu, cpu); 1093 spin_lock_bh(&p->fcoe_rx_list.lock); 1094 thread = p->thread; 1095 p->thread = NULL; 1096 crc_eof = p->crc_eof_page; 1097 p->crc_eof_page = NULL; 1098 p->crc_eof_offset = 0; 1099 spin_unlock_bh(&p->fcoe_rx_list.lock); 1100 1101 #ifdef CONFIG_SMP 1102 /* 1103 * Don't bother moving the skb's if this context is running 1104 * on the same CPU that is having its thread destroyed. This 1105 * can easily happen when the module is removed. 
1106 */ 1107 if (cpu != targ_cpu) { 1108 p0 = &per_cpu(fcoe_percpu, targ_cpu); 1109 spin_lock_bh(&p0->fcoe_rx_list.lock); 1110 if (p0->thread) { 1111 FCOE_DBG("Moving frames from CPU %d to CPU %d\n", 1112 cpu, targ_cpu); 1113 1114 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1115 __skb_queue_tail(&p0->fcoe_rx_list, skb); 1116 spin_unlock_bh(&p0->fcoe_rx_list.lock); 1117 } else { 1118 /* 1119 * The targeted CPU is not initialized and cannot accept 1120 * new skbs. Unlock the targeted CPU and drop the skbs 1121 * on the CPU that is going offline. 1122 */ 1123 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1124 kfree_skb(skb); 1125 spin_unlock_bh(&p0->fcoe_rx_list.lock); 1126 } 1127 } else { 1128 /* 1129 * This scenario occurs when the module is being removed 1130 * and all threads are being destroyed. skbs will continue 1131 * to be shifted from the CPU thread that is being removed 1132 * to the CPU thread associated with the CPU that is processing 1133 * the module removal. Once there is only one CPU Rx thread it 1134 * will reach this case and we will drop all skbs and later 1135 * stop the thread. 1136 */ 1137 spin_lock_bh(&p->fcoe_rx_list.lock); 1138 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1139 kfree_skb(skb); 1140 spin_unlock_bh(&p->fcoe_rx_list.lock); 1141 } 1142 put_cpu(); 1143 #else 1144 /* 1145 * This a non-SMP scenario where the singular Rx thread is 1146 * being removed. Free all skbs and stop the thread. 1147 */ 1148 spin_lock_bh(&p->fcoe_rx_list.lock); 1149 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1150 kfree_skb(skb); 1151 spin_unlock_bh(&p->fcoe_rx_list.lock); 1152 #endif 1153 1154 if (thread) 1155 kthread_stop(thread); 1156 1157 if (crc_eof) 1158 put_page(crc_eof); 1159 } 1160 1161 /** 1162 * fcoe_cpu_callback() - Handler for CPU hotplug events 1163 * @nfb: The callback data block 1164 * @action: The event triggering the callback 1165 * @hcpu: The index of the CPU that the event is for 1166 * 1167 * This creates or destroys per-CPU data for fcoe 1168 * 1169 * Returns NOTIFY_OK always. 1170 */ 1171 static int fcoe_cpu_callback(struct notifier_block *nfb, 1172 unsigned long action, void *hcpu) 1173 { 1174 unsigned cpu = (unsigned long)hcpu; 1175 1176 switch (action) { 1177 case CPU_ONLINE: 1178 case CPU_ONLINE_FROZEN: 1179 FCOE_DBG("CPU %x online: Create Rx thread\n", cpu); 1180 fcoe_percpu_thread_create(cpu); 1181 break; 1182 case CPU_DEAD: 1183 case CPU_DEAD_FROZEN: 1184 FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu); 1185 fcoe_percpu_thread_destroy(cpu); 1186 break; 1187 default: 1188 break; 1189 } 1190 return NOTIFY_OK; 1191 } 1192 1193 /** 1194 * fcoe_rcv() - Receive packets from a net device 1195 * @skb: The received packet 1196 * @netdev: The net device that the packet was received on 1197 * @ptype: The packet type context 1198 * @olddev: The last device net device 1199 * 1200 * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a 1201 * FC frame and passes the frame to libfc. 
1202 * 1203 * Returns: 0 for success 1204 */ 1205 int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, 1206 struct packet_type *ptype, struct net_device *olddev) 1207 { 1208 struct fc_lport *lport; 1209 struct fcoe_rcv_info *fr; 1210 struct fcoe_interface *fcoe; 1211 struct fc_frame_header *fh; 1212 struct fcoe_percpu_s *fps; 1213 unsigned int cpu; 1214 1215 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); 1216 lport = fcoe->ctlr.lp; 1217 if (unlikely(!lport)) { 1218 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); 1219 goto err2; 1220 } 1221 if (!lport->link_up) 1222 goto err2; 1223 1224 FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " 1225 "data:%p tail:%p end:%p sum:%d dev:%s", 1226 skb->len, skb->data_len, skb->head, skb->data, 1227 skb_tail_pointer(skb), skb_end_pointer(skb), 1228 skb->csum, skb->dev ? skb->dev->name : "<NULL>"); 1229 1230 /* check for FCOE packet type */ 1231 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 1232 FCOE_NETDEV_DBG(netdev, "Wrong FC type frame"); 1233 goto err; 1234 } 1235 1236 /* 1237 * Check for minimum frame length, and make sure required FCoE 1238 * and FC headers are pulled into the linear data area. 1239 */ 1240 if (unlikely((skb->len < FCOE_MIN_FRAME) || 1241 !pskb_may_pull(skb, FCOE_HEADER_LEN))) 1242 goto err; 1243 1244 skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); 1245 fh = (struct fc_frame_header *) skb_transport_header(skb); 1246 1247 fr = fcoe_dev_from_skb(skb); 1248 fr->fr_dev = lport; 1249 fr->ptype = ptype; 1250 1251 /* 1252 * In case the incoming frame's exchange is originated from 1253 * the initiator, then received frame's exchange id is ANDed 1254 * with fc_cpu_mask bits to get the same cpu on which exchange 1255 * was originated, otherwise just use the current cpu. 1256 */ 1257 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) 1258 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; 1259 else 1260 cpu = smp_processor_id(); 1261 1262 fps = &per_cpu(fcoe_percpu, cpu); 1263 spin_lock_bh(&fps->fcoe_rx_list.lock); 1264 if (unlikely(!fps->thread)) { 1265 /* 1266 * The targeted CPU is not ready, let's target 1267 * the first CPU now. For non-SMP systems this 1268 * will check the same CPU twice. 1269 */ 1270 FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread " 1271 "ready for incoming skb- using first online " 1272 "CPU.\n"); 1273 1274 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1275 cpu = cpumask_first(cpu_online_mask); 1276 fps = &per_cpu(fcoe_percpu, cpu); 1277 spin_lock_bh(&fps->fcoe_rx_list.lock); 1278 if (!fps->thread) { 1279 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1280 goto err; 1281 } 1282 } 1283 1284 /* 1285 * We now have a valid CPU that we're targeting for 1286 * this skb. We also have this receive thread locked, 1287 * so we're free to queue skbs into it's queue. 1288 */ 1289 1290 /* If this is a SCSI-FCP frame, and this is already executing on the 1291 * correct CPU, and the queue for this CPU is empty, then go ahead 1292 * and process the frame directly in the softirq context. 1293 * This lets us process completions without context switching from the 1294 * NET_RX softirq, to our receive processing thread, and then back to 1295 * BLOCK softirq context. 
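 *
 * For illustration (mask value assumed): with fc_cpu_mask == 0x3 on a
 * 4-CPU system, a response frame for an exchange our side originated with
 * OX_ID 0x12A5 is steered to CPU 0x12A5 & 0x3 == 1. If that frame is FCP
 * and we are already running on CPU 1 with an empty rx queue, it is
 * handled right here instead of being queued to fcoethread/1.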
 */
	if (fh->fh_type == FC_TYPE_FCP &&
	    cpu == smp_processor_id() &&
	    skb_queue_empty(&fps->fcoe_rx_list)) {
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		fcoe_recv_frame(skb);
	} else {
		__skb_queue_tail(&fps->fcoe_rx_list, skb);
		if (fps->fcoe_rx_list.qlen == 1)
			wake_up_process(fps->thread);
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
	}

	return 0;
err:
	per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
	put_cpu();
err2:
	kfree_skb(skb);
	return -1;
}

/**
 * fcoe_start_io() - Start FCoE I/O
 * @skb: The packet to be transmitted
 *
 * This routine clones the skb and hands the clone to the net device for
 * transmission; the original skb is freed only once the clone has been
 * queued successfully.
 *
 * Returns: 0 for success
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	struct sk_buff *nskb;
	int rc;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;	/* leave the original skb for the caller to requeue */
	rc = dev_queue_xmit(nskb);
	if (rc != 0)
		return rc;
	kfree_skb(skb);
	return 0;
}

/**
 * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
 * @skb: The packet to be transmitted
 * @tlen: The total length of the trailer
 *
 * This routine allocates a page for frame trailers. The page is re-used if
 * there is enough room left on it for the current trailer. If there isn't
 * enough buffer left a new page is allocated for the trailer. Reference to
 * the page from this function as well as the skbs using the page fragments
 * ensure that the page is freed at the appropriate time.
 *
 * Returns: 0 for success
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;

	fps = &get_cpu_var(fcoe_percpu);
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu_var(fcoe_percpu);
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		fps->crc_eof_offset = 0;
	}

	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu_var(fcoe_percpu);
	return 0;
}

/**
 * fcoe_fc_crc() - Calculates the CRC for a given frame
 * @fp: The frame to be checksummed
 *
 * This uses the crc32() routine to calculate the CRC for a frame
 *
 * Return: The 32 bit CRC value
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

	crc = crc32(~0, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}

/**
 * fcoe_xmit() -
Transmit a FCoE frame 1425 * @lport: The local port that the frame is to be transmitted for 1426 * @fp: The frame to be transmitted 1427 * 1428 * Return: 0 for success 1429 */ 1430 int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) 1431 { 1432 int wlen; 1433 u32 crc; 1434 struct ethhdr *eh; 1435 struct fcoe_crc_eof *cp; 1436 struct sk_buff *skb; 1437 struct fcoe_dev_stats *stats; 1438 struct fc_frame_header *fh; 1439 unsigned int hlen; /* header length implies the version */ 1440 unsigned int tlen; /* trailer length */ 1441 unsigned int elen; /* eth header, may include vlan */ 1442 struct fcoe_port *port = lport_priv(lport); 1443 struct fcoe_interface *fcoe = port->fcoe; 1444 u8 sof, eof; 1445 struct fcoe_hdr *hp; 1446 1447 WARN_ON((fr_len(fp) % sizeof(u32)) != 0); 1448 1449 fh = fc_frame_header_get(fp); 1450 skb = fp_skb(fp); 1451 wlen = skb->len / FCOE_WORD_TO_BYTE; 1452 1453 if (!lport->link_up) { 1454 kfree_skb(skb); 1455 return 0; 1456 } 1457 1458 if (unlikely(fh->fh_type == FC_TYPE_ELS) && 1459 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb)) 1460 return 0; 1461 1462 sof = fr_sof(fp); 1463 eof = fr_eof(fp); 1464 1465 elen = sizeof(struct ethhdr); 1466 hlen = sizeof(struct fcoe_hdr); 1467 tlen = sizeof(struct fcoe_crc_eof); 1468 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1469 1470 /* crc offload */ 1471 if (likely(lport->crc_offload)) { 1472 skb->ip_summed = CHECKSUM_PARTIAL; 1473 skb->csum_start = skb_headroom(skb); 1474 skb->csum_offset = skb->len; 1475 crc = 0; 1476 } else { 1477 skb->ip_summed = CHECKSUM_NONE; 1478 crc = fcoe_fc_crc(fp); 1479 } 1480 1481 /* copy port crc and eof to the skb buff */ 1482 if (skb_is_nonlinear(skb)) { 1483 skb_frag_t *frag; 1484 if (fcoe_get_paged_crc_eof(skb, tlen)) { 1485 kfree_skb(skb); 1486 return -ENOMEM; 1487 } 1488 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; 1489 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ) 1490 + frag->page_offset; 1491 } else { 1492 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); 1493 } 1494 1495 memset(cp, 0, sizeof(*cp)); 1496 cp->fcoe_eof = eof; 1497 cp->fcoe_crc32 = cpu_to_le32(~crc); 1498 1499 if (skb_is_nonlinear(skb)) { 1500 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); 1501 cp = NULL; 1502 } 1503 1504 /* adjust skb network/transport offsets to match mac/fcoe/port */ 1505 skb_push(skb, elen + hlen); 1506 skb_reset_mac_header(skb); 1507 skb_reset_network_header(skb); 1508 skb->mac_len = elen; 1509 skb->protocol = htons(ETH_P_FCOE); 1510 skb->dev = fcoe->netdev; 1511 1512 /* fill up mac and fcoe headers */ 1513 eh = eth_hdr(skb); 1514 eh->h_proto = htons(ETH_P_FCOE); 1515 if (fcoe->ctlr.map_dest) 1516 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 1517 else 1518 /* insert GW address */ 1519 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN); 1520 1521 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 1522 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); 1523 else 1524 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 1525 1526 hp = (struct fcoe_hdr *)(eh + 1); 1527 memset(hp, 0, sizeof(*hp)); 1528 if (FC_FCOE_VER) 1529 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); 1530 hp->fcoe_sof = sof; 1531 1532 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ 1533 if (lport->seq_offload && fr_max_payload(fp)) { 1534 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; 1535 skb_shinfo(skb)->gso_size = fr_max_payload(fp); 1536 } else { 1537 skb_shinfo(skb)->gso_type = 0; 1538 skb_shinfo(skb)->gso_size = 0; 1539 } 1540 /* update tx stats: regardless if LLD fails */ 1541 stats = 
per_cpu_ptr(lport->dev_stats, get_cpu()); 1542 stats->TxFrames++; 1543 stats->TxWords += wlen; 1544 put_cpu(); 1545 1546 /* send down to lld */ 1547 fr_dev(fp) = lport; 1548 if (port->fcoe_pending_queue.qlen) 1549 fcoe_check_wait_queue(lport, skb); 1550 else if (fcoe_start_io(skb)) 1551 fcoe_check_wait_queue(lport, skb); 1552 1553 return 0; 1554 } 1555 1556 /** 1557 * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion 1558 * @skb: The completed skb (argument required by destructor) 1559 */ 1560 static void fcoe_percpu_flush_done(struct sk_buff *skb) 1561 { 1562 complete(&fcoe_flush_completion); 1563 } 1564 1565 /** 1566 * fcoe_recv_frame() - process a single received frame 1567 * @skb: frame to process 1568 */ 1569 static void fcoe_recv_frame(struct sk_buff *skb) 1570 { 1571 u32 fr_len; 1572 struct fc_lport *lport; 1573 struct fcoe_rcv_info *fr; 1574 struct fcoe_dev_stats *stats; 1575 struct fc_frame_header *fh; 1576 struct fcoe_crc_eof crc_eof; 1577 struct fc_frame *fp; 1578 struct fcoe_port *port; 1579 struct fcoe_hdr *hp; 1580 1581 fr = fcoe_dev_from_skb(skb); 1582 lport = fr->fr_dev; 1583 if (unlikely(!lport)) { 1584 if (skb->destructor != fcoe_percpu_flush_done) 1585 FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); 1586 kfree_skb(skb); 1587 return; 1588 } 1589 1590 FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " 1591 "head:%p data:%p tail:%p end:%p sum:%d dev:%s", 1592 skb->len, skb->data_len, 1593 skb->head, skb->data, skb_tail_pointer(skb), 1594 skb_end_pointer(skb), skb->csum, 1595 skb->dev ? skb->dev->name : "<NULL>"); 1596 1597 port = lport_priv(lport); 1598 if (skb_is_nonlinear(skb)) 1599 skb_linearize(skb); /* not ideal */ 1600 1601 /* 1602 * Frame length checks and setting up the header pointers 1603 * was done in fcoe_rcv already. 1604 */ 1605 hp = (struct fcoe_hdr *) skb_network_header(skb); 1606 fh = (struct fc_frame_header *) skb_transport_header(skb); 1607 1608 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 1609 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { 1610 if (stats->ErrorFrames < 5) 1611 printk(KERN_WARNING "fcoe: FCoE version " 1612 "mismatch: The frame has " 1613 "version %x, but the " 1614 "initiator supports version " 1615 "%x\n", FC_FCOE_DECAPS_VER(hp), 1616 FC_FCOE_VER); 1617 goto drop; 1618 } 1619 1620 skb_pull(skb, sizeof(struct fcoe_hdr)); 1621 fr_len = skb->len - sizeof(struct fcoe_crc_eof); 1622 1623 stats->RxFrames++; 1624 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; 1625 1626 fp = (struct fc_frame *)skb; 1627 fc_frame_init(fp); 1628 fr_dev(fp) = lport; 1629 fr_sof(fp) = hp->fcoe_sof; 1630 1631 /* Copy out the CRC and EOF trailer for access */ 1632 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) 1633 goto drop; 1634 fr_eof(fp) = crc_eof.fcoe_eof; 1635 fr_crc(fp) = crc_eof.fcoe_crc32; 1636 if (pskb_trim(skb, fr_len)) 1637 goto drop; 1638 1639 /* 1640 * We only check CRC if no offload is available and if it is 1641 * it's solicited data, in which case, the FCP layer would 1642 * check it during the copy. 
 */
	if (lport->crc_offload &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
	else
		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;

	fh = fc_frame_header_get(fp);
	if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
	    fh->fh_type != FC_TYPE_FCP) &&
	    (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
		if (le32_to_cpu(fr_crc(fp)) !=
		    ~crc32(~0, skb->data, fr_len)) {
			if (stats->InvalidCRCCount < 5)
				printk(KERN_WARNING "fcoe: dropping "
				       "frame with CRC error\n");
			stats->InvalidCRCCount++;
			goto drop;
		}
		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
	}
	put_cpu();
	fc_exch_recv(lport, fp);
	return;

drop:
	stats->ErrorFrames++;
	put_cpu();
	kfree_skb(skb);
}

/**
 * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
 * @arg: The per-CPU context
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	struct sk_buff *skb;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);
		fcoe_recv_frame(skb);
	}
	return 0;
}

/**
 * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
 * @lport: The local port whose backlog is to be cleared
 * @skb: The skb to be added to the backlog, or NULL
 *
 * This appends @skb (if any) to the wait_queue, then dequeues skbs from the
 * head of the wait_queue and calls fcoe_start_io() for each one. If a
 * transmit fails, the skb is put back at the head of the wait_queue to be
 * retried later.
 *
 * The wait_queue is used when the skb transmit fails. The failed skb
 * will go in the wait_queue which will be emptied by the timer function or
 * by the next skb transmit.
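 *
 * For example (timings per the code below): if fcoe_start_io() fails for a
 * frame in fcoe_xmit(), the skb lands on fcoe_pending_queue; roughly two
 * jiffies later the port timer fires, fcoe_queue_timer() calls back in here
 * with a NULL skb, and the backlog is drained. Once the queue depth climbs
 * past FCOE_MAX_QUEUE_DEPTH the lport is marked qfull, and the flag is
 * cleared again when the depth drops below FCOE_LOW_QUEUE_DEPTH.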
1717 */ 1718 static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) 1719 { 1720 struct fcoe_port *port = lport_priv(lport); 1721 int rc; 1722 1723 spin_lock_bh(&port->fcoe_pending_queue.lock); 1724 1725 if (skb) 1726 __skb_queue_tail(&port->fcoe_pending_queue, skb); 1727 1728 if (port->fcoe_pending_queue_active) 1729 goto out; 1730 port->fcoe_pending_queue_active = 1; 1731 1732 while (port->fcoe_pending_queue.qlen) { 1733 /* keep qlen > 0 until fcoe_start_io succeeds */ 1734 port->fcoe_pending_queue.qlen++; 1735 skb = __skb_dequeue(&port->fcoe_pending_queue); 1736 1737 spin_unlock_bh(&port->fcoe_pending_queue.lock); 1738 rc = fcoe_start_io(skb); 1739 spin_lock_bh(&port->fcoe_pending_queue.lock); 1740 1741 if (rc) { 1742 __skb_queue_head(&port->fcoe_pending_queue, skb); 1743 /* undo temporary increment above */ 1744 port->fcoe_pending_queue.qlen--; 1745 break; 1746 } 1747 /* undo temporary increment above */ 1748 port->fcoe_pending_queue.qlen--; 1749 } 1750 1751 if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 1752 lport->qfull = 0; 1753 if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) 1754 mod_timer(&port->timer, jiffies + 2); 1755 port->fcoe_pending_queue_active = 0; 1756 out: 1757 if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) 1758 lport->qfull = 1; 1759 spin_unlock_bh(&port->fcoe_pending_queue.lock); 1760 return; 1761 } 1762 1763 /** 1764 * fcoe_dev_setup() - Setup the link change notification interface 1765 */ 1766 static void fcoe_dev_setup(void) 1767 { 1768 register_netdevice_notifier(&fcoe_notifier); 1769 } 1770 1771 /** 1772 * fcoe_dev_cleanup() - Cleanup the link change notification interface 1773 */ 1774 static void fcoe_dev_cleanup(void) 1775 { 1776 unregister_netdevice_notifier(&fcoe_notifier); 1777 } 1778 1779 /** 1780 * fcoe_device_notification() - Handler for net device events 1781 * @notifier: The context of the notification 1782 * @event: The type of event 1783 * @ptr: The net device that the event was on 1784 * 1785 * This function is called by the Ethernet driver in case of link change event. 
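 *
 * On NETDEV_CHANGEMTU (handled below), the FC maximum frame size is
 * recomputed as the new MTU minus the FCoE encapsulation overhead,
 * sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof). For example,
 * assuming the usual 14-byte FCoE header and 8-byte CRC/EOF trailer, a
 * 2158-byte FCoE MTU yields an MFS of 2136 bytes (24-byte FC header plus
 * the 2112-byte maximum payload).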
1786 * 1787 * Returns: 0 for success 1788 */ 1789 static int fcoe_device_notification(struct notifier_block *notifier, 1790 ulong event, void *ptr) 1791 { 1792 struct fc_lport *lport = NULL; 1793 struct net_device *netdev = ptr; 1794 struct fcoe_interface *fcoe; 1795 struct fcoe_port *port; 1796 struct fcoe_dev_stats *stats; 1797 u32 link_possible = 1; 1798 u32 mfs; 1799 int rc = NOTIFY_OK; 1800 1801 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 1802 if (fcoe->netdev == netdev) { 1803 lport = fcoe->ctlr.lp; 1804 break; 1805 } 1806 } 1807 if (!lport) { 1808 rc = NOTIFY_DONE; 1809 goto out; 1810 } 1811 1812 switch (event) { 1813 case NETDEV_DOWN: 1814 case NETDEV_GOING_DOWN: 1815 link_possible = 0; 1816 break; 1817 case NETDEV_UP: 1818 case NETDEV_CHANGE: 1819 break; 1820 case NETDEV_CHANGEMTU: 1821 if (netdev->features & NETIF_F_FCOE_MTU) 1822 break; 1823 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + 1824 sizeof(struct fcoe_crc_eof)); 1825 if (mfs >= FC_MIN_MAX_FRAME) 1826 fc_set_mfs(lport, mfs); 1827 break; 1828 case NETDEV_REGISTER: 1829 break; 1830 case NETDEV_UNREGISTER: 1831 list_del(&fcoe->list); 1832 port = lport_priv(fcoe->ctlr.lp); 1833 fcoe_interface_cleanup(fcoe); 1834 schedule_work(&port->destroy_work); 1835 goto out; 1836 break; 1837 default: 1838 FCOE_NETDEV_DBG(netdev, "Unknown event %ld " 1839 "from netdev netlink\n", event); 1840 } 1841 1842 fcoe_link_speed_update(lport); 1843 1844 if (link_possible && !fcoe_link_ok(lport)) 1845 fcoe_ctlr_link_up(&fcoe->ctlr); 1846 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { 1847 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 1848 stats->LinkFailureCount++; 1849 put_cpu(); 1850 fcoe_clean_pending_queue(lport); 1851 } 1852 out: 1853 return rc; 1854 } 1855 1856 /** 1857 * fcoe_if_to_netdev() - Parse a name buffer to get a net device 1858 * @buffer: The name of the net device 1859 * 1860 * Returns: NULL or a ptr to net_device 1861 */ 1862 static struct net_device *fcoe_if_to_netdev(const char *buffer) 1863 { 1864 char *cp; 1865 char ifname[IFNAMSIZ + 2]; 1866 1867 if (buffer) { 1868 strlcpy(ifname, buffer, IFNAMSIZ); 1869 cp = ifname + strlen(ifname); 1870 while (--cp >= ifname && *cp == '\n') 1871 *cp = '\0'; 1872 return dev_get_by_name(&init_net, ifname); 1873 } 1874 return NULL; 1875 } 1876 1877 /** 1878 * fcoe_disable() - Disables a FCoE interface 1879 * @buffer: The name of the Ethernet interface to be disabled 1880 * @kp: The associated kernel parameter 1881 * 1882 * Called from sysfs. 1883 * 1884 * Returns: 0 for success 1885 */ 1886 static int fcoe_disable(const char *buffer, struct kernel_param *kp) 1887 { 1888 struct fcoe_interface *fcoe; 1889 struct net_device *netdev; 1890 int rc = 0; 1891 1892 mutex_lock(&fcoe_config_mutex); 1893 #ifdef CONFIG_FCOE_MODULE 1894 /* 1895 * Make sure the module has been initialized, and is not about to be 1896 * removed. Module paramter sysfs files are writable before the 1897 * module_init function is called and after module_exit. 
/**
 * fcoe_disable() - Disables a FCoE interface
 * @buffer: The name of the Ethernet interface to be disabled
 * @kp: The associated kernel parameter
 *
 * Called from sysfs.
 *
 * Returns: 0 for success
 */
static int fcoe_disable(const char *buffer, struct kernel_param *kp)
{
	struct fcoe_interface *fcoe;
	struct net_device *netdev;
	int rc = 0;

	mutex_lock(&fcoe_config_mutex);
#ifdef CONFIG_FCOE_MODULE
	/*
	 * Make sure the module has been initialized, and is not about to be
	 * removed.  Module parameter sysfs files are writable before the
	 * module_init function is called and after module_exit.
	 */
	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
		rc = -ENODEV;
		goto out_nodev;
	}
#endif

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}

	if (!rtnl_trylock()) {
		dev_put(netdev);
		mutex_unlock(&fcoe_config_mutex);
		return restart_syscall();
	}

	fcoe = fcoe_hostlist_lookup_port(netdev);
	rtnl_unlock();

	if (fcoe) {
		fc_fabric_logoff(fcoe->ctlr.lp);
		fcoe_ctlr_link_down(&fcoe->ctlr);
	} else
		rc = -ENODEV;

	dev_put(netdev);
out_nodev:
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}

/**
 * fcoe_enable() - Enables a FCoE interface
 * @buffer: The name of the Ethernet interface to be enabled
 * @kp: The associated kernel parameter
 *
 * Called from sysfs.
 *
 * Returns: 0 for success
 */
static int fcoe_enable(const char *buffer, struct kernel_param *kp)
{
	struct fcoe_interface *fcoe;
	struct net_device *netdev;
	int rc = 0;

	mutex_lock(&fcoe_config_mutex);
#ifdef CONFIG_FCOE_MODULE
	/*
	 * Make sure the module has been initialized, and is not about to be
	 * removed.  Module parameter sysfs files are writable before the
	 * module_init function is called and after module_exit.
	 */
	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
		rc = -ENODEV;
		goto out_nodev;
	}
#endif

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}

	if (!rtnl_trylock()) {
		dev_put(netdev);
		mutex_unlock(&fcoe_config_mutex);
		return restart_syscall();
	}

	fcoe = fcoe_hostlist_lookup_port(netdev);
	rtnl_unlock();

	if (fcoe) {
		if (!fcoe_link_ok(fcoe->ctlr.lp))
			fcoe_ctlr_link_up(&fcoe->ctlr);
		rc = fc_fabric_login(fcoe->ctlr.lp);
	} else
		rc = -ENODEV;

	dev_put(netdev);
out_nodev:
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
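/*
 * Usage sketch (userspace, not part of this file): the enable/disable
 * handlers above are reached by writing an interface name to the module
 * parameter files, normally exposed under /sys/module/fcoe/parameters/.
 * That path is the conventional location for module parameters and is an
 * assumption here; the helper name is hypothetical and error handling is
 * minimal.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int fcoe_param_write(const char *param, const char *ifname)
{
	char path[128];
	int fd, rc;

	snprintf(path, sizeof(path),
		 "/sys/module/fcoe/parameters/%s", param);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	rc = write(fd, ifname, strlen(ifname)) < 0 ? -1 : 0;
	close(fd);
	return rc;
}

/* e.g. fcoe_param_write("disable", "eth3"); fcoe_param_write("enable", "eth3"); */
#endif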
/**
 * fcoe_destroy() - Destroy a FCoE interface
 * @buffer: The name of the Ethernet interface to be destroyed
 * @kp: The associated kernel parameter
 *
 * Called from sysfs.
 *
 * Returns: 0 for success
 */
static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
{
	struct fcoe_interface *fcoe;
	struct net_device *netdev;
	int rc = 0;

	mutex_lock(&fcoe_config_mutex);
#ifdef CONFIG_FCOE_MODULE
	/*
	 * Make sure the module has been initialized, and is not about to be
	 * removed.  Module parameter sysfs files are writable before the
	 * module_init function is called and after module_exit.
	 */
	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
		rc = -ENODEV;
		goto out_nodev;
	}
#endif

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}

	if (!rtnl_trylock()) {
		dev_put(netdev);
		mutex_unlock(&fcoe_config_mutex);
		return restart_syscall();
	}

	fcoe = fcoe_hostlist_lookup_port(netdev);
	if (!fcoe) {
		rtnl_unlock();
		rc = -ENODEV;
		goto out_putdev;
	}
	list_del(&fcoe->list);
	fcoe_interface_cleanup(fcoe);
	/* RTNL mutex is dropped by fcoe_if_destroy */
	fcoe_if_destroy(fcoe->ctlr.lp);

out_putdev:
	dev_put(netdev);
out_nodev:
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}

/**
 * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
 * @work: Handle to the FCoE port to be destroyed
 */
static void fcoe_destroy_work(struct work_struct *work)
{
	struct fcoe_port *port;

	port = container_of(work, struct fcoe_port, destroy_work);
	mutex_lock(&fcoe_config_mutex);
	rtnl_lock();
	/* RTNL mutex is dropped by fcoe_if_destroy */
	fcoe_if_destroy(port->lport);
	mutex_unlock(&fcoe_config_mutex);
}
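/*
 * Illustrative sketch (not built): the deferred-destroy pattern used by
 * fcoe_destroy_work() above.  The work_struct is initialized once when the
 * fcoe_port is set up (that happens outside this excerpt), and any context
 * that must not block on the config/RTNL mutexes -- such as the
 * NETDEV_UNREGISTER case in fcoe_device_notification() -- only queues it.
 * The helper names are hypothetical.
 */
#if 0
static void fcoe_example_port_init(struct fcoe_port *port)
{
	/* bind the deferred teardown handler to the port */
	INIT_WORK(&port->destroy_work, fcoe_destroy_work);
}

static void fcoe_example_request_destroy(struct fcoe_port *port)
{
	/* safe from notifier context; the work later runs in process
	 * context where rtnl_lock() and mutex_lock() may sleep */
	schedule_work(&port->destroy_work);
}
#endif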
/**
 * fcoe_create() - Create a fcoe interface
 * @buffer: The name of the Ethernet interface to create on
 * @kp: The associated kernel parameter
 *
 * Called from sysfs.
 *
 * Returns: 0 for success
 */
static int fcoe_create(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct fcoe_interface *fcoe;
	struct fc_lport *lport;
	struct net_device *netdev;

	mutex_lock(&fcoe_config_mutex);

	if (!rtnl_trylock()) {
		mutex_unlock(&fcoe_config_mutex);
		return restart_syscall();
	}

#ifdef CONFIG_FCOE_MODULE
	/*
	 * Make sure the module has been initialized, and is not about to be
	 * removed.  Module parameter sysfs files are writable before the
	 * module_init function is called and after module_exit.
	 */
	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
		rc = -ENODEV;
		goto out_nomod;
	}
#endif

	if (!try_module_get(THIS_MODULE)) {
		rc = -EINVAL;
		goto out_nomod;
	}

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}

	/* look for existing lport */
	if (fcoe_hostlist_lookup(netdev)) {
		rc = -EEXIST;
		goto out_putdev;
	}

	fcoe = fcoe_interface_create(netdev);
	if (!fcoe) {
		rc = -ENOMEM;
		goto out_putdev;
	}

	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
	if (IS_ERR(lport)) {
		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
		       netdev->name);
		rc = -EIO;
		fcoe_interface_cleanup(fcoe);
		goto out_free;
	}

	/* Make this the "master" N_Port */
	fcoe->ctlr.lp = lport;

	/* add to lports list */
	fcoe_hostlist_add(lport);

	/* start FIP Discovery and FLOGI */
	lport->boot_time = jiffies;
	fc_fabric_login(lport);
	if (!fcoe_link_ok(lport))
		fcoe_ctlr_link_up(&fcoe->ctlr);

	/*
	 * Release from init in fcoe_interface_create(), on success lport
	 * should be holding a reference taken in fcoe_if_create().
	 */
	fcoe_interface_put(fcoe);
	dev_put(netdev);
	rtnl_unlock();
	mutex_unlock(&fcoe_config_mutex);

	return 0;
out_free:
	fcoe_interface_put(fcoe);
out_putdev:
	dev_put(netdev);
out_nodev:
	module_put(THIS_MODULE);
out_nomod:
	rtnl_unlock();
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}

/**
 * fcoe_link_speed_update() - Update the supported and actual link speeds
 * @lport: The local port to update speeds for
 *
 * Returns: 0 if the ethtool query was successful
 *          -1 if the ethtool query failed
 */
int fcoe_link_speed_update(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct net_device *netdev = port->fcoe->netdev;
	struct ethtool_cmd ecmd = { ETHTOOL_GSET };

	if (!dev_ethtool_get_settings(netdev, &ecmd)) {
		lport->link_supported_speeds &=
			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
				      SUPPORTED_1000baseT_Full))
			lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
		if (ecmd.supported & SUPPORTED_10000baseT_Full)
			lport->link_supported_speeds |=
				FC_PORTSPEED_10GBIT;
		if (ecmd.speed == SPEED_1000)
			lport->link_speed = FC_PORTSPEED_1GBIT;
		if (ecmd.speed == SPEED_10000)
			lport->link_speed = FC_PORTSPEED_10GBIT;

		return 0;
	}
	return -1;
}

/**
 * fcoe_link_ok() - Check if the link is OK for a local port
 * @lport: The local port to check link on
 *
 * Returns: 0 if link is UP and OK, -1 if not
 */
int fcoe_link_ok(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct net_device *netdev = port->fcoe->netdev;

	if (netif_oper_up(netdev))
		return 0;
	return -1;
}
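/*
 * Illustrative sketch (not built): the ethtool-to-FC speed mapping applied
 * by fcoe_link_speed_update() above, pulled out as a hypothetical helper.
 * The FC_PORTSPEED_UNKNOWN fallback is an assumption of this sketch; the
 * driver simply leaves lport->link_speed untouched for other speeds.
 */
#if 0
static u32 fcoe_example_ethtool_speed_to_fc(const struct ethtool_cmd *ecmd)
{
	if (ecmd->speed == SPEED_10000)
		return FC_PORTSPEED_10GBIT;
	if (ecmd->speed == SPEED_1000)
		return FC_PORTSPEED_1GBIT;
	return FC_PORTSPEED_UNKNOWN;
}
#endif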
/**
 * fcoe_percpu_clean() - Clear all pending skbs for a local port
 * @lport: The local port whose skbs are to be cleared
 *
 * Must be called with fcoe_create_mutex held to single-thread completion.
 *
 * This flushes the pending skbs by adding a new skb to each queue and
 * waiting until they are all freed.  This assures us that not only are
 * there no packets that will be handled by the lport, but also that any
 * threads already handling packets have returned.
 */
void fcoe_percpu_clean(struct fc_lport *lport)
{
	struct fcoe_percpu_s *pp;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;
	struct sk_buff *head;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		pp = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&pp->fcoe_rx_list.lock);
		list = &pp->fcoe_rx_list;
		head = list->next;
		for (skb = head; skb != (struct sk_buff *)list;
		     skb = next) {
			next = skb->next;
			fr = fcoe_dev_from_skb(skb);
			if (fr->fr_dev == lport) {
				__skb_unlink(skb, list);
				kfree_skb(skb);
			}
		}

		if (!pp->thread || !cpu_online(cpu)) {
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
			continue;
		}

		skb = dev_alloc_skb(0);
		if (!skb) {
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
			continue;
		}
		skb->destructor = fcoe_percpu_flush_done;

		__skb_queue_tail(&pp->fcoe_rx_list, skb);
		if (pp->fcoe_rx_list.qlen == 1)
			wake_up_process(pp->thread);
		spin_unlock_bh(&pp->fcoe_rx_list.lock);

		wait_for_completion(&fcoe_flush_completion);
	}
}

/**
 * fcoe_clean_pending_queue() - Dequeue and free all skbs pending on a local port
 * @lport: The local port whose pending queue is to be emptied
 */
void fcoe_clean_pending_queue(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct sk_buff *skb;

	spin_lock_bh(&port->fcoe_pending_queue.lock);
	while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
		spin_unlock_bh(&port->fcoe_pending_queue.lock);
		kfree_skb(skb);
		spin_lock_bh(&port->fcoe_pending_queue.lock);
	}
	spin_unlock_bh(&port->fcoe_pending_queue.lock);
}

/**
 * fcoe_reset() - Reset a local port
 * @shost: The SCSI host associated with the local port to be reset
 *
 * Returns: Always 0 (return value required by FC transport template)
 */
int fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);
	fc_lport_reset(lport);
	return 0;
}

/**
 * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
 * @netdev: The net device used as a key
 *
 * Locking: Must be called with the RTNL mutex held.
 *
 * Returns: NULL or the FCoE interface
 */
static struct fcoe_interface *
fcoe_hostlist_lookup_port(const struct net_device *netdev)
{
	struct fcoe_interface *fcoe;

	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
		if (fcoe->netdev == netdev)
			return fcoe;
	}
	return NULL;
}
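/*
 * Illustrative sketch (not built): the destructor-based flush that
 * fcoe_percpu_clean() above relies on.  A zero-length marker skb is queued
 * behind whatever is still on a per-CPU receive list; once the receive
 * thread frees it, the destructor fires and wakes the waiter.  The names
 * below are hypothetical and only show the shape of the mechanism; the
 * driver's actual destructor is fcoe_percpu_flush_done(), defined
 * elsewhere in this file.
 */
#if 0
static DECLARE_COMPLETION(example_flush_done);

static void example_flush_destructor(struct sk_buff *skb)
{
	/* last reference dropped: everything queued ahead of the marker
	 * has already been processed and freed */
	complete(&example_flush_done);
}

static void example_flush_queue(struct fcoe_percpu_s *pp)
{
	struct sk_buff *skb = dev_alloc_skb(0);

	if (!skb)
		return;
	skb->destructor = example_flush_destructor;
	skb_queue_tail(&pp->fcoe_rx_list, skb);
	wait_for_completion(&example_flush_done);
}
#endif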
/**
 * fcoe_hostlist_lookup() - Find the local port associated with a given net device
 * @netdev: The netdevice used as a key
 *
 * Locking: Must be called with the RTNL mutex held
 *
 * Returns: NULL or the local port
 */
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
	struct fcoe_interface *fcoe;

	fcoe = fcoe_hostlist_lookup_port(netdev);
	return (fcoe) ? fcoe->ctlr.lp : NULL;
}

/**
 * fcoe_hostlist_add() - Add the FCoE interface identified by a local port to the hostlist
 * @lport: The local port that identifies the FCoE interface to be added
 *
 * Locking: must be called with the RTNL mutex held
 *
 * Returns: 0 for success
 */
static int fcoe_hostlist_add(const struct fc_lport *lport)
{
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;

	fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
	if (!fcoe) {
		port = lport_priv(lport);
		fcoe = port->fcoe;
		list_add_tail(&fcoe->list, &fcoe_hostlist);
	}
	return 0;
}

/**
 * fcoe_init() - Initialize fcoe.ko
 *
 * Returns: 0 on success, or a negative value on failure
 */
static int __init fcoe_init(void)
{
	struct fcoe_percpu_s *p;
	unsigned int cpu;
	int rc = 0;

	mutex_lock(&fcoe_config_mutex);

	for_each_possible_cpu(cpu) {
		p = &per_cpu(fcoe_percpu, cpu);
		skb_queue_head_init(&p->fcoe_rx_list);
	}

	for_each_online_cpu(cpu)
		fcoe_percpu_thread_create(cpu);

	/* Initialize per CPU interrupt thread */
	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
	if (rc)
		goto out_free;

	/* Setup link change notification */
	fcoe_dev_setup();

	rc = fcoe_if_init();
	if (rc)
		goto out_free;

	mutex_unlock(&fcoe_config_mutex);
	return 0;

out_free:
	for_each_online_cpu(cpu) {
		fcoe_percpu_thread_destroy(cpu);
	}
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
module_init(fcoe_init);

/**
 * fcoe_exit() - Clean up fcoe.ko
 */
static void __exit fcoe_exit(void)
{
	struct fcoe_interface *fcoe, *tmp;
	struct fcoe_port *port;
	unsigned int cpu;

	mutex_lock(&fcoe_config_mutex);

	fcoe_dev_cleanup();

	/* releases the associated fcoe hosts */
	rtnl_lock();
	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
		list_del(&fcoe->list);
		port = lport_priv(fcoe->ctlr.lp);
		fcoe_interface_cleanup(fcoe);
		schedule_work(&port->destroy_work);
	}
	rtnl_unlock();

	unregister_hotcpu_notifier(&fcoe_cpu_notifier);

	for_each_online_cpu(cpu)
		fcoe_percpu_thread_destroy(cpu);

	mutex_unlock(&fcoe_config_mutex);

	/*
	 * Flush any asynchronous interface destroys; this should happen
	 * after the netdev notifier is unregistered.
	 */
	flush_scheduled_work();
	/*
	 * That will flush out all the N_Ports on the hostlist, but now we
	 * may have NPIV VN_Ports scheduled for destruction.
	 */
	flush_scheduled_work();

	/*
	 * Detach from the scsi transport; this must happen after all
	 * destroys are done, therefore after the flush.
	 */
	fcoe_if_exit();
}
module_exit(fcoe_exit);
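/*
 * Illustrative sketch (not built): the shape of the CPU hotplug callback
 * that fcoe_init()/fcoe_exit() above register and unregister through
 * fcoe_cpu_notifier.  This is a hypothetical skeleton, not the driver's
 * actual fcoe_cpu_callback(); it only shows how the per-CPU receive
 * threads created above would track hotplug events.
 */
#if 0
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* bring up a receive thread for the new CPU */
		fcoe_percpu_thread_create(cpu);
		break;
	case CPU_DEAD:
		/* the CPU is gone; tear its thread down */
		fcoe_percpu_thread_destroy(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif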
/**
 * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
 * @seq: active sequence in the FLOGI or FDISC exchange
 * @fp: response frame, or error encoded in a pointer (timeout)
 * @arg: pointer to the fcoe_ctlr structure
 *
 * This handles MAC address management for FCoE, then passes control on to
 * the libfc FLOGI response handler.
 */
static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;

	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		/* pre-FIP */
		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
			fc_frame_free(fp);
			return;
		}
	}
	fcoe_update_src_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}

/**
 * fcoe_logo_resp() - FCoE specific LOGO response handler
 * @seq: active sequence in the LOGO exchange
 * @fp: response frame, or error encoded in a pointer (timeout)
 * @arg: pointer to the fc_lport structure
 *
 * This handles MAC address management for FCoE, then passes control on to
 * the libfc LOGO response handler.
 */
static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_lport *lport = arg;
	static u8 zero_mac[ETH_ALEN] = { 0 };

	if (!IS_ERR(fp))
		fcoe_update_src_mac(lport, zero_mac);
	fc_lport_logo_resp(seq, fp, lport);
}

/**
 * fcoe_elsct_send() - FCoE specific ELS handler
 *
 * This does special case handling of FIP encapsulated ELS exchanges for
 * FCoE, using FCoE specific response handlers and passing the FIP controller
 * as the argument (the lport is still available from the exchange).
 *
 * Most of the work here is just handed off to the libfc routine.
 */
static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
				      struct fc_frame *fp, unsigned int op,
				      void (*resp)(struct fc_seq *,
						   struct fc_frame *,
						   void *),
				      void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->fcoe;
	struct fcoe_ctlr *fip = &fcoe->ctlr;
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (op) {
	case ELS_FLOGI:
	case ELS_FDISC:
		return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
				     fip, timeout);
	case ELS_LOGO:
		/* only hook onto fabric logouts, not port logouts */
		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
			break;
		return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
				     lport, timeout);
	}
	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}
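/*
 * Illustrative sketch (not built): the test used by the ELS_LOGO case in
 * fcoe_elsct_send() above.  Only a logout addressed to the fabric login
 * address (FC_FID_FLOGI) takes the FCoE specific response handler, since
 * that is where fcoe_logo_resp() drops the granted MAC address; a LOGO to
 * an ordinary N_Port is left alone.  The helper name is hypothetical.
 */
#if 0
static bool fcoe_example_is_fabric_logo(struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	/* D_ID is a 24-bit, big-endian field in the FC header */
	return ntoh24(fh->fh_d_id) == FC_FID_FLOGI;
}
#endif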
/**
 * fcoe_vport_create() - create an fc_host/scsi_host for a vport
 * @vport: fc_vport object to create a new fc_host for
 * @disabled: start the new fc_host in a disabled state by default?
 *
 * Returns: 0 for success
 */
static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fcoe_port *port = lport_priv(n_port);
	struct fcoe_interface *fcoe = port->fcoe;
	struct net_device *netdev = fcoe->netdev;
	struct fc_lport *vn_port;

	mutex_lock(&fcoe_config_mutex);
	vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
	mutex_unlock(&fcoe_config_mutex);

	if (IS_ERR(vn_port)) {
		printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
		       netdev->name);
		return -EIO;
	}

	if (disabled) {
		fc_vport_set_state(vport, FC_VPORT_DISABLED);
	} else {
		vn_port->boot_time = jiffies;
		fc_fabric_login(vn_port);
		fc_vport_setlink(vn_port);
	}
	return 0;
}

/**
 * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
 * @vport: fc_vport object that is being destroyed
 *
 * Returns: 0 for success
 */
static int fcoe_vport_destroy(struct fc_vport *vport)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fc_lport *vn_port = vport->dd_data;
	struct fcoe_port *port = lport_priv(vn_port);

	mutex_lock(&n_port->lp_mutex);
	list_del(&vn_port->list);
	mutex_unlock(&n_port->lp_mutex);
	schedule_work(&port->destroy_work);
	return 0;
}

/**
 * fcoe_vport_disable() - change vport state
 * @vport: vport to bring online/offline
 * @disable: should the vport be disabled?
 */
static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
{
	struct fc_lport *lport = vport->dd_data;

	if (disable) {
		fc_vport_set_state(vport, FC_VPORT_DISABLED);
		fc_fabric_logoff(lport);
	} else {
		lport->boot_time = jiffies;
		fc_fabric_login(lport);
		fc_vport_setlink(lport);
	}

	return 0;
}
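/*
 * Illustrative sketch (not built): how the NPIV handlers above are reached.
 * They are not called from this file's data path; the FC transport class
 * invokes them through the driver's fc_function_template.  The template
 * name below is hypothetical and the field names are assumed to match
 * scsi_transport_fc.h of this kernel generation.
 */
#if 0
static struct fc_function_template example_fcoe_vport_functions = {
	.vport_create	= fcoe_vport_create,
	.vport_delete	= fcoe_vport_destroy,
	.vport_disable	= fcoe_vport_disable,
};
#endif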
/**
 * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
 * @vport: fc_vport with a new symbolic name string
 *
 * After generating a new symbolic name string, a new RSPN_ID request is
 * sent to the name server.  There is no response handler, so if it fails
 * for some reason it will not be retried.
 */
static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
{
	struct fc_lport *lport = vport->dd_data;
	struct fc_frame *fp;
	size_t len;

	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
		 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
		 fcoe_netdev(lport)->name, vport->symbolic_name);

	if (lport->state != LPORT_ST_READY)
		return;

	len = strnlen(fc_host_symbolic_name(lport->host), 255);
	fp = fc_frame_alloc(lport,
			    sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_rspn) + len);
	if (!fp)
		return;
	lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
			     NULL, NULL, 3 * lport->r_a_tov);
}

/**
 * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
 * @lport: the local port
 * @fc_lesb: the link error status block
 */
static void fcoe_get_lesb(struct fc_lport *lport,
			  struct fc_els_lesb *fc_lesb)
{
	unsigned int cpu;
	u32 lfc, vlfc, mdac;
	struct fcoe_dev_stats *devst;
	struct fcoe_fc_els_lesb *lesb;
	struct net_device *netdev = fcoe_netdev(lport);

	lfc = 0;
	vlfc = 0;
	mdac = 0;
	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
	memset(lesb, 0, sizeof(*lesb));
	for_each_possible_cpu(cpu) {
		devst = per_cpu_ptr(lport->dev_stats, cpu);
		lfc += devst->LinkFailureCount;
		vlfc += devst->VLinkFailureCount;
		mdac += devst->MissDiscAdvCount;
	}
	lesb->lesb_link_fail = htonl(lfc);
	lesb->lesb_vlink_fail = htonl(vlfc);
	lesb->lesb_miss_fka = htonl(mdac);
	lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
}

/**
 * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
 * @lport: the local port
 * @port_id: the port ID
 * @fp: the received frame, if any, that caused the port_id to be set.
 *
 * This routine handles the case where we received a FLOGI and are
 * entering point-to-point mode.  We need to call fcoe_ctlr_recv_flogi()
 * so it can set the non-mapped mode and gateway address.
 *
 * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
 */
static void fcoe_set_port_id(struct fc_lport *lport,
			     u32 port_id, struct fc_frame *fp)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->fcoe;

	if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
		fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
}