/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by the ULD, i.e., the FCP layer of libfc,
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there is an error, force to invalidate ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	if (ddp->sgl)
		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: non-zero to set up the context for target mode DDP
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;
	struct pci_pool *pool;
	unsigned int cpu;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	/* set up DMA from the scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from the per cpu ddp pool */
	cpu = get_cpu();
	pool = *per_cpu_ptr(fcoe->pool, cpu);
	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have a non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, then add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;
	spin_lock_bh(&fcoe->lock);

	/* turn on last frame indication for target mode as the target is
	 * supposed to send FCP_RSP when it is done.
	 */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
	/* program filter context */
	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

	spin_unlock_bh(&fcoe->lock);

	return 1;

out_noddp_free:
	pci_pool_free(pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	put_cpu();
	return 0;
}

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
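
/*
 * Illustrative sketch, not part of the driver: a consumer such as the FCoE
 * transport reaches the two DDP entry points above only through
 * net_device_ops, roughly as below. The local variable names here are
 * hypothetical.
 *
 *	const struct net_device_ops *ops = netdev->netdev_ops;
 *
 *	if (ops->ndo_fcoe_ddp_setup &&
 *	    ops->ndo_fcoe_ddp_setup(netdev, xid, sgl, sgc)) {
 *		// DDP context armed: the hardware may now place the read
 *		// data for this exchange directly into the sgl pages.
 *	}
 *
 *	// on I/O completion, the ULD releases the context and learns how
 *	// many bytes were direct-placed for this xid:
 *	ddped_len = ops->ndo_fcoe_ddp_done(netdev, xid);
 */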

/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 * @staterr: the status and error bits from the rx descriptor
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of the data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb,
		   u32 staterr)
{
	u16 xid;
	u32 fctl;
	u32 fceofe, fcerr, fcstat;
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;

	fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
	fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
	if (fcerr == IXGBE_FCERR_BADCRC)
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));
	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto ddp_out;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;

	if (fcerr | fceofe)
		goto ddp_out;

	fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
	if (fcstat) {
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		/* unmap the sg list when FCP_RSP is received */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
			pci_unmap_sg(adapter->pdev, ddp->sgl,
				     ddp->sgc, DMA_FROM_DEVICE);
			ddp->err = (fcerr | fceofe);
			ddp->sgl = NULL;
			ddp->sgc = 0;
		}
		/* return 0 to bypass going to ULD for DDPed data */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
			rc = 0;
		else if (ddp->len)
			rc = ddp->len;
	}
	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed, but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back. As this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
426 */ 427 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && 428 (fctl & FC_FC_END_SEQ)) { 429 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); 430 crc->fcoe_eof = FC_EOF_T; 431 } 432 ddp_out: 433 return rc; 434 } 435 436 /** 437 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) 438 * @tx_ring: tx desc ring 439 * @skb: associated skb 440 * @tx_flags: tx flags 441 * @hdr_len: hdr_len to be returned 442 * 443 * This sets up large send offload for FCoE 444 * 445 * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error 446 */ 447 int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, 448 u32 tx_flags, u8 *hdr_len) 449 { 450 struct fc_frame_header *fh; 451 u32 vlan_macip_lens; 452 u32 fcoe_sof_eof = 0; 453 u32 mss_l4len_idx; 454 u8 sof, eof; 455 456 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { 457 dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", 458 skb_shinfo(skb)->gso_type); 459 return -EINVAL; 460 } 461 462 /* resets the header to point fcoe/fc */ 463 skb_set_network_header(skb, skb->mac_len); 464 skb_set_transport_header(skb, skb->mac_len + 465 sizeof(struct fcoe_hdr)); 466 467 /* sets up SOF and ORIS */ 468 sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; 469 switch (sof) { 470 case FC_SOF_I2: 471 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; 472 break; 473 case FC_SOF_I3: 474 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | 475 IXGBE_ADVTXD_FCOEF_ORIS; 476 break; 477 case FC_SOF_N2: 478 break; 479 case FC_SOF_N3: 480 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; 481 break; 482 default: 483 dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); 484 return -EINVAL; 485 } 486 487 /* the first byte of the last dword is EOF */ 488 skb_copy_bits(skb, skb->len - 4, &eof, 1); 489 /* sets up EOF and ORIE */ 490 switch (eof) { 491 case FC_EOF_N: 492 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; 493 break; 494 case FC_EOF_T: 495 /* lso needs ORIE */ 496 if (skb_is_gso(skb)) 497 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | 498 IXGBE_ADVTXD_FCOEF_ORIE; 499 else 500 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; 501 break; 502 case FC_EOF_NI: 503 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; 504 break; 505 case FC_EOF_A: 506 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; 507 break; 508 default: 509 dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); 510 return -EINVAL; 511 } 512 513 /* sets up PARINC indicating data offset */ 514 fh = (struct fc_frame_header *)skb_transport_header(skb); 515 if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) 516 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; 517 518 /* include trailer in headlen as it is replicated per frame */ 519 *hdr_len = sizeof(struct fcoe_crc_eof); 520 521 /* hdr_len includes fc_hdr if FCoE LSO is enabled */ 522 if (skb_is_gso(skb)) 523 *hdr_len += (skb_transport_offset(skb) + 524 sizeof(struct fc_frame_header)); 525 526 /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ 527 mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 528 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; 529 530 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 531 vlan_macip_lens = skb_transport_offset(skb) + 532 sizeof(struct fc_frame_header); 533 vlan_macip_lens |= (skb_transport_offset(skb) - 4) 534 << IXGBE_ADVTXD_MACLEN_SHIFT; 535 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 536 537 /* write context desc */ 538 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, 539 IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); 540 541 return skb_is_gso(skb); 542 } 543 544 static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) 545 { 
static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
{
	unsigned int cpu;
	struct pci_pool **pool;

	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(fcoe->pool, cpu);
		if (*pool)
			pci_pool_destroy(*pool);
	}
	free_percpu(fcoe->pool);
	fcoe->pool = NULL;
}

static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	unsigned int cpu;
	struct pci_pool **pool;
	char pool_name[32];

	fcoe->pool = alloc_percpu(struct pci_pool *);
	if (!fcoe->pool)
		return;

	/* allocate a pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
		pool = per_cpu_ptr(fcoe->pool, cpu);
		*pool = pci_pool_create(pool_name,
					adapter->pdev, IXGBE_FCPTR_MAX,
					IXGBE_FCPTR_ALIGN, PAGE_SIZE);
		if (!*pool) {
			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
			ixgbe_fcoe_ddp_pools_free(fcoe);
			return;
		}
	}
}

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_q, fcoe_i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
	unsigned int cpu;

	if (!fcoe->pool) {
		spin_lock_init(&fcoe->lock);

		ixgbe_fcoe_ddp_pools_alloc(adapter);
		if (!fcoe->pool) {
			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
			return;
		}

		/* Extra buffer to be shared by all DDPs for HW work around */
		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
		if (fcoe->extra_ddp_buffer == NULL) {
			e_err(drv, "failed to allocate extra DDP buffer\n");
			goto out_ddp_pools;
		}

		fcoe->extra_ddp_buffer_dma =
			dma_map_single(&adapter->pdev->dev,
				       fcoe->extra_ddp_buffer,
				       IXGBE_FCBUFF_MIN,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev,
				      fcoe->extra_ddp_buffer_dma)) {
			e_err(drv, "failed to map extra DDP buffer\n");
			goto out_extra_ddp_buffer;
		}

		/* Alloc per cpu mem to count the ddp alloc failure number */
		fcoe->pcpu_noddp = alloc_percpu(u64);
		if (!fcoe->pcpu_noddp) {
			e_err(drv, "failed to alloc noddp counter\n");
			goto out_pcpu_noddp_alloc_fail;
		}

		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
		if (!fcoe->pcpu_noddp_ext_buff) {
			e_err(drv, "failed to alloc noddp extra buff cnt\n");
			goto out_pcpu_noddp_extra_buff_alloc_fail;
		}

		for_each_possible_cpu(cpu) {
			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
		}
	}

	/* Enable L2 eth type filter for FCoE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
	/* Enable L2 eth type filter for FIP */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	if (adapter->ring_feature[RING_F_FCOE].indices) {
		/* Use multiple rx queues for FCoE by redirection table */
		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
			fcoe_i = f->mask + i % f->indices;
			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
		IXGBE_WRITE_REG(hw,
				IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
	} else {
		/* Use single rx queue for FCoE */
		fcoe_i = f->mask;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
				IXGBE_ETQS_QUEUE_EN |
				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	}
	/* send FIP frames to the first FCoE queue */
	fcoe_i = f->mask;
	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
	return;
out_pcpu_noddp_extra_buff_alloc_fail:
	free_percpu(fcoe->pcpu_noddp);
out_pcpu_noddp_alloc_fail:
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
out_extra_ddp_buffer:
	kfree(fcoe->extra_ddp_buffer);
out_ddp_pools:
	ixgbe_fcoe_ddp_pools_free(fcoe);
}

/**
 * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!fcoe->pool)
		return;

	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	free_percpu(fcoe->pcpu_noddp);
	free_percpu(fcoe->pcpu_noddp_ext_buff);
	kfree(fcoe->extra_ddp_buffer);
	ixgbe_fcoe_ddp_pools_free(fcoe);
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_enable;

	atomic_inc(&fcoe->refcnt);
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		goto out_enable;

	e_info(drv, "Enabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
	netdev->features |= NETIF_F_FCOE_CRC;
	netdev->features |= NETIF_F_FSO;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;

	ixgbe_init_interrupt_scheme(adapter);
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_enable:
	return rc;
}
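
/*
 * Descriptive note on the enable/disable pair: the calls are reference
 * counted through fcoe->refcnt, so repeated enables from the FCoE stack are
 * cheap; the expensive work of stopping the netdev and rebuilding the
 * interrupt scheme happens only on the first enable and, in
 * ixgbe_fcoe_disable() below, only when the last reference is dropped.
 */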

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_disable;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		goto out_disable;

	if (!atomic_dec_and_test(&fcoe->refcnt))
		goto out_disable;

	e_info(drv, "Disabling FCoE offload features.\n");
	netdev->features &= ~NETIF_F_FCOE_CRC;
	netdev->features &= ~NETIF_F_FSO;
	netdev->features &= ~NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = 0;
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = 0;
	ixgbe_cleanup_fcoe(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_disable:
	return rc;
}

/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding net_device
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the san
 * mac address are valid. The wwn is then formed based on the NAA-2 format
 * for the IEEE Extended name identifier (ref. T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	int rc = -EINVAL;
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8) |
		       ((u64) mac->san_addr[5]);
		rc = 0;
	}
	return rc;
}
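
/*
 * Worked example for ixgbe_fcoe_get_wwn(), using hypothetical values: with a
 * WWNN prefix of 0x2000 and a SAN MAC address of 00:1b:21:aa:bb:cc, the
 * composition above yields *wwn == 0x2000001b21aabbcc, i.e. the name
 * 20:00:00:1b:21:aa:bb:cc in the usual colon notation.
 */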