1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2014 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27 *******************************************************************************/ 28 29 #include "ixgbe.h" 30 #include <linux/if_ether.h> 31 #include <linux/gfp.h> 32 #include <linux/if_vlan.h> 33 #include <scsi/scsi_cmnd.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/fc/fc_fs.h> 36 #include <scsi/fc/fc_fcoe.h> 37 #include <scsi/libfc.h> 38 #include <scsi/libfcoe.h> 39 40 /** 41 * ixgbe_fcoe_clear_ddp - clear the given ddp context 42 * @ddp: ptr to the ixgbe_fcoe_ddp 43 * 44 * Returns : none 45 * 46 */ 47 static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) 48 { 49 ddp->len = 0; 50 ddp->err = 1; 51 ddp->udl = NULL; 52 ddp->udp = 0UL; 53 ddp->sgl = NULL; 54 ddp->sgc = 0; 55 } 56 57 /** 58 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid 59 * @netdev: the corresponding net_device 60 * @xid: the xid that corresponding ddp will be freed 61 * 62 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done 63 * and it is expected to be called by ULD, i.e., FCP layer of libfc 64 * to release the corresponding ddp context when the I/O is done. 65 * 66 * Returns : data length already ddp-ed in bytes 67 */ 68 int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) 69 { 70 int len; 71 struct ixgbe_fcoe *fcoe; 72 struct ixgbe_adapter *adapter; 73 struct ixgbe_fcoe_ddp *ddp; 74 struct ixgbe_hw *hw; 75 u32 fcbuff; 76 77 if (!netdev) 78 return 0; 79 80 if (xid >= IXGBE_FCOE_DDP_MAX) 81 return 0; 82 83 adapter = netdev_priv(netdev); 84 fcoe = &adapter->fcoe; 85 ddp = &fcoe->ddp[xid]; 86 if (!ddp->udl) 87 return 0; 88 89 hw = &adapter->hw; 90 len = ddp->len; 91 /* if no error then skip ddp context invalidation */ 92 if (!ddp->err) 93 goto skip_ddpinv; 94 95 if (hw->mac.type == ixgbe_mac_X550) { 96 /* X550 does not require DDP FCoE lock */ 97 98 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); 99 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), 100 (xid | IXGBE_FCFLTRW_WE)); 101 102 /* program FCBUFF */ 103 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); 104 105 /* program FCDMARW */ 106 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), 107 (xid | IXGBE_FCDMARW_WE)); 108 109 /* read FCBUFF to check context invalidated */ 110 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), 111 (xid | IXGBE_FCDMARW_RE)); 112 fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); 113 } else { 114 /* other hardware requires DDP FCoE lock */ 115 spin_lock_bh(&fcoe->lock); 116 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); 117 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, 118 (xid | IXGBE_FCFLTRW_WE)); 119 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); 120 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, 121 (xid | IXGBE_FCDMARW_WE)); 122 123 /* guaranteed to be invalidated after 100us */ 124 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, 125 (xid | IXGBE_FCDMARW_RE)); 126 fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); 127 spin_unlock_bh(&fcoe->lock); 128 } 129 130 if (fcbuff & IXGBE_FCBUFF_VALID) 131 usleep_range(100, 150); 132 133 skip_ddpinv: 134 if (ddp->sgl) 135 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, 136 DMA_FROM_DEVICE); 137 if (ddp->pool) { 138 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); 139 ddp->pool = NULL; 140 } 141 142 ixgbe_fcoe_clear_ddp(ddp); 143 144 return len; 145 } 146 147 /** 148 * ixgbe_fcoe_ddp_setup - called to set up ddp context 149 * @netdev: the corresponding net_device 150 * @xid: the exchange id requesting ddp 151 * @sgl: the scatter-gather list for this request 152 * @sgc: the number of scatter-gather items 153 * 154 * Returns : 1 for success and 0 for no ddp 155 */ 156 static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, 157 struct scatterlist *sgl, unsigned int sgc, 158 int target_mode) 159 { 160 struct ixgbe_adapter *adapter; 161 struct ixgbe_hw *hw; 162 struct ixgbe_fcoe *fcoe; 163 struct ixgbe_fcoe_ddp *ddp; 164 struct ixgbe_fcoe_ddp_pool *ddp_pool; 165 struct scatterlist *sg; 166 unsigned int i, j, dmacount; 167 unsigned int len; 168 static const unsigned int bufflen = IXGBE_FCBUFF_MIN; 169 unsigned int firstoff = 0; 170 unsigned int lastsize; 171 unsigned int thisoff = 0; 172 unsigned int thislen = 0; 173 u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; 174 dma_addr_t addr = 0; 175 176 if (!netdev || !sgl) 177 return 0; 178 179 adapter = netdev_priv(netdev); 180 if (xid >= IXGBE_FCOE_DDP_MAX) { 181 e_warn(drv, "xid=0x%x out-of-range\n", xid); 182 return 0; 183 } 184 185 /* no DDP if we are already down or resetting */ 186 if (test_bit(__IXGBE_DOWN, &adapter->state) || 187 test_bit(__IXGBE_RESETTING, &adapter->state)) 188 return 0; 189 190 fcoe = &adapter->fcoe; 191 ddp = &fcoe->ddp[xid]; 192 if (ddp->sgl) { 193 e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", 194 xid, ddp->sgl, ddp->sgc); 195 return 0; 196 } 197 ixgbe_fcoe_clear_ddp(ddp); 198 199 200 if (!fcoe->ddp_pool) { 201 e_warn(drv, "No ddp_pool resources allocated\n"); 202 return 0; 203 } 204 205 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); 206 if (!ddp_pool->pool) { 207 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); 208 goto out_noddp; 209 } 210 211 /* setup dma from scsi command sgl */ 212 dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); 213 if (dmacount == 0) { 214 e_err(drv, "xid 0x%x DMA map error\n", xid); 215 goto out_noddp; 216 } 217 218 /* alloc the udl from per cpu ddp pool */ 219 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); 220 if (!ddp->udl) { 221 e_err(drv, "failed allocated ddp context\n"); 222 goto out_noddp_unmap; 223 } 224 ddp->pool = ddp_pool->pool; 225 ddp->sgl = sgl; 226 ddp->sgc = sgc; 227 228 j = 0; 229 for_each_sg(sgl, sg, dmacount, i) { 230 addr = sg_dma_address(sg); 231 len = sg_dma_len(sg); 232 while (len) { 233 /* max number of buffers allowed in one DDP context */ 234 if (j >= IXGBE_BUFFCNT_MAX) { 235 ddp_pool->noddp++; 236 goto out_noddp_free; 237 } 238 239 /* get the offset of length of current buffer */ 240 thisoff = addr & ((dma_addr_t)bufflen - 1); 241 thislen = min((bufflen - thisoff), len); 242 /* 243 * all but the 1st buffer (j == 0) 244 * must be aligned on bufflen 245 */ 246 if ((j != 0) && (thisoff)) 247 goto out_noddp_free; 248 /* 249 * all but the last buffer 250 * ((i == (dmacount - 1)) && (thislen == len)) 251 * must end at bufflen 252 */ 253 if (((i != (dmacount - 1)) || (thislen != len)) 254 && ((thislen + thisoff) != bufflen)) 255 goto out_noddp_free; 256 257 ddp->udl[j] = (u64)(addr - thisoff); 258 /* only the first buffer may have none-zero offset */ 259 if (j == 0) 260 firstoff = thisoff; 261 len -= thislen; 262 addr += thislen; 263 j++; 264 } 265 } 266 /* only the last buffer may have non-full bufflen */ 267 lastsize = thisoff + thislen; 268 269 /* 270 * lastsize can not be buffer len. 271 * If it is then adding another buffer with lastsize = 1. 272 */ 273 if (lastsize == bufflen) { 274 if (j >= IXGBE_BUFFCNT_MAX) { 275 ddp_pool->noddp_ext_buff++; 276 goto out_noddp_free; 277 } 278 279 ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); 280 j++; 281 lastsize = 1; 282 } 283 put_cpu(); 284 285 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 286 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 287 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 288 /* Set WRCONTX bit to allow DDP for target */ 289 if (target_mode) 290 fcbuff |= (IXGBE_FCBUFF_WRCONTX); 291 fcbuff |= (IXGBE_FCBUFF_VALID); 292 293 fcdmarw = xid; 294 fcdmarw |= IXGBE_FCDMARW_WE; 295 fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); 296 297 fcfltrw = xid; 298 fcfltrw |= IXGBE_FCFLTRW_WE; 299 300 /* program DMA context */ 301 hw = &adapter->hw; 302 303 /* turn on last frame indication for target mode as FCP_RSPtarget is 304 * supposed to send FCP_RSP when it is done. */ 305 if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { 306 set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); 307 fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); 308 fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; 309 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); 310 } 311 312 if (hw->mac.type == ixgbe_mac_X550) { 313 /* X550 does not require DDP lock */ 314 315 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), 316 ddp->udp & DMA_BIT_MASK(32)); 317 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); 318 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); 319 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); 320 /* program filter context */ 321 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); 322 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); 323 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); 324 } else { 325 /* DDP lock for indirect DDP context access */ 326 spin_lock_bh(&fcoe->lock); 327 328 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); 329 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); 330 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); 331 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); 332 /* program filter context */ 333 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); 334 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); 335 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); 336 337 spin_unlock_bh(&fcoe->lock); 338 } 339 340 return 1; 341 342 out_noddp_free: 343 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); 344 ixgbe_fcoe_clear_ddp(ddp); 345 346 out_noddp_unmap: 347 dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); 348 out_noddp: 349 put_cpu(); 350 return 0; 351 } 352 353 /** 354 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode 355 * @netdev: the corresponding net_device 356 * @xid: the exchange id requesting ddp 357 * @sgl: the scatter-gather list for this request 358 * @sgc: the number of scatter-gather items 359 * 360 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup 361 * and is expected to be called from ULD, e.g., FCP layer of libfc 362 * to set up ddp for the corresponding xid of the given sglist for 363 * the corresponding I/O. 364 * 365 * Returns : 1 for success and 0 for no ddp 366 */ 367 int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 368 struct scatterlist *sgl, unsigned int sgc) 369 { 370 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); 371 } 372 373 /** 374 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode 375 * @netdev: the corresponding net_device 376 * @xid: the exchange id requesting ddp 377 * @sgl: the scatter-gather list for this request 378 * @sgc: the number of scatter-gather items 379 * 380 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target 381 * and is expected to be called from ULD, e.g., FCP layer of libfc 382 * to set up ddp for the corresponding xid of the given sglist for 383 * the corresponding I/O. The DDP in target mode is a write I/O request 384 * from the initiator. 385 * 386 * Returns : 1 for success and 0 for no ddp 387 */ 388 int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, 389 struct scatterlist *sgl, unsigned int sgc) 390 { 391 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); 392 } 393 394 /** 395 * ixgbe_fcoe_ddp - check ddp status and mark it done 396 * @adapter: ixgbe adapter 397 * @rx_desc: advanced rx descriptor 398 * @skb: the skb holding the received data 399 * 400 * This checks ddp status. 401 * 402 * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates 403 * not passing the skb to ULD, > 0 indicates is the length of data 404 * being ddped. 405 */ 406 int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, 407 union ixgbe_adv_rx_desc *rx_desc, 408 struct sk_buff *skb) 409 { 410 int rc = -EINVAL; 411 struct ixgbe_fcoe *fcoe; 412 struct ixgbe_fcoe_ddp *ddp; 413 struct fc_frame_header *fh; 414 struct fcoe_crc_eof *crc; 415 __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); 416 __le32 ddp_err; 417 int ddp_max; 418 u32 fctl; 419 u16 xid; 420 421 if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC)) 422 skb->ip_summed = CHECKSUM_NONE; 423 else 424 skb->ip_summed = CHECKSUM_UNNECESSARY; 425 426 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) 427 fh = (struct fc_frame_header *)(skb->data + 428 sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); 429 else 430 fh = (struct fc_frame_header *)(skb->data + 431 sizeof(struct fcoe_hdr)); 432 433 fctl = ntoh24(fh->fh_f_ctl); 434 if (fctl & FC_FC_EX_CTX) 435 xid = be16_to_cpu(fh->fh_ox_id); 436 else 437 xid = be16_to_cpu(fh->fh_rx_id); 438 439 ddp_max = IXGBE_FCOE_DDP_MAX; 440 /* X550 has different DDP Max limit */ 441 if (adapter->hw.mac.type == ixgbe_mac_X550) 442 ddp_max = IXGBE_FCOE_DDP_MAX_X550; 443 if (xid >= ddp_max) 444 return -EINVAL; 445 446 fcoe = &adapter->fcoe; 447 ddp = &fcoe->ddp[xid]; 448 if (!ddp->udl) 449 return -EINVAL; 450 451 ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | 452 IXGBE_RXDADV_ERR_FCERR); 453 if (ddp_err) 454 return -EINVAL; 455 456 switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { 457 /* return 0 to bypass going to ULD for DDPed data */ 458 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): 459 /* update length of DDPed data */ 460 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 461 rc = 0; 462 break; 463 /* unmap the sg list when FCPRSP is received */ 464 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): 465 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, 466 ddp->sgc, DMA_FROM_DEVICE); 467 ddp->err = ddp_err; 468 ddp->sgl = NULL; 469 ddp->sgc = 0; 470 /* fall through */ 471 /* if DDP length is present pass it through to ULD */ 472 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): 473 /* update length of DDPed data */ 474 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 475 if (ddp->len) 476 rc = ddp->len; 477 break; 478 /* no match will return as an error */ 479 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): 480 default: 481 break; 482 } 483 484 /* In target mode, check the last data frame of the sequence. 485 * For DDP in target mode, data is already DDPed but the header 486 * indication of the last data frame ould allow is to tell if we 487 * got all the data and the ULP can send FCP_RSP back, as this is 488 * not a full fcoe frame, we fill the trailer here so it won't be 489 * dropped by the ULP stack. 490 */ 491 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && 492 (fctl & FC_FC_END_SEQ)) { 493 skb_linearize(skb); 494 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); 495 crc->fcoe_eof = FC_EOF_T; 496 } 497 498 return rc; 499 } 500 501 /** 502 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) 503 * @tx_ring: tx desc ring 504 * @first: first tx_buffer structure containing skb, tx_flags, and protocol 505 * @hdr_len: hdr_len to be returned 506 * 507 * This sets up large send offload for FCoE 508 * 509 * Returns : 0 indicates success, < 0 for error 510 */ 511 int ixgbe_fso(struct ixgbe_ring *tx_ring, 512 struct ixgbe_tx_buffer *first, 513 u8 *hdr_len) 514 { 515 struct sk_buff *skb = first->skb; 516 struct fc_frame_header *fh; 517 u32 vlan_macip_lens; 518 u32 fcoe_sof_eof = 0; 519 u32 mss_l4len_idx; 520 u8 sof, eof; 521 522 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { 523 dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", 524 skb_shinfo(skb)->gso_type); 525 return -EINVAL; 526 } 527 528 /* resets the header to point fcoe/fc */ 529 skb_set_network_header(skb, skb->mac_len); 530 skb_set_transport_header(skb, skb->mac_len + 531 sizeof(struct fcoe_hdr)); 532 533 /* sets up SOF and ORIS */ 534 sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; 535 switch (sof) { 536 case FC_SOF_I2: 537 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; 538 break; 539 case FC_SOF_I3: 540 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | 541 IXGBE_ADVTXD_FCOEF_ORIS; 542 break; 543 case FC_SOF_N2: 544 break; 545 case FC_SOF_N3: 546 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; 547 break; 548 default: 549 dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); 550 return -EINVAL; 551 } 552 553 /* the first byte of the last dword is EOF */ 554 skb_copy_bits(skb, skb->len - 4, &eof, 1); 555 /* sets up EOF and ORIE */ 556 switch (eof) { 557 case FC_EOF_N: 558 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; 559 break; 560 case FC_EOF_T: 561 /* lso needs ORIE */ 562 if (skb_is_gso(skb)) 563 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | 564 IXGBE_ADVTXD_FCOEF_ORIE; 565 else 566 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; 567 break; 568 case FC_EOF_NI: 569 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; 570 break; 571 case FC_EOF_A: 572 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; 573 break; 574 default: 575 dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); 576 return -EINVAL; 577 } 578 579 /* sets up PARINC indicating data offset */ 580 fh = (struct fc_frame_header *)skb_transport_header(skb); 581 if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) 582 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; 583 584 /* include trailer in headlen as it is replicated per frame */ 585 *hdr_len = sizeof(struct fcoe_crc_eof); 586 587 /* hdr_len includes fc_hdr if FCoE LSO is enabled */ 588 if (skb_is_gso(skb)) { 589 *hdr_len += skb_transport_offset(skb) + 590 sizeof(struct fc_frame_header); 591 /* update gso_segs and bytecount */ 592 first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 593 skb_shinfo(skb)->gso_size); 594 first->bytecount += (first->gso_segs - 1) * *hdr_len; 595 first->tx_flags |= IXGBE_TX_FLAGS_TSO; 596 } 597 598 /* set flag indicating FCOE to ixgbe_tx_map call */ 599 first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; 600 601 /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ 602 mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 603 604 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 605 vlan_macip_lens = skb_transport_offset(skb) + 606 sizeof(struct fc_frame_header); 607 vlan_macip_lens |= (skb_transport_offset(skb) - 4) 608 << IXGBE_ADVTXD_MACLEN_SHIFT; 609 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 610 611 /* write context desc */ 612 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, 613 IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); 614 615 return 0; 616 } 617 618 static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) 619 { 620 struct ixgbe_fcoe_ddp_pool *ddp_pool; 621 622 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); 623 if (ddp_pool->pool) 624 dma_pool_destroy(ddp_pool->pool); 625 ddp_pool->pool = NULL; 626 } 627 628 static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, 629 struct device *dev, 630 unsigned int cpu) 631 { 632 struct ixgbe_fcoe_ddp_pool *ddp_pool; 633 struct dma_pool *pool; 634 char pool_name[32]; 635 636 snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); 637 638 pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, 639 IXGBE_FCPTR_ALIGN, PAGE_SIZE); 640 if (!pool) 641 return -ENOMEM; 642 643 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); 644 ddp_pool->pool = pool; 645 ddp_pool->noddp = 0; 646 ddp_pool->noddp_ext_buff = 0; 647 648 return 0; 649 } 650 651 /** 652 * ixgbe_configure_fcoe - configures registers for fcoe at start 653 * @adapter: ptr to ixgbe adapter 654 * 655 * This sets up FCoE related registers 656 * 657 * Returns : none 658 */ 659 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) 660 { 661 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 662 struct ixgbe_hw *hw = &adapter->hw; 663 int i, fcoe_q, fcoe_i, fcoe_q_h = 0; 664 int fcreta_size; 665 u32 etqf; 666 667 /* Minimal functionality for FCoE requires at least CRC offloads */ 668 if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) 669 return; 670 671 /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ 672 etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; 673 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 674 etqf |= IXGBE_ETQF_POOL_ENABLE; 675 etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; 676 } 677 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); 678 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); 679 680 /* leave registers un-configured if FCoE is disabled */ 681 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 682 return; 683 684 /* Use one or more Rx queues for FCoE by redirection table */ 685 fcreta_size = IXGBE_FCRETA_SIZE; 686 if (adapter->hw.mac.type == ixgbe_mac_X550) 687 fcreta_size = IXGBE_FCRETA_SIZE_X550; 688 689 for (i = 0; i < fcreta_size; i++) { 690 if (adapter->hw.mac.type == ixgbe_mac_X550) { 691 int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % 692 fcoe->indices); 693 fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; 694 fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & 695 IXGBE_FCRETA_ENTRY_HIGH_MASK; 696 } 697 698 fcoe_i = fcoe->offset + (i % fcoe->indices); 699 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; 700 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; 701 fcoe_q |= fcoe_q_h; 702 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); 703 } 704 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 705 706 /* Enable L2 EtherType filter for FIP */ 707 etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; 708 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 709 etqf |= IXGBE_ETQF_POOL_ENABLE; 710 etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; 711 } 712 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); 713 714 /* Send FIP frames to the first FCoE queue */ 715 fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; 716 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), 717 IXGBE_ETQS_QUEUE_EN | 718 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); 719 720 /* Configure FCoE Rx control */ 721 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, 722 IXGBE_FCRXCTRL_FCCRCBO | 723 (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); 724 } 725 726 /** 727 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources 728 * @adapter : ixgbe adapter 729 * 730 * Cleans up outstanding ddp context resources 731 * 732 * Returns : none 733 */ 734 void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) 735 { 736 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 737 int cpu, i, ddp_max; 738 739 /* do nothing if no DDP pools were allocated */ 740 if (!fcoe->ddp_pool) 741 return; 742 743 ddp_max = IXGBE_FCOE_DDP_MAX; 744 /* X550 has different DDP Max limit */ 745 if (adapter->hw.mac.type == ixgbe_mac_X550) 746 ddp_max = IXGBE_FCOE_DDP_MAX_X550; 747 748 for (i = 0; i < ddp_max; i++) 749 ixgbe_fcoe_ddp_put(adapter->netdev, i); 750 751 for_each_possible_cpu(cpu) 752 ixgbe_fcoe_dma_pool_free(fcoe, cpu); 753 754 dma_unmap_single(&adapter->pdev->dev, 755 fcoe->extra_ddp_buffer_dma, 756 IXGBE_FCBUFF_MIN, 757 DMA_FROM_DEVICE); 758 kfree(fcoe->extra_ddp_buffer); 759 760 fcoe->extra_ddp_buffer = NULL; 761 fcoe->extra_ddp_buffer_dma = 0; 762 } 763 764 /** 765 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources 766 * @adapter: ixgbe adapter 767 * 768 * Sets up ddp context resouces 769 * 770 * Returns : 0 indicates success or -EINVAL on failure 771 */ 772 int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) 773 { 774 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 775 struct device *dev = &adapter->pdev->dev; 776 void *buffer; 777 dma_addr_t dma; 778 unsigned int cpu; 779 780 /* do nothing if no DDP pools were allocated */ 781 if (!fcoe->ddp_pool) 782 return 0; 783 784 /* Extra buffer to be shared by all DDPs for HW work around */ 785 buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); 786 if (!buffer) 787 return -ENOMEM; 788 789 dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); 790 if (dma_mapping_error(dev, dma)) { 791 e_err(drv, "failed to map extra DDP buffer\n"); 792 kfree(buffer); 793 return -ENOMEM; 794 } 795 796 fcoe->extra_ddp_buffer = buffer; 797 fcoe->extra_ddp_buffer_dma = dma; 798 799 /* allocate pci pool for each cpu */ 800 for_each_possible_cpu(cpu) { 801 int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); 802 if (!err) 803 continue; 804 805 e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); 806 ixgbe_free_fcoe_ddp_resources(adapter); 807 return -ENOMEM; 808 } 809 810 return 0; 811 } 812 813 static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) 814 { 815 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 816 817 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 818 return -EINVAL; 819 820 fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); 821 822 if (!fcoe->ddp_pool) { 823 e_err(drv, "failed to allocate percpu DDP resources\n"); 824 return -ENOMEM; 825 } 826 827 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; 828 /* X550 has different DDP Max limit */ 829 if (adapter->hw.mac.type == ixgbe_mac_X550) 830 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; 831 832 return 0; 833 } 834 835 static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) 836 { 837 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 838 839 adapter->netdev->fcoe_ddp_xid = 0; 840 841 if (!fcoe->ddp_pool) 842 return; 843 844 free_percpu(fcoe->ddp_pool); 845 fcoe->ddp_pool = NULL; 846 } 847 848 /** 849 * ixgbe_fcoe_enable - turn on FCoE offload feature 850 * @netdev: the corresponding netdev 851 * 852 * Turns on FCoE offload feature in 82599. 853 * 854 * Returns : 0 indicates success or -EINVAL on failure 855 */ 856 int ixgbe_fcoe_enable(struct net_device *netdev) 857 { 858 struct ixgbe_adapter *adapter = netdev_priv(netdev); 859 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 860 861 atomic_inc(&fcoe->refcnt); 862 863 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 864 return -EINVAL; 865 866 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 867 return -EINVAL; 868 869 e_info(drv, "Enabling FCoE offload features.\n"); 870 871 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 872 e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); 873 874 if (netif_running(netdev)) 875 netdev->netdev_ops->ndo_stop(netdev); 876 877 /* Allocate per CPU memory to track DDP pools */ 878 ixgbe_fcoe_ddp_enable(adapter); 879 880 /* enable FCoE and notify stack */ 881 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; 882 netdev->features |= NETIF_F_FCOE_MTU; 883 netdev_features_change(netdev); 884 885 /* release existing queues and reallocate them */ 886 ixgbe_clear_interrupt_scheme(adapter); 887 ixgbe_init_interrupt_scheme(adapter); 888 889 if (netif_running(netdev)) 890 netdev->netdev_ops->ndo_open(netdev); 891 892 return 0; 893 } 894 895 /** 896 * ixgbe_fcoe_disable - turn off FCoE offload feature 897 * @netdev: the corresponding netdev 898 * 899 * Turns off FCoE offload feature in 82599. 900 * 901 * Returns : 0 indicates success or -EINVAL on failure 902 */ 903 int ixgbe_fcoe_disable(struct net_device *netdev) 904 { 905 struct ixgbe_adapter *adapter = netdev_priv(netdev); 906 907 if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) 908 return -EINVAL; 909 910 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 911 return -EINVAL; 912 913 e_info(drv, "Disabling FCoE offload features.\n"); 914 if (netif_running(netdev)) 915 netdev->netdev_ops->ndo_stop(netdev); 916 917 /* Free per CPU memory to track DDP pools */ 918 ixgbe_fcoe_ddp_disable(adapter); 919 920 /* disable FCoE and notify stack */ 921 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 922 netdev->features &= ~NETIF_F_FCOE_MTU; 923 924 netdev_features_change(netdev); 925 926 /* release existing queues and reallocate them */ 927 ixgbe_clear_interrupt_scheme(adapter); 928 ixgbe_init_interrupt_scheme(adapter); 929 930 if (netif_running(netdev)) 931 netdev->netdev_ops->ndo_open(netdev); 932 933 return 0; 934 } 935 936 /** 937 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port 938 * @netdev : ixgbe adapter 939 * @wwn : the world wide name 940 * @type: the type of world wide name 941 * 942 * Returns the node or port world wide name if both the prefix and the san 943 * mac address are valid, then the wwn is formed based on the NAA-2 for 944 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 945 * 946 * Returns : 0 on success 947 */ 948 int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) 949 { 950 u16 prefix = 0xffff; 951 struct ixgbe_adapter *adapter = netdev_priv(netdev); 952 struct ixgbe_mac_info *mac = &adapter->hw.mac; 953 954 switch (type) { 955 case NETDEV_FCOE_WWNN: 956 prefix = mac->wwnn_prefix; 957 break; 958 case NETDEV_FCOE_WWPN: 959 prefix = mac->wwpn_prefix; 960 break; 961 default: 962 break; 963 } 964 965 if ((prefix != 0xffff) && 966 is_valid_ether_addr(mac->san_addr)) { 967 *wwn = ((u64) prefix << 48) | 968 ((u64) mac->san_addr[0] << 40) | 969 ((u64) mac->san_addr[1] << 32) | 970 ((u64) mac->san_addr[2] << 24) | 971 ((u64) mac->san_addr[3] << 16) | 972 ((u64) mac->san_addr[4] << 8) | 973 ((u64) mac->san_addr[5]); 974 return 0; 975 } 976 return -EINVAL; 977 } 978 979 /** 980 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information 981 * @netdev : ixgbe adapter 982 * @info : HBA information 983 * 984 * Returns ixgbe HBA information 985 * 986 * Returns : 0 on success 987 */ 988 int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, 989 struct netdev_fcoe_hbainfo *info) 990 { 991 struct ixgbe_adapter *adapter = netdev_priv(netdev); 992 struct ixgbe_hw *hw = &adapter->hw; 993 int i, pos; 994 u8 buf[8]; 995 996 if (!info) 997 return -EINVAL; 998 999 /* Don't return information on unsupported devices */ 1000 if (hw->mac.type != ixgbe_mac_82599EB && 1001 hw->mac.type != ixgbe_mac_X540) 1002 return -EINVAL; 1003 1004 /* Manufacturer */ 1005 snprintf(info->manufacturer, sizeof(info->manufacturer), 1006 "Intel Corporation"); 1007 1008 /* Serial Number */ 1009 1010 /* Get the PCI-e Device Serial Number Capability */ 1011 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); 1012 if (pos) { 1013 pos += 4; 1014 for (i = 0; i < 8; i++) 1015 pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); 1016 1017 snprintf(info->serial_number, sizeof(info->serial_number), 1018 "%02X%02X%02X%02X%02X%02X%02X%02X", 1019 buf[7], buf[6], buf[5], buf[4], 1020 buf[3], buf[2], buf[1], buf[0]); 1021 } else 1022 snprintf(info->serial_number, sizeof(info->serial_number), 1023 "Unknown"); 1024 1025 /* Hardware Version */ 1026 snprintf(info->hardware_version, 1027 sizeof(info->hardware_version), 1028 "Rev %d", hw->revision_id); 1029 /* Driver Name/Version */ 1030 snprintf(info->driver_version, 1031 sizeof(info->driver_version), 1032 "%s v%s", 1033 ixgbe_driver_name, 1034 ixgbe_driver_version); 1035 /* Firmware Version */ 1036 snprintf(info->firmware_version, 1037 sizeof(info->firmware_version), 1038 "0x%08x", 1039 (adapter->eeprom_verh << 16) | 1040 adapter->eeprom_verl); 1041 1042 /* Model */ 1043 if (hw->mac.type == ixgbe_mac_82599EB) { 1044 snprintf(info->model, 1045 sizeof(info->model), 1046 "Intel 82599"); 1047 } else { 1048 snprintf(info->model, 1049 sizeof(info->model), 1050 "Intel X540"); 1051 } 1052 1053 /* Model Description */ 1054 snprintf(info->model_description, 1055 sizeof(info->model_description), 1056 "%s", 1057 ixgbe_default_device_descr); 1058 1059 return 0; 1060 } 1061 1062 /** 1063 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to 1064 * @adapter - pointer to the device adapter structure 1065 * 1066 * Return : TC that FCoE is mapped to 1067 */ 1068 u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) 1069 { 1070 #ifdef CONFIG_IXGBE_DCB 1071 return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); 1072 #else 1073 return 0; 1074 #endif 1075 } 1076