xref: /linux/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c (revision 2dbf708448c836754d25fe6108c5bfe1f5697c95)
1 /*******************************************************************************
2 
3   Intel 10 Gigabit PCI Express Linux driver
4   Copyright(c) 1999 - 2012 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 #include "ixgbe.h"
29 #include <linux/if_ether.h>
30 #include <linux/gfp.h>
31 #include <linux/if_vlan.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/fc/fc_fs.h>
35 #include <scsi/fc/fc_fcoe.h>
36 #include <scsi/libfc.h>
37 #include <scsi/libfcoe.h>
38 
39 /**
40  * ixgbe_fcoe_clear_ddp - clear the given ddp context
41  * @ddp: ptr to the ixgbe_fcoe_ddp
42  *
43  * Returns : none
44  *
45  */
46 static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
47 {
48 	ddp->len = 0;
49 	ddp->err = 1;
50 	ddp->udl = NULL;
51 	ddp->udp = 0UL;
52 	ddp->sgl = NULL;
53 	ddp->sgc = 0;
54 }
55 
56 /**
57  * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
58  * @netdev: the corresponding net_device
59  * @xid: the xid whose corresponding ddp context will be freed
60  *
61  * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
62  * and it is expected to be called by ULD, i.e., FCP layer of libfc
63  * to release the corresponding ddp context when the I/O is done.
64  *
65  * Returns : data length already ddp-ed in bytes
66  */
67 int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
68 {
69 	int len = 0;
70 	struct ixgbe_fcoe *fcoe;
71 	struct ixgbe_adapter *adapter;
72 	struct ixgbe_fcoe_ddp *ddp;
73 	u32 fcbuff;
74 
75 	if (!netdev)
76 		goto out_ddp_put;
77 
78 	if (xid >= IXGBE_FCOE_DDP_MAX)
79 		goto out_ddp_put;
80 
81 	adapter = netdev_priv(netdev);
82 	fcoe = &adapter->fcoe;
83 	ddp = &fcoe->ddp[xid];
84 	if (!ddp->udl)
85 		goto out_ddp_put;
86 
87 	len = ddp->len;
88 	/* if there is an error, force invalidation of the ddp context */
89 	if (ddp->err) {
90 		spin_lock_bh(&fcoe->lock);
91 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
92 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
93 				(xid | IXGBE_FCFLTRW_WE));
94 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
95 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
96 				(xid | IXGBE_FCDMARW_WE));
97 
98 		/* guaranteed to be invalidated after 100us */
99 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
100 				(xid | IXGBE_FCDMARW_RE));
101 		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
102 		spin_unlock_bh(&fcoe->lock);
103 		if (fcbuff & IXGBE_FCBUFF_VALID)
104 			udelay(100);
105 	}
106 	if (ddp->sgl)
107 		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
108 			     DMA_FROM_DEVICE);
109 	if (ddp->pool) {
110 		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
111 		ddp->pool = NULL;
112 	}
113 
114 	ixgbe_fcoe_clear_ddp(ddp);
115 
116 out_ddp_put:
117 	return len;
118 }
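
/*
 * Example (illustrative sketch, not taken from the driver): the routine
 * above is reached through net_device_ops.ndo_fcoe_ddp_done.  Assuming a
 * hypothetical ULD helper named example_ddp_done(), releasing the DDP
 * context for a completed exchange could look like this; the return value
 * is the number of bytes the hardware already placed directly:
 *
 *	static int example_ddp_done(struct net_device *netdev, u16 xid)
 *	{
 *		const struct net_device_ops *ops = netdev->netdev_ops;
 *
 *		if (ops->ndo_fcoe_ddp_done)
 *			return ops->ndo_fcoe_ddp_done(netdev, xid);
 *		return 0;
 *	}
 */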
119 
120 /**
121  * ixgbe_fcoe_ddp_setup - called to set up ddp context
122  * @netdev: the corresponding net_device
123  * @xid: the exchange id requesting ddp
124  * @sgl: the scatter-gather list for this request
125  * @sgc: the number of scatter-gather items
     * @target_mode: non-zero to set up DDP in target mode, 0 for initiator mode
126  *
127  * Returns : 1 for success and 0 for no ddp
128  */
129 static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
130 				struct scatterlist *sgl, unsigned int sgc,
131 				int target_mode)
132 {
133 	struct ixgbe_adapter *adapter;
134 	struct ixgbe_hw *hw;
135 	struct ixgbe_fcoe *fcoe;
136 	struct ixgbe_fcoe_ddp *ddp;
137 	struct scatterlist *sg;
138 	unsigned int i, j, dmacount;
139 	unsigned int len;
140 	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
141 	unsigned int firstoff = 0;
142 	unsigned int lastsize;
143 	unsigned int thisoff = 0;
144 	unsigned int thislen = 0;
145 	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
146 	dma_addr_t addr = 0;
147 	struct pci_pool *pool;
148 	unsigned int cpu;
149 
150 	if (!netdev || !sgl)
151 		return 0;
152 
153 	adapter = netdev_priv(netdev);
154 	if (xid >= IXGBE_FCOE_DDP_MAX) {
155 		e_warn(drv, "xid=0x%x out-of-range\n", xid);
156 		return 0;
157 	}
158 
159 	/* no DDP if we are already down or resetting */
160 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
161 	    test_bit(__IXGBE_RESETTING, &adapter->state))
162 		return 0;
163 
164 	fcoe = &adapter->fcoe;
165 	if (!fcoe->pool) {
166 		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
167 		return 0;
168 	}
169 
170 	ddp = &fcoe->ddp[xid];
171 	if (ddp->sgl) {
172 		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
173 		      xid, ddp->sgl, ddp->sgc);
174 		return 0;
175 	}
176 	ixgbe_fcoe_clear_ddp(ddp);
177 
178 	/* setup dma from scsi command sgl */
179 	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
180 	if (dmacount == 0) {
181 		e_err(drv, "xid 0x%x DMA map error\n", xid);
182 		return 0;
183 	}
184 
185 	/* alloc the udl from per cpu ddp pool */
186 	cpu = get_cpu();
187 	pool = *per_cpu_ptr(fcoe->pool, cpu);
188 	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
189 	if (!ddp->udl) {
190 		e_err(drv, "failed to allocate ddp context\n");
191 		goto out_noddp_unmap;
192 	}
193 	ddp->pool = pool;
194 	ddp->sgl = sgl;
195 	ddp->sgc = sgc;
196 
197 	j = 0;
198 	for_each_sg(sgl, sg, dmacount, i) {
199 		addr = sg_dma_address(sg);
200 		len = sg_dma_len(sg);
201 		while (len) {
202 			/* max number of buffers allowed in one DDP context */
203 			if (j >= IXGBE_BUFFCNT_MAX) {
204 				*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
205 				goto out_noddp_free;
206 			}
207 
208 			/* get the offset and length of the current buffer */
209 			thisoff = addr & ((dma_addr_t)bufflen - 1);
210 			thislen = min((bufflen - thisoff), len);
211 			/*
212 			 * all but the 1st buffer (j == 0)
213 			 * must be aligned on bufflen
214 			 */
215 			if ((j != 0) && (thisoff))
216 				goto out_noddp_free;
217 			/*
218 			 * all but the last buffer
219 			 * ((i == (dmacount - 1)) && (thislen == len))
220 			 * must end at bufflen
221 			 */
222 			if (((i != (dmacount - 1)) || (thislen != len))
223 			    && ((thislen + thisoff) != bufflen))
224 				goto out_noddp_free;
225 
226 			ddp->udl[j] = (u64)(addr - thisoff);
227 			/* only the first buffer may have a non-zero offset */
228 			if (j == 0)
229 				firstoff = thisoff;
230 			len -= thislen;
231 			addr += thislen;
232 			j++;
233 		}
234 	}
235 	/* only the last buffer may have non-full bufflen */
236 	lastsize = thisoff + thislen;
237 
238 	/*
239 	 * lastsize must not equal bufflen (hardware workaround); if it does,
240 	 * append the shared extra DDP buffer and report lastsize = 1 instead.
241 	 */
242 	if (lastsize == bufflen) {
243 		if (j >= IXGBE_BUFFCNT_MAX) {
244 			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
245 			goto out_noddp_free;
246 		}
247 
248 		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
249 		j++;
250 		lastsize = 1;
251 	}
252 	put_cpu();
253 
254 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
255 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
256 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
257 	/* Set WRCONTX bit to allow DDP for target */
258 	if (target_mode)
259 		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
260 	fcbuff |= (IXGBE_FCBUFF_VALID);
261 
262 	fcdmarw = xid;
263 	fcdmarw |= IXGBE_FCDMARW_WE;
264 	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);
265 
266 	fcfltrw = xid;
267 	fcfltrw |= IXGBE_FCFLTRW_WE;
268 
269 	/* program DMA context */
270 	hw = &adapter->hw;
271 	spin_lock_bh(&fcoe->lock);
272 
273 	/* turn on last frame indication for target mode, as the target is
274 	 * supposed to send FCP_RSP when it is done. */
275 	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
276 		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
277 		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
278 		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
279 		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
280 	}
281 
282 	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
283 	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
284 	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
285 	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
286 	/* program filter context */
287 	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
288 	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
289 	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
290 
291 	spin_unlock_bh(&fcoe->lock);
292 
293 	return 1;
294 
295 out_noddp_free:
296 	pci_pool_free(pool, ddp->udl, ddp->udp);
297 	ixgbe_fcoe_clear_ddp(ddp);
298 
299 out_noddp_unmap:
300 	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
301 	put_cpu();
302 	return 0;
303 }
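
/*
 * Worked example of the buffer-splitting rules above (illustrative only,
 * assuming the 4KB DDP buffer size used here, i.e. bufflen == 4096):
 *
 *	one SG element, dma address 0x10000200, length 9000 bytes
 *
 *	j = 0: thisoff = 0x200, thislen = 3584 -> udl[0] = 0x10000000,
 *	       firstoff = 0x200
 *	j = 1: thisoff = 0,     thislen = 4096 -> udl[1] = 0x10001000
 *	j = 2: thisoff = 0,     thislen = 1320 -> udl[2] = 0x10002000
 *
 * lastsize = 0 + 1320 = 1320 (not equal to bufflen, so the shared extra
 * DDP buffer is not needed), and FCBUFF is programmed with a buffer count
 * of 3 and an offset of 0x200.
 */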
304 
305 /**
306  * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
307  * @netdev: the corresponding net_device
308  * @xid: the exchange id requesting ddp
309  * @sgl: the scatter-gather list for this request
310  * @sgc: the number of scatter-gather items
311  *
312  * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
313  * and is expected to be called from ULD, e.g., FCP layer of libfc
314  * to set up ddp for the corresponding xid of the given sglist for
315  * the corresponding I/O.
316  *
317  * Returns : 1 for success and 0 for no ddp
318  */
319 int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
320 		       struct scatterlist *sgl, unsigned int sgc)
321 {
322 	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
323 }
324 
325 /**
326  * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
327  * @netdev: the corresponding net_device
328  * @xid: the exchange id requesting ddp
329  * @sgl: the scatter-gather list for this request
330  * @sgc: the number of scatter-gather items
331  *
332  * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
333  * and is expected to be called from ULD, e.g., FCP layer of libfc
334  * to set up ddp for the corresponding xid of the given sglist for
335  * the corresponding I/O. The DDP in target mode is a write I/O request
336  * from the initiator.
337  *
338  * Returns : 1 for success and 0 for no ddp
339  */
340 int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
341 			    struct scatterlist *sgl, unsigned int sgc)
342 {
343 	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
344 }
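
/*
 * Both entry points above are exported to the stack through the FCoE
 * members of net_device_ops.  The wiring in ixgbe_main.c is along these
 * lines (shown here for reference; see ixgbe_netdev_ops for the
 * authoritative list):
 *
 *	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
 *	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
 *	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
 *	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
 *	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
 *	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
 */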
345 
346 /**
347  * ixgbe_fcoe_ddp - check ddp status and mark it done
348  * @adapter: ixgbe adapter
349  * @rx_desc: advanced rx descriptor
350  * @skb: the skb holding the received data
351  *
352  * This checks ddp status.
353  *
354  * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
355  * not passing the skb to ULD, > 0 indicates the length of data
356  * being ddped.
357  */
358 int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
359 		   union ixgbe_adv_rx_desc *rx_desc,
360 		   struct sk_buff *skb)
361 {
362 	int rc = -EINVAL;
363 	struct ixgbe_fcoe *fcoe;
364 	struct ixgbe_fcoe_ddp *ddp;
365 	struct fc_frame_header *fh;
366 	struct fcoe_crc_eof *crc;
367 	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
368 	__le32 ddp_err;
369 	u32 fctl;
370 	u16 xid;
371 
372 	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
373 		skb->ip_summed = CHECKSUM_NONE;
374 	else
375 		skb->ip_summed = CHECKSUM_UNNECESSARY;
376 
377 	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
378 		fh = (struct fc_frame_header *)(skb->data +
379 			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
380 	else
381 		fh = (struct fc_frame_header *)(skb->data +
382 			sizeof(struct fcoe_hdr));
383 
384 	fctl = ntoh24(fh->fh_f_ctl);
385 	if (fctl & FC_FC_EX_CTX)
386 		xid =  be16_to_cpu(fh->fh_ox_id);
387 	else
388 		xid =  be16_to_cpu(fh->fh_rx_id);
389 
390 	if (xid >= IXGBE_FCOE_DDP_MAX)
391 		goto ddp_out;
392 
393 	fcoe = &adapter->fcoe;
394 	ddp = &fcoe->ddp[xid];
395 	if (!ddp->udl)
396 		goto ddp_out;
397 
398 	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
399 					      IXGBE_RXDADV_ERR_FCERR);
400 	if (ddp_err)
401 		goto ddp_out;
402 
403 	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
404 	/* return 0 to bypass going to ULD for DDPed data */
405 	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
406 		/* update length of DDPed data */
407 		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
408 		rc = 0;
409 		break;
410 	/* unmap the sg list when FCPRSP is received */
411 	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
412 		pci_unmap_sg(adapter->pdev, ddp->sgl,
413 			     ddp->sgc, DMA_FROM_DEVICE);
414 		ddp->err = ddp_err;
415 		ddp->sgl = NULL;
416 		ddp->sgc = 0;
417 		/* fall through */
418 	/* if DDP length is present pass it through to ULD */
419 	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
420 		/* update length of DDPed data */
421 		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
422 		if (ddp->len)
423 			rc = ddp->len;
424 		break;
425 	/* no match will return as an error */
426 	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
427 	default:
428 		break;
429 	}
430 
431 	/* In target mode, check the last data frame of the sequence.
432 	 * For DDP in target mode, data is already DDPed but the header
433 	 * indication of the last data frame would allow us to tell if we
434 	 * got all the data and the ULP can send FCP_RSP back.  As this is
435 	 * not a full fcoe frame, we fill in the trailer here so it won't be
436 	 * dropped by the ULP stack.
437 	 */
438 	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
439 	    (fctl & FC_FC_END_SEQ)) {
440 		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
441 		crc->fcoe_eof = FC_EOF_T;
442 	}
443 ddp_out:
444 	return rc;
445 }
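
/*
 * Sketch of how the Rx cleanup path is expected to consume the return
 * value above (illustrative; the surrounding ring/skb handling in
 * ixgbe_main.c is omitted and the local names are hypothetical):
 *
 *	int ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 *
 *	if (!ddp_bytes) {
 *		dev_kfree_skb_any(skb);
 *		continue;
 *	}
 *
 * A return of 0 means the payload was already placed by DDP and the skb is
 * not handed to the ULD; a negative value means not an FCoE DDP frame or
 * an error, so the skb is passed up unchanged; a positive value is the
 * number of DDPed bytes to account before passing the skb up.
 */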
446 
447 /**
448  * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
449  * @tx_ring: tx desc ring
450  * @first: first tx_buffer structure containing skb, tx_flags, and protocol
451  * @hdr_len: hdr_len to be returned
452  *
453  * This sets up large send offload for FCoE
454  *
455  * Returns : 0 indicates success, < 0 for error
456  */
457 int ixgbe_fso(struct ixgbe_ring *tx_ring,
458 	      struct ixgbe_tx_buffer *first,
459 	      u8 *hdr_len)
460 {
461 	struct sk_buff *skb = first->skb;
462 	struct fc_frame_header *fh;
463 	u32 vlan_macip_lens;
464 	u32 fcoe_sof_eof = 0;
465 	u32 mss_l4len_idx;
466 	u8 sof, eof;
467 
468 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
469 		dev_err(tx_ring->dev, "Wrong gso type %d: expecting SKB_GSO_FCOE\n",
470 			skb_shinfo(skb)->gso_type);
471 		return -EINVAL;
472 	}
473 
474 	/* resets the headers to point to fcoe/fc */
475 	skb_set_network_header(skb, skb->mac_len);
476 	skb_set_transport_header(skb, skb->mac_len +
477 				 sizeof(struct fcoe_hdr));
478 
479 	/* sets up SOF and ORIS */
480 	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
481 	switch (sof) {
482 	case FC_SOF_I2:
483 		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
484 		break;
485 	case FC_SOF_I3:
486 		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
487 			       IXGBE_ADVTXD_FCOEF_ORIS;
488 		break;
489 	case FC_SOF_N2:
490 		break;
491 	case FC_SOF_N3:
492 		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
493 		break;
494 	default:
495 		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
496 		return -EINVAL;
497 	}
498 
499 	/* the first byte of the last dword is EOF */
500 	skb_copy_bits(skb, skb->len - 4, &eof, 1);
501 	/* sets up EOF and ORIE */
502 	switch (eof) {
503 	case FC_EOF_N:
504 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
505 		break;
506 	case FC_EOF_T:
507 		/* lso needs ORIE */
508 		if (skb_is_gso(skb))
509 			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
510 					IXGBE_ADVTXD_FCOEF_ORIE;
511 		else
512 			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
513 		break;
514 	case FC_EOF_NI:
515 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
516 		break;
517 	case FC_EOF_A:
518 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
519 		break;
520 	default:
521 		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
522 		return -EINVAL;
523 	}
524 
525 	/* sets up PARINC indicating data offset */
526 	fh = (struct fc_frame_header *)skb_transport_header(skb);
527 	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
528 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
529 
530 	/* include trailer in headlen as it is replicated per frame */
531 	*hdr_len = sizeof(struct fcoe_crc_eof);
532 
533 	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
534 	if (skb_is_gso(skb)) {
535 		*hdr_len += skb_transport_offset(skb) +
536 			    sizeof(struct fc_frame_header);
537 		/* update gso_segs and bytecount */
538 		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
539 					       skb_shinfo(skb)->gso_size);
540 		first->bytecount += (first->gso_segs - 1) * *hdr_len;
541 		first->tx_flags |= IXGBE_TX_FLAGS_FSO;
542 	}
543 
544 	/* set flag indicating FCOE to ixgbe_tx_map call */
545 	first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
546 
547 	/* mss_l4len_idx: use 1 for FSO as for TSO, no need for L4LEN */
548 	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
549 	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
550 
551 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
552 	vlan_macip_lens = skb_transport_offset(skb) +
553 			  sizeof(struct fc_frame_header);
554 	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
555 			   << IXGBE_ADVTXD_MACLEN_SHIFT;
556 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
557 
558 	/* write context desc */
559 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
560 			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
561 
562 	return 0;
563 }
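
/*
 * Sketch of where ixgbe_fso() sits in the transmit path (illustrative;
 * simplified from the FCoE branch of ixgbe_xmit_frame_ring() and not a
 * verbatim copy of it):
 *
 *	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
 *	    protocol == htons(ETH_P_FCOE)) {
 *		tso = ixgbe_fso(tx_ring, first, &hdr_len);
 *		if (tso < 0)
 *			goto out_drop;
 *	}
 *
 * On success the context descriptor written above tells the hardware how
 * to segment the sequence, with the FC header and CRC/EOF trailer
 * replicated for each frame (hence hdr_len including the trailer).
 */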
564 
565 static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
566 {
567 	unsigned int cpu;
568 	struct pci_pool **pool;
569 
570 	for_each_possible_cpu(cpu) {
571 		pool = per_cpu_ptr(fcoe->pool, cpu);
572 		if (*pool)
573 			pci_pool_destroy(*pool);
574 	}
575 	free_percpu(fcoe->pool);
576 	fcoe->pool = NULL;
577 }
578 
579 static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
580 {
581 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
582 	unsigned int cpu;
583 	struct pci_pool **pool;
584 	char pool_name[32];
585 
586 	fcoe->pool = alloc_percpu(struct pci_pool *);
587 	if (!fcoe->pool)
588 		return;
589 
590 	/* allocate pci pool for each cpu */
591 	for_each_possible_cpu(cpu) {
592 		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
593 		pool = per_cpu_ptr(fcoe->pool, cpu);
594 		*pool = pci_pool_create(pool_name,
595 					adapter->pdev, IXGBE_FCPTR_MAX,
596 					IXGBE_FCPTR_ALIGN, PAGE_SIZE);
597 		if (!*pool) {
598 			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
599 			ixgbe_fcoe_ddp_pools_free(fcoe);
600 			return;
601 		}
602 	}
603 }
604 
605 /**
606  * ixgbe_configure_fcoe - configures registers for fcoe at start
607  * @adapter: ptr to ixgbe adapter
608  *
609  * This sets up FCoE related registers
610  *
611  * Returns : none
612  */
613 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
614 {
615 	int i, fcoe_q, fcoe_i;
616 	struct ixgbe_hw *hw = &adapter->hw;
617 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
618 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
619 	unsigned int cpu;
620 
621 	if (!fcoe->pool) {
622 		spin_lock_init(&fcoe->lock);
623 
624 		ixgbe_fcoe_ddp_pools_alloc(adapter);
625 		if (!fcoe->pool) {
626 			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
627 			return;
628 		}
629 
630 		/* Extra buffer to be shared by all DDPs as a HW workaround */
631 		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
632 		if (fcoe->extra_ddp_buffer == NULL) {
633 			e_err(drv, "failed to allocate extra DDP buffer\n");
634 			goto out_ddp_pools;
635 		}
636 
637 		fcoe->extra_ddp_buffer_dma =
638 			dma_map_single(&adapter->pdev->dev,
639 				       fcoe->extra_ddp_buffer,
640 				       IXGBE_FCBUFF_MIN,
641 				       DMA_FROM_DEVICE);
642 		if (dma_mapping_error(&adapter->pdev->dev,
643 				      fcoe->extra_ddp_buffer_dma)) {
644 			e_err(drv, "failed to map extra DDP buffer\n");
645 			goto out_extra_ddp_buffer;
646 		}
647 
648 		/* Alloc per-cpu memory to count ddp allocation failures */
649 		fcoe->pcpu_noddp = alloc_percpu(u64);
650 		if (!fcoe->pcpu_noddp) {
651 			e_err(drv, "failed to alloc noddp counter\n");
652 			goto out_pcpu_noddp_alloc_fail;
653 		}
654 
655 		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
656 		if (!fcoe->pcpu_noddp_ext_buff) {
657 			e_err(drv, "failed to alloc noddp extra buff cnt\n");
658 			goto out_pcpu_noddp_extra_buff_alloc_fail;
659 		}
660 
661 		for_each_possible_cpu(cpu) {
662 			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
663 			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
664 		}
665 	}
666 
667 	/* Enable L2 eth type filter for FCoE */
668 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
669 			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
670 	/* Enable L2 eth type filter for FIP */
671 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
672 			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
673 	if (adapter->ring_feature[RING_F_FCOE].indices) {
674 		/* Use multiple rx queues for FCoE by redirection table */
675 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
676 			fcoe_i = f->mask + i % f->indices;
677 			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
678 			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
679 			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
680 		}
681 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
682 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
683 	} else  {
684 		/* Use single rx queue for FCoE */
685 		fcoe_i = f->mask;
686 		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
687 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
688 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
689 				IXGBE_ETQS_QUEUE_EN |
690 				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
691 	}
692 	/* send FIP frames to the first FCoE queue */
693 	fcoe_i = f->mask;
694 	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
695 	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
696 			IXGBE_ETQS_QUEUE_EN |
697 			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
698 
699 	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
700 			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
701 	return;
702 out_pcpu_noddp_extra_buff_alloc_fail:
703 	free_percpu(fcoe->pcpu_noddp);
704 out_pcpu_noddp_alloc_fail:
705 	dma_unmap_single(&adapter->pdev->dev,
706 			 fcoe->extra_ddp_buffer_dma,
707 			 IXGBE_FCBUFF_MIN,
708 			 DMA_FROM_DEVICE);
709 out_extra_ddp_buffer:
710 	kfree(fcoe->extra_ddp_buffer);
711 out_ddp_pools:
712 	ixgbe_fcoe_ddp_pools_free(fcoe);
713 }
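
/*
 * Worked example of the redirection-table fill above (illustrative,
 * assuming f->indices = 4 FCoE Rx queues starting at ring offset f->mask):
 *
 *	FCRETA entry i maps to ring (f->mask + i % 4), so entries
 *	0, 1, 2, 3, 4, 5, ... use rings f->mask+0, +1, +2, +3, +0, +1, ...
 *	and the value actually written is that ring's hardware reg_idx.
 */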
714 
715 /**
716  * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
717  * @adapter: ixgbe adapter
718  *
719  * Cleans up outstanding ddp context resources
720  *
721  * Returns : none
722  */
723 void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
724 {
725 	int i;
726 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
727 
728 	if (!fcoe->pool)
729 		return;
730 
731 	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
732 		ixgbe_fcoe_ddp_put(adapter->netdev, i);
733 	dma_unmap_single(&adapter->pdev->dev,
734 			 fcoe->extra_ddp_buffer_dma,
735 			 IXGBE_FCBUFF_MIN,
736 			 DMA_FROM_DEVICE);
737 	free_percpu(fcoe->pcpu_noddp);
738 	free_percpu(fcoe->pcpu_noddp_ext_buff);
739 	kfree(fcoe->extra_ddp_buffer);
740 	ixgbe_fcoe_ddp_pools_free(fcoe);
741 }
742 
743 /**
744  * ixgbe_fcoe_enable - turn on FCoE offload feature
745  * @netdev: the corresponding netdev
746  *
747  * Turns on FCoE offload feature in 82599.
748  *
749  * Returns : 0 indicates success or -EINVAL on failure
750  */
751 int ixgbe_fcoe_enable(struct net_device *netdev)
752 {
753 	int rc = -EINVAL;
754 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
755 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
756 
757 
759 		goto out_enable;
760 
761 	atomic_inc(&fcoe->refcnt);
762 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
763 		goto out_enable;
764 
765 	e_info(drv, "Enabling FCoE offload features.\n");
766 	if (netif_running(netdev))
767 		netdev->netdev_ops->ndo_stop(netdev);
768 
769 	ixgbe_clear_interrupt_scheme(adapter);
770 
771 	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
772 	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
773 	netdev->features |= NETIF_F_FCOE_CRC;
774 	netdev->features |= NETIF_F_FSO;
775 	netdev->features |= NETIF_F_FCOE_MTU;
776 	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
777 
778 	ixgbe_init_interrupt_scheme(adapter);
779 	netdev_features_change(netdev);
780 
781 	if (netif_running(netdev))
782 		netdev->netdev_ops->ndo_open(netdev);
783 	rc = 0;
784 
785 out_enable:
786 	return rc;
787 }
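
/*
 * Example of how the FCoE ULD is expected to toggle the offload
 * (illustrative sketch; the fcoe module performs the equivalent through
 * ndo_fcoe_enable/ndo_fcoe_disable, and the error message is made up):
 *
 *	const struct net_device_ops *ops = netdev->netdev_ops;
 *
 *	if (ops->ndo_fcoe_enable && ops->ndo_fcoe_enable(netdev))
 *		pr_err("failed to enable FCoE offload on %s\n", netdev->name);
 */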
788 
789 /**
790  * ixgbe_fcoe_disable - turn off FCoE offload feature
791  * @netdev: the corresponding netdev
792  *
793  * Turns off FCoE offload feature in 82599.
794  *
795  * Returns : 0 indicates success or -EINVAL on failure
796  */
797 int ixgbe_fcoe_disable(struct net_device *netdev)
798 {
799 	int rc = -EINVAL;
800 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
801 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
802 
803 	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
804 		goto out_disable;
805 
806 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
807 		goto out_disable;
808 
809 	if (!atomic_dec_and_test(&fcoe->refcnt))
810 		goto out_disable;
811 
812 	e_info(drv, "Disabling FCoE offload features.\n");
813 	netdev->features &= ~NETIF_F_FCOE_CRC;
814 	netdev->features &= ~NETIF_F_FSO;
815 	netdev->features &= ~NETIF_F_FCOE_MTU;
816 	netdev->fcoe_ddp_xid = 0;
817 	netdev_features_change(netdev);
818 
819 	if (netif_running(netdev))
820 		netdev->netdev_ops->ndo_stop(netdev);
821 
822 	ixgbe_clear_interrupt_scheme(adapter);
823 	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
824 	adapter->ring_feature[RING_F_FCOE].indices = 0;
825 	ixgbe_cleanup_fcoe(adapter);
826 	ixgbe_init_interrupt_scheme(adapter);
827 
828 	if (netif_running(netdev))
829 		netdev->netdev_ops->ndo_open(netdev);
830 	rc = 0;
831 
832 out_disable:
833 	return rc;
834 }
835 
836 /**
837  * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
838  * @netdev: the corresponding net_device
839  * @wwn: the world wide name
840  * @type: the type of world wide name
841  *
842  * Returns the node or port world wide name if both the prefix and the SAN
843  * MAC address are valid.  The wwn is then formed based on NAA-2 for the
844  * IEEE Extended name identifier (ref. T11 FC-LS spec., Sec. 15.3).
845  *
846  * Returns : 0 on success
847  */
848 int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
849 {
850 	int rc = -EINVAL;
851 	u16 prefix = 0xffff;
852 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
853 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
854 
855 	switch (type) {
856 	case NETDEV_FCOE_WWNN:
857 		prefix = mac->wwnn_prefix;
858 		break;
859 	case NETDEV_FCOE_WWPN:
860 		prefix = mac->wwpn_prefix;
861 		break;
862 	default:
863 		break;
864 	}
865 
866 	if ((prefix != 0xffff) &&
867 	    is_valid_ether_addr(mac->san_addr)) {
868 		*wwn = ((u64) prefix << 48) |
869 		       ((u64) mac->san_addr[0] << 40) |
870 		       ((u64) mac->san_addr[1] << 32) |
871 		       ((u64) mac->san_addr[2] << 24) |
872 		       ((u64) mac->san_addr[3] << 16) |
873 		       ((u64) mac->san_addr[4] << 8)  |
874 		       ((u64) mac->san_addr[5]);
875 		rc = 0;
876 	}
877 	return rc;
878 }
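
/*
 * Worked example of the WWN layout built above (illustrative; the prefix
 * and SAN MAC values are made up):
 *
 *	prefix   = 0x2001                 (example WWPN prefix)
 *	san_addr = 00:1b:21:aa:bb:cc      (example SAN MAC address)
 *
 *	wwn = 0x2001001b21aabbcc
 *
 * i.e. the 16-bit prefix occupies bits 63:48 and the 48-bit SAN MAC
 * address occupies bits 47:0.
 */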
879 
880 /**
881  * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
882  * @netdev: the corresponding net_device
883  * @info: HBA information
884  *
885  * Returns ixgbe HBA information
886  *
887  * Returns : 0 on success
888  */
889 int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
890 			   struct netdev_fcoe_hbainfo *info)
891 {
892 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
893 	struct ixgbe_hw *hw = &adapter->hw;
894 	int i, pos;
895 	u8 buf[8];
896 
897 	if (!info)
898 		return -EINVAL;
899 
900 	/* Don't return information on unsupported devices */
901 	if (hw->mac.type != ixgbe_mac_82599EB &&
902 	    hw->mac.type != ixgbe_mac_X540)
903 		return -EINVAL;
904 
905 	/* Manufacturer */
906 	snprintf(info->manufacturer, sizeof(info->manufacturer),
907 		 "Intel Corporation");
908 
909 	/* Serial Number */
910 
911 	/* Get the PCI-e Device Serial Number Capability */
912 	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
913 	if (pos) {
914 		pos += 4;
915 		for (i = 0; i < 8; i++)
916 			pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
917 
918 		snprintf(info->serial_number, sizeof(info->serial_number),
919 			 "%02X%02X%02X%02X%02X%02X%02X%02X",
920 			 buf[7], buf[6], buf[5], buf[4],
921 			 buf[3], buf[2], buf[1], buf[0]);
922 	} else
923 		snprintf(info->serial_number, sizeof(info->serial_number),
924 			 "Unknown");
925 
926 	/* Hardware Version */
927 	snprintf(info->hardware_version,
928 		 sizeof(info->hardware_version),
929 		 "Rev %d", hw->revision_id);
930 	/* Driver Name/Version */
931 	snprintf(info->driver_version,
932 		 sizeof(info->driver_version),
933 		 "%s v%s",
934 		 ixgbe_driver_name,
935 		 ixgbe_driver_version);
936 	/* Firmware Version */
937 	snprintf(info->firmware_version,
938 		 sizeof(info->firmware_version),
939 		 "0x%08x",
940 		 (adapter->eeprom_verh << 16) |
941 		  adapter->eeprom_verl);
942 
943 	/* Model */
944 	if (hw->mac.type == ixgbe_mac_82599EB) {
945 		snprintf(info->model,
946 			 sizeof(info->model),
947 			 "Intel 82599");
948 	} else {
949 		snprintf(info->model,
950 			 sizeof(info->model),
951 			 "Intel X540");
952 	}
953 
954 	/* Model Description */
955 	snprintf(info->model_description,
956 		 sizeof(info->model_description),
957 		 "%s",
958 		 ixgbe_default_device_descr);
959 
960 	return 0;
961 }
962