// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc_frame.h>
#include <linux/etherdevice.h>
#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fdls.h"
#include "fdls_fc.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
#include "fip.h"

#define MAX_RESET_WAIT_COUNT    64

struct workqueue_struct *fnic_event_queue;

static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC;

/*
 * Internal helper: set the source MAC address to be used in
 * outgoing frames.
 */
static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic,
					     uint8_t *src_mac)
{
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Setting src mac: %pM", src_mac);

	memcpy(fnic->iport.fpma, src_mac, ETH_ALEN);
}

/*
 * Internal helper: set the destination MAC address to be used in
 * outgoing frames.
 */
static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic,
					     uint8_t *dst_mac)
{
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Setting dst mac: %pM", dst_mac);

	memcpy(fnic->iport.fcfmac, dst_mac, ETH_ALEN);
}
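
/**
 * fnic_get_host_port_state() - report the FC port state to the transport.
 * @shost:	SCSI host whose port state is queried.
 *
 * Derives the state from the fnic link status and iport state: link down
 * maps to LINKDOWN, a ready iport to ONLINE, anything else to OFFLINE.
 * Takes fnic_lock internally.
 */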
void fnic_get_host_port_state(struct Scsi_Host *shost)
{
	struct fnic *fnic = *((struct fnic **) shost_priv(shost));
	struct fnic_iport_s *iport = &fnic->iport;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (!fnic->link_status)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else if (iport->state == FNIC_IPORT_STATE_READY)
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
	else
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

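/**
 * fnic_fdls_link_status_change() - react to a link up/down transition.
 * @fnic:	fnic instance.
 * @linkup:	nonzero if the link came up, zero if it went down.
 *
 * On link up, either starts FIP VLAN discovery (when usefip is set) or
 * kicks off FDLS fabric discovery directly. On link down, removes the
 * FPMA address from the hardware filter and tears down FIP/FDLS state.
 * Takes fnic_lock internally.
 */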
void fnic_fdls_link_status_change(struct fnic *fnic, int linkup)
{
	struct fnic_iport_s *iport = &fnic->iport;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "link up: %d, usefip: %d", linkup, iport->usefip);

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

	if (linkup) {
		if (iport->usefip) {
			iport->state = FNIC_IPORT_STATE_FIP;
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Sending FIP VLAN discovery request");
			fnic_fcoe_send_vlan_req(fnic);
		} else {
			iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "iport->state: %d", iport->state);
			fnic_fdls_disc_start(iport);
		}
	} else {
		iport->state = FNIC_IPORT_STATE_LINK_WAIT;
		if (!is_zero_ether_addr(iport->fpma))
			vnic_dev_del_addr(fnic->vdev, iport->fpma);
		fnic_common_fip_cleanup(fnic);
		fnic_fdls_link_down(iport);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}

/*
 * The FPMA can be taken from the ethhdr (dst_mac) of the FLOGI
 * response, or derived from the FC_MAP and FCID combination. The two
 * should be identical; revisit this if they can ever differ.
 */
void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
			       uint8_t *fcid)
{
	struct fnic *fnic = iport->fnic;
	struct ethhdr *ethhdr = (struct ethhdr *) rx_frame;
	uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };

	memcpy(&fcmac[3], fcid, 3);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "learn fcoe: dst_mac: %pM", ethhdr->h_dest);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "learn fcoe: fc_mac: %pM", fcmac);

	fnic_fdls_set_fcoe_srcmac(fnic, fcmac);
	fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source);
}
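
/*
 * Worked example (illustrative only): with the FC-MAP prefix 0x0EFC00
 * used in fcmac[] above and an FCID of 0x010203 assigned by the fabric,
 * the derived fabric-provided MAC address (FPMA) is 0e:fc:00:01:02:03,
 * i.e. fcmac[] after the memcpy() of the FCID into its last three bytes.
 */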

void fnic_fdls_init(struct fnic *fnic, int usefip)
{
	struct fnic_iport_s *iport = &fnic->iport;

	/* Initialize iport structure */
	iport->state = FNIC_IPORT_STATE_INIT;
	iport->fnic = fnic;
	iport->usefip = usefip;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "iport hwmac: %pM", iport->hwmac);

	INIT_LIST_HEAD(&iport->tport_list);
	INIT_LIST_HEAD(&iport->tport_list_pending_del);

	fnic_fdls_disc_init(iport);
}

void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	int old_link_status;
	u32 old_link_down_cnt;
	int max_count = 0;

	if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI)
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Interrupt mode is not MSI\n");

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Stop link rx events\n");
		return;
	}

	/* Do not process if the fnic is already in a transitional state */
	if ((fnic->state != FNIC_IN_ETH_MODE)
	    && (fnic->state != FNIC_IN_FC_MODE)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic in transitional state: %d. link up: %d ignored",
			     fnic->state, vnic_dev_link_status(fnic->vdev));
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Current link status: %d iport state: %d\n",
			     fnic->link_status, fnic->iport.state);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	while (fnic->reset_in_progress == IN_PROGRESS) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic reset in progress. Link event needs to wait\n");

		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "waiting for reset completion\n");
		wait_for_completion_timeout(&fnic->reset_completion_wait,
					    msecs_to_jiffies(5000));
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "woken up from reset completion wait\n");
		spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

		max_count++;
		if (max_count >= MAX_RESET_WAIT_COUNT) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Waited too long for reset completion. Skipping link event\n");
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			return;
		}
	}
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Marking fnic reset in progress\n");
	fnic->reset_in_progress = IN_PROGRESS;

	if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) ||
	    (fnic->link_status != old_link_status)) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "old link status: %d link status: %d\n",
			     old_link_status, (int) fnic->link_status);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "old down count %d down count: %d\n",
			     old_link_down_cnt, (int) fnic->link_down_cnt);
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "down->down\n");
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock,
						       fnic->lock_flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "up->down. Link down\n");
				fnic_fdls_link_status_change(fnic, 0);

				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "down->up. Link up\n");
				fnic_fdls_link_status_change(fnic, 1);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock,
						       fnic->lock_flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "up->up\n");
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "down->up. Link up\n");
		fnic_fdls_link_status_change(fnic, 1);
	} else {
		/* UP -> DOWN */
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "up->down. Link down\n");
		fnic_fdls_link_status_change(fnic, 0);
	}

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	fnic->reset_in_progress = NOT_IN_PROGRESS;
	complete(&fnic->reset_completion_wait);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Marking fnic reset completion\n");
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
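
/*
 * Summary of the transitions handled above:
 *   DOWN -> DOWN : no action
 *   UP   -> UP   : no action, unless link_down_cnt changed, in which
 *                  case the missed bounce is replayed as a link down
 *                  followed by a link up
 *   DOWN -> UP   : fnic_fdls_link_status_change(fnic, 1)
 *   UP   -> DOWN : fnic_fdls_link_status_change(fnic, 0)
 */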

void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fnic_frame_list *cur_frame, *next;
	int fchdr_offset = 0;

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
		if (fnic->stop_rx_link_events) {
			list_del(&cur_frame->links);
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			kfree(cur_frame->fp);
			mempool_free(cur_frame, fnic->frame_elem_pool);
			return;
		}

		/*
		 * If we're in a transitional state, leave the frames on the
		 * queue and return. The queue will be serviced when we get
		 * to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Cannot process frame in transitional state\n");
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			return;
		}

		list_del(&cur_frame->links);

		/* Frames from FCP_RQ will have ethhdrs stripped off */
		fchdr_offset = (cur_frame->rx_ethhdr_stripped) ?
			0 : FNIC_ETH_FCOE_HDRS_OFFSET;

		fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp,
				     cur_frame->frame_len, fchdr_offset);

		kfree(cur_frame->fp);
		mempool_free(cur_frame, fnic->frame_elem_pool);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
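
/*
 * Note: fnic->frame_queue is populated by fnic_rq_cmpl_frame_recv()
 * below, which queues this work on fnic_event_queue for each received
 * FCoE frame.
 */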

void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic_frame_list *cur_frame, *next;
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Processing FIP frame\n");

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue,
				 links) {
		if (fnic->stop_rx_link_events) {
			list_del(&cur_frame->links);
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			kfree(cur_frame->fp);
			kfree(cur_frame);
			return;
		}

		/*
		 * If we're in a transitional state, leave the frames on the
		 * queue and return. The queue will be serviced when we get
		 * to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			return;
		}

		list_del(&cur_frame->links);

		if (fdls_fip_recv_frame(fnic, cur_frame->fp)) {
			kfree(cur_frame->fp);
			kfree(cur_frame);
		}
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
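
/*
 * Note: fnic->fip_frame_queue is populated by fnic_import_rq_eth_pkt()
 * below, which diverts FIP ethertype packets to this worker.
 */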

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:	fnic instance.
 * @fp:		Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp)
{
	struct ethhdr *eh;
	struct fnic_frame_list *fip_fr_elem;
	unsigned long flags;

	eh = (struct ethhdr *) fp;
	if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) {
		fip_fr_elem = kzalloc(sizeof(*fip_fr_elem), GFP_ATOMIC);
		if (!fip_fr_elem)
			return 0;
		fip_fr_elem->fp = fp;
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;	/* let caller know packet was used */
	}

	return 0;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance.
 * @new:	newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	struct fnic_iport_s *iport = &fnic->iport;
	u8 *ctl = iport->hwmac;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Update MAC: %pM\n", new);

	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);

	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}
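
/*
 * Usage sketch (illustrative only; granted_mac is a hypothetical
 * variable holding the MAC address granted by the fabric):
 *
 *	spin_lock_irqsave(&fnic->fnic_lock, flags);
 *	fnic_update_mac_locked(fnic, granted_mac);
 *	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 */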

static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq,
				    struct cq_desc *cq_desc,
				    struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	uint8_t *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int ethhdr_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, vlan;
	u32 rss_hash;
	u16 checksum;
	u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	u16 enet_bytes_written = 0;
	u32 bytes_written = 0;
	unsigned long flags;
	struct fnic_frame_list *frame_elem = NULL;
	struct ethhdr *eh;

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	fp = (uint8_t *) buf->os_buf;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type,
				   &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fnic_crc_ok,
				   &exchange_id, &tmpl, &fcp_bytes_written,
				   &sof, &eof, &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		ethhdr_stripped = 1;
		bytes_written = fcp_bytes_written;
	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type,
				    &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &enet_bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fnic_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);

		ethhdr_stripped = 0;
		bytes_written = enet_bytes_written;

		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "fnic 0x%p fcs error. Dropping packet.\n",
				     fnic);
			goto drop;
		}
		eh = (struct ethhdr *) fp;
		if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) {
			if (fnic_import_rq_eth_pkt(fnic, fp))
				return;

			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Dropping h_proto 0x%x",
				     be16_to_cpu(eh->h_proto));
			goto drop;
		}
	} else {
		/* wrong CQ type */
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic rq_cmpl wrong cq type 0x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fnic_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic->stop_rx_link_events: %d\n",
			     fnic->stop_rx_link_events);
		goto drop;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	frame_elem = mempool_alloc(fnic->frame_elem_pool,
				   GFP_ATOMIC | __GFP_ZERO);
	if (!frame_elem) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Failed to allocate memory for frame elem");
		goto drop;
	}
	frame_elem->fp = fp;
	frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
	frame_elem->frame_len = bytes_written;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&frame_elem->links, &fnic->frame_queue);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	queue_work(fnic_event_queue, &fnic->frame_work);
	return;

drop:
	kfree(fp);
}
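
/*
 * RX completions arrive on two CQ descriptor types: CQ_DESC_TYPE_RQ_FCP,
 * where the hardware has already stripped the Ethernet/FCoE headers, and
 * CQ_DESC_TYPE_RQ_ENET, which carries the full Ethernet frame (and may be
 * a FIP packet rather than FCoE). The rx_ethhdr_stripped flag set above
 * tells fnic_handle_frame() which layout to expect.
 */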

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done && fnic->stop_rx_link_events != 1) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->host,
					     "fnic_alloc_rq_frame can't alloc frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	void *buf;
	u16 len;
	dma_addr_t pa;
	int ret;

	len = FNIC_FRAME_HT_ROOM;
	buf = kmalloc(len, GFP_ATOMIC);
	if (!buf) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Unable to allocate RQ buffer of size: %d\n",
			     len);
		return -ENOMEM;
	}

	pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		ret = -ENOMEM;
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "PCI mapping failed with error %d\n", ret);
		goto free_buf;
	}

	fnic_queue_rq_desc(rq, buf, pa, len);
	return 0;
free_buf:
	kfree(buf);
	return ret;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	void *rq_buf = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);

	kfree(rq_buf);
	buf->os_buf = NULL;
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len)
{
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	int ret = 0;
	unsigned long flags;

	pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa))
		return -ENOMEM;

	if (fnic_fc_trace_set_data(fnic->fnic_num, FNIC_FC_SEND | 0x80,
				   (char *) frame, frame_len) != 0) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic ctlr frame trace error");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		dma_unmap_single(&fnic->pdev->dev, pa, frame_len,
				 DMA_TO_DEVICE);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "vnic work queue descriptor is not available");
		ret = -1;
		goto fnic_send_frame_end;
	}

	/* hw inserts cos value */
	fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T,
			   0, fnic->vlan_id, 1, 1, 1);

fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return ret;
}
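
/*
 * Return convention for fnic_send_frame(): 0 on success, -ENOMEM if the
 * DMA mapping fails, and -1 if no WQ descriptor is available; callers
 * treat any nonzero value as a send failure.
 */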

/**
 * fdls_send_fcoe_frame - send a filled-in FC frame, filling in eth and
 *	FCoE info. This interface is used only in the non-fast path
 *	(login, fabric registrations, etc.).
 *
 * @fnic:	fnic instance
 * @frame:	frame structure with FC payload filled in
 * @frame_size:	length of the frame to be sent
 * @srcmac:	source mac address
 * @dstmac:	destination mac address
 *
 * Called with the fnic lock held.
 */
static int
fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size,
		     uint8_t *srcmac, uint8_t *dstmac)
{
	struct ethhdr *pethhdr;
	struct fcoe_hdr *pfcoe_hdr;
	struct fnic_frame_list *frame_elem;
	int len = frame_size;
	struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame +
			FNIC_ETH_FCOE_HDRS_OFFSET);

	pethhdr = (struct ethhdr *) frame;
	pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE);
	memcpy(pethhdr->h_source, srcmac, ETH_ALEN);
	memcpy(pethhdr->h_dest, dstmac, ETH_ALEN);

	pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr));
	pfcoe_hdr->fcoe_sof = FC_SOF_I3;

	/*
	 * Queue the frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after
	 * FLOGI.
	 */
	if ((fnic->state != FNIC_IN_FC_MODE)
	    && (fnic->state != FNIC_IN_ETH_MODE)) {
		frame_elem = mempool_alloc(fnic->frame_elem_pool,
					   GFP_ATOMIC | __GFP_ZERO);
		if (!frame_elem) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Failed to allocate memory for frame elem");
			return -ENOMEM;
		}

		FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			     "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n",
			     ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id),
			     fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
		frame_elem->fp = frame;
		frame_elem->frame_len = len;
		list_add_tail(&frame_elem->links, &fnic->tx_queue);
		return 0;
	}

	fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing");

	return fnic_send_frame(fnic, frame, len);
}
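
/*
 * On-wire layout assembled above (illustrative; assumes
 * FNIC_ETH_FCOE_HDRS_OFFSET == sizeof(struct ethhdr) +
 * sizeof(struct fcoe_hdr)):
 *
 *	frame + 0                         : struct ethhdr
 *	frame + sizeof(struct ethhdr)     : struct fcoe_hdr
 *	frame + FNIC_ETH_FCOE_HDRS_OFFSET : struct fc_frame_header + payload
 */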

void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
			  int frame_size)
{
	struct fnic *fnic = iport->fnic;
	uint8_t *dstmac, *srcmac;

	/* If module unload is in progress, don't send */
	if (fnic->in_remove)
		return;

	if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) {
		srcmac = iport->fpma;
		dstmac = iport->fcfmac;
	} else {
		srcmac = iport->hwmac;
		dstmac = FCOE_ALL_FCF_MAC;
	}

	fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac);
}

int
fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame,
		    int frame_size)
{
	struct fnic *fnic = iport->fnic;

	if (fnic->in_remove)
		return -1;

	fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing");
	return fnic_send_frame(fnic, frame, frame_size);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @work: pointer to work element
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, flush_work);
	struct fc_frame *fp;
	struct fnic_frame_list *cur_frame, *next;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Flush queued frames");

	list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
		fp = cur_frame->fp;
		list_del(&cur_frame->links);
		fnic_send_frame(fnic, fp, cur_frame->frame_len);
		mempool_free(cur_frame, fnic->frame_elem_pool);
	}
}
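
/*
 * Note: fnic->tx_queue is populated by fdls_send_fcoe_frame() above when
 * a frame is submitted while the fnic is in a transitional state; this
 * work drains it once a stable mode is reached.
 */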

int
fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
			  void *fp)
{
	struct fnic *fnic = iport->fnic;
	struct ethhdr *ethhdr;
	int ret;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Setting port id: 0x%x fp: 0x%p fnic state: %d",
		     port_id, fp, fnic->state);

	if (fp) {
		ethhdr = (struct ethhdr *) fp;
		vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest);
	}

	/* Change state to reflect transition to FC mode */
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) {
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	} else {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Unexpected fnic state while processing FLOGI response\n");
		return -1;
	}

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);
	if (ret < 0) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "FLOGI registration error ret: %d fnic state: %d\n",
			     ret, fnic->state);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;

		return -1;
	}
	iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "FLOGI registration success\n");
	return 0;
}

void fnic_free_txq(struct list_head *head)
{
	struct fnic_frame_list *cur_frame, *next;

	list_for_each_entry_safe(cur_frame, next, head, links) {
		list_del(&cur_frame->links);
		kfree(cur_frame->fp);
		kfree(cur_frame);
	}
}

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);
	mempool_free(buf->os_buf, fnic->frame_pool);
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count + i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	kfree(buf->os_buf);
	buf->os_buf = NULL;
}

void
fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport,
		    unsigned long flags)
{
	struct fnic *fnic = iport->fnic;
	struct fc_rport *rport;
	struct fc_rport_identifiers ids;
	struct rport_dd_data_s *rdd_data;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Adding rport fcid: 0x%x", tport->fcid);

	ids.node_name = tport->wwnn;
	ids.port_name = tport->wwpn;
	ids.port_id = tport->fcid;
	ids.roles = FC_RPORT_ROLE_FCP_TARGET;

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	rport = fc_remote_port_add(fnic->host, 0, &ids);
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (!rport) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Failed to add rport for tport: 0x%x",
			     tport->fcid);
		return;
	}

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Added rport fcid: 0x%x", tport->fcid);

	/* Mimic these assignments in queuecommand to avoid timing issues */
	rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
	rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
	rdd_data = rport->dd_data;
	rdd_data->tport = tport;
	rdd_data->iport = iport;
	tport->rport = rport;
	tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
}

void
fnic_fdls_remove_tport(struct fnic_iport_s *iport,
		       struct fnic_tport_s *tport, unsigned long flags)
{
	struct fnic *fnic = iport->fnic;
	struct rport_dd_data_s *rdd_data;
	struct fc_rport *rport;

	if (!tport)
		return;

	fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE);
	rport = tport->rport;

	if (rport) {
		/*
		 * tport resource release will be done
		 * after fnic_terminate_rport_io()
		 */
		tport->flags |= FNIC_FDLS_TPORT_DELETED;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		/* Interface to scsi_fc_transport */
		fc_remote_port_delete(rport);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Deregistered and freed tport fcid: 0x%x from scsi transport fc",
			     tport->fcid);

		/*
		 * the dd_data is allocated by the fc transport
		 * of size dd_fcrport_size
		 */
		rdd_data = rport->dd_data;
		rdd_data->tport = NULL;
		rdd_data->iport = NULL;
		list_del(&tport->links);
		kfree(tport);
	} else {
		fnic_del_tport_timer_sync(fnic, tport);
		list_del(&tport->links);
		kfree(tport);
	}
}

void fnic_delete_fcp_tports(struct fnic *fnic)
{
	struct fnic_tport_s *tport, *next;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "removing fcp rport fcid: 0x%x", tport->fcid);
		fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
		fnic_del_tport_timer_sync(fnic, tport);
		fnic_fdls_remove_tport(&fnic->iport, tport, flags);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * fnic_tport_event_handler() - handle remote port events queued on the
 * tport event list.
 *
 * @work: Handle to the remote port being dequeued
 */
void fnic_tport_event_handler(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, tport_work);
	struct fnic_tport_event_s *cur_evt, *next;
	unsigned long flags;
	struct fnic_tport_s *tport;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list,
				 links) {
		tport = cur_evt->arg1;
		switch (cur_evt->event) {
		case TGT_EV_RPORT_ADD:
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Add rport event");
			if (tport->state == FDLS_TGT_STATE_READY) {
				fnic_fdls_add_tport(&fnic->iport,
					(struct fnic_tport_s *) cur_evt->arg1,
					flags);
			} else {
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "Target not ready. Add rport event dropped: 0x%x",
					     tport->fcid);
			}
			break;
		case TGT_EV_RPORT_DEL:
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Remove rport event");
			if (tport->state == FDLS_TGT_STATE_OFFLINING) {
				fnic_fdls_remove_tport(&fnic->iport,
					(struct fnic_tport_s *) cur_evt->arg1,
					flags);
			} else {
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "remove rport event dropped tport fcid: 0x%x",
					     tport->fcid);
			}
			break;
		case TGT_EV_TPORT_DELETE:
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Delete tport event");
			fdls_delete_tport(tport->iport, tport);
			break;
		default:
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Unknown tport event");
			break;
		}
		list_del(&cur_evt->links);
		kfree(cur_evt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

void fnic_flush_tport_event_list(struct fnic *fnic)
{
	struct fnic_tport_event_s *cur_evt, *next;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list,
				 links) {
		list_del(&cur_evt->links);
		kfree(cur_evt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

void fnic_reset_work_handler(struct work_struct *work)
{
	struct fnic *cur_fnic, *next_fnic;
	unsigned long reset_fnic_list_lock_flags;
	int host_reset_ret_code;

	/*
	 * This work runs on a single thread that is per fnic module,
	 * not per fnic instance. All the fnics that need to be reset
	 * have been serialized via the reset fnic list.
	 */
	spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags);
	list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) {
		list_del(&cur_fnic->links);
		spin_unlock_irqrestore(&reset_fnic_list_lock,
				       reset_fnic_list_lock_flags);

		dev_err(&cur_fnic->pdev->dev,
			"fnic: <%d>: issuing a host reset\n",
			cur_fnic->fnic_num);
		host_reset_ret_code = fnic_host_reset(cur_fnic->host);
		dev_err(&cur_fnic->pdev->dev,
			"fnic: <%d>: returned from host reset with status: %d\n",
			cur_fnic->fnic_num, host_reset_ret_code);

		spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
		cur_fnic->pc_rscn_handling_status =
			PC_RSCN_HANDLING_NOT_IN_PROGRESS;
		spin_unlock_irqrestore(&cur_fnic->fnic_lock,
				       cur_fnic->lock_flags);

		spin_lock_irqsave(&reset_fnic_list_lock,
				  reset_fnic_list_lock_flags);
	}
	spin_unlock_irqrestore(&reset_fnic_list_lock,
			       reset_fnic_list_lock_flags);
}