// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc_frame.h>
#include <linux/etherdevice.h>
#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fdls.h"
#include "fdls_fc.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
#include "fip.h"

#define MAX_RESET_WAIT_COUNT 64

struct workqueue_struct *fnic_event_queue;

static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC;

/*
 * Internal Functions
 *
 * This function initializes the source MAC address to be used in
 * outgoing frames.
 */
static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic,
					     uint8_t *src_mac)
{
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x",
		     src_mac[0], src_mac[1], src_mac[2], src_mac[3],
		     src_mac[4], src_mac[5]);

	memcpy(fnic->iport.fpma, src_mac, 6);
}

/*
 * This function initializes the destination MAC address to be used in
 * outgoing frames.
 */
static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic,
					     uint8_t *dst_mac)
{
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x",
		     dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3],
		     dst_mac[4], dst_mac[5]);

	memcpy(fnic->iport.fcfmac, dst_mac, 6);
}

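/*
 * Report the fc_host port state: link down when the link is down,
 * online when the iport is ready, and offline otherwise.
 */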
void fnic_get_host_port_state(struct Scsi_Host *shost)
{
	struct fnic *fnic = *((struct fnic **) shost_priv(shost));
	struct fnic_iport_s *iport = &fnic->iport;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (!fnic->link_status)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else if (iport->state == FNIC_IPORT_STATE_READY)
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
	else
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

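/*
 * Handle a physical link state change. On link up, either kick off FIP
 * VLAN discovery or go straight to fabric discovery; on link down, drop
 * the FPMA filter, clean up FIP state and notify FDLS.
 */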
void fnic_fdls_link_status_change(struct fnic *fnic, int linkup)
{
	struct fnic_iport_s *iport = &fnic->iport;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "link up: %d, usefip: %d", linkup, iport->usefip);

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

	if (linkup) {
		if (iport->usefip) {
			iport->state = FNIC_IPORT_STATE_FIP;
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "link up: %d, usefip: %d", linkup, iport->usefip);
			fnic_fcoe_send_vlan_req(fnic);
		} else {
			iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "iport->state: %d", iport->state);
			fnic_fdls_disc_start(iport);
		}
	} else {
		iport->state = FNIC_IPORT_STATE_LINK_WAIT;
		if (!is_zero_ether_addr(iport->fpma))
			vnic_dev_del_addr(fnic->vdev, iport->fpma);
		fnic_common_fip_cleanup(fnic);
		fnic_fdls_link_down(iport);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}

/*
 * The FPMA can be taken from the ethhdr (dst_mac), from the FLOGI
 * response, or derived from the FC_MAP and FCID combination. While
 * these should all be the same, revisit this if there is any
 * possibility of a mismatch.
 */
void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
			       uint8_t *fcid)
{
	struct fnic *fnic = iport->fnic;
	struct ethhdr *ethhdr = (struct ethhdr *) rx_frame;
	uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };

	memcpy(&fcmac[3], fcid, 3);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x",
		     ethhdr->h_dest[0], ethhdr->h_dest[1],
		     ethhdr->h_dest[2], ethhdr->h_dest[3],
		     ethhdr->h_dest[4], ethhdr->h_dest[5]);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x",
		     fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
		     fcmac[5]);

	fnic_fdls_set_fcoe_srcmac(fnic, fcmac);
	fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source);
}

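/*
 * Initialize the iport embedded in this fnic: set the initial state,
 * record whether FIP is in use, and set up the tport lists before
 * handing off to FDLS discovery initialization.
 */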
void fnic_fdls_init(struct fnic *fnic, int usefip)
{
	struct fnic_iport_s *iport = &fnic->iport;

	/* Initialize iPort structure */
	iport->state = FNIC_IPORT_STATE_INIT;
	iport->fnic = fnic;
	iport->usefip = usefip;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x",
		     iport->hwmac[0], iport->hwmac[1], iport->hwmac[2],
		     iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]);

	INIT_LIST_HEAD(&iport->tport_list);
	INIT_LIST_HEAD(&iport->tport_list_pending_del);

	fnic_fdls_disc_init(iport);
}

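/*
 * Work function for link events. It reads the current link status from
 * the device, serializes against an in-progress fnic reset, and
 * translates the old/new status pair (including a link bounce detected
 * via the link down counter) into fnic_fdls_link_status_change() calls.
 */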
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	int old_link_status;
	u32 old_link_down_cnt;
	int max_count = 0;

	if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI)
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Interrupt mode is not MSI\n");

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Stop link rx events\n");
		return;
	}

	/* Do not process if the fnic is already in a transitional state */
	if ((fnic->state != FNIC_IN_ETH_MODE)
	    && (fnic->state != FNIC_IN_FC_MODE)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic in transitional state: %d. link up: %d ignored",
			     fnic->state, vnic_dev_link_status(fnic->vdev));
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Current link status: %d iport state: %d\n",
			     fnic->link_status, fnic->iport.state);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	while (fnic->reset_in_progress == IN_PROGRESS) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic reset in progress. Link event needs to wait\n");

		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "waiting for reset completion\n");
		wait_for_completion_timeout(&fnic->reset_completion_wait,
					    msecs_to_jiffies(5000));
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "woken up from reset completion wait\n");
		spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

		max_count++;
		if (max_count >= MAX_RESET_WAIT_COUNT) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
217 "Rstth waited for too long. Skipping handle link event\n");
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			return;
		}
	}
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Marking fnic reset in progress\n");
	fnic->reset_in_progress = IN_PROGRESS;

	if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) ||
	    (fnic->link_status != old_link_status)) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "old link status: %d link status: %d\n",
			     old_link_status, (int) fnic->link_status);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "old down count %d down count: %d\n",
			     old_link_down_cnt, (int) fnic->link_down_cnt);
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "down->down\n");
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock,
						       fnic->lock_flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "up->down. Link down\n");
				fnic_fdls_link_status_change(fnic, 0);

				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "down->up. Link up\n");
				fnic_fdls_link_status_change(fnic, 1);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock,
						       fnic->lock_flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "up->up\n");
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "down->up. Link up\n");
		fnic_fdls_link_status_change(fnic, 1);
	} else {
		/* UP -> DOWN */
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "up->down. Link down\n");
		fnic_fdls_link_status_change(fnic, 0);
	}

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	fnic->reset_in_progress = NOT_IN_PROGRESS;
	complete(&fnic->reset_completion_wait);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Marking fnic reset completion\n");
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}

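/*
 * Work function that drains the receive frame queue and hands each
 * frame to FDLS. The queue is left intact while the fnic is in a
 * transitional state; it is serviced again once a stable state is
 * reached.
 */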
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fnic_frame_list *cur_frame, *next;
	int fchdr_offset = 0;

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
		if (fnic->stop_rx_link_events) {
			list_del(&cur_frame->links);
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			kfree(cur_frame->fp);
			mempool_free(cur_frame, fnic->frame_elem_pool);
			return;
		}

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Cannot process frame in transitional state\n");
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			return;
		}

		list_del(&cur_frame->links);

		/* Frames from FCP_RQ will have ethhdrs stripped off */
		fchdr_offset = (cur_frame->rx_ethhdr_stripped) ?
			0 : FNIC_ETH_FCOE_HDRS_OFFSET;

		fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp,
				     cur_frame->frame_len, fchdr_offset);

		kfree(cur_frame->fp);
		mempool_free(cur_frame, fnic->frame_elem_pool);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}

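/*
 * Work function that drains the FIP frame queue. As with FC frames,
 * the queue is left intact while the fnic is in a transitional state.
 * A nonzero return from fdls_fip_recv_frame() indicates that the frame
 * is to be freed here.
 */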
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic_frame_list *cur_frame, *next;
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Processing FIP frame\n");

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue,
				 links) {
		if (fnic->stop_rx_link_events) {
			list_del(&cur_frame->links);
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			kfree(cur_frame->fp);
			kfree(cur_frame);
			return;
		}

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock,
					       fnic->lock_flags);
			return;
		}

		list_del(&cur_frame->links);

		if (fdls_fip_recv_frame(fnic, cur_frame->fp)) {
			kfree(cur_frame->fp);
			kfree(cur_frame);
		}
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @fp: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp)
{
	struct ethhdr *eh;
	struct fnic_frame_list *fip_fr_elem;
	unsigned long flags;

	eh = (struct ethhdr *) fp;
	if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) {
		fip_fr_elem = kzalloc(sizeof(struct fnic_frame_list),
				      GFP_ATOMIC);
		if (!fip_fr_elem)
			return 0;
		fip_fr_elem->fp = fp;
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;	/* let caller know packet was used */
	}

	return 0;
}


/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	struct fnic_iport_s *iport = &fnic->iport;
	u8 *ctl = iport->hwmac;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Update MAC: %pM\n", new);

	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);

	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

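/*
 * Per-descriptor RQ completion callback. Decodes the completion (FCP
 * or ENET type), diverts FIP frames to the FIP queue, drops frames
 * with CRC/encoding errors, and queues good FCoE frames for
 * fnic_handle_frame().
 */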
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq,
				    struct cq_desc *cq_desc,
				    struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	uint8_t *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int ethhdr_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, vlan;
	u32 rss_hash;
	u16 checksum;
	u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	u16 enet_bytes_written = 0;
	u32 bytes_written = 0;
	unsigned long flags;
	struct fnic_frame_list *frame_elem = NULL;
	struct ethhdr *eh;

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	fp = (uint8_t *) buf->os_buf;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type,
				   &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fnic_crc_ok,
				   &exchange_id, &tmpl, &fcp_bytes_written,
				   &sof, &eof, &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		ethhdr_stripped = 1;
		bytes_written = fcp_bytes_written;
	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type,
				    &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &enet_bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fnic_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);

		ethhdr_stripped = 0;
		bytes_written = enet_bytes_written;

		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "fnic 0x%p fcs error. Dropping packet.\n", fnic);
			goto drop;
		}
		eh = (struct ethhdr *) fp;
		if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) {
			if (fnic_import_rq_eth_pkt(fnic, fp))
				return;

			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Dropping h_proto 0x%x",
				     be16_to_cpu(eh->h_proto));
			goto drop;
		}
	} else {
		/* wrong CQ type */
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fnic_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic->stop_rx_link_events: %d\n",
			     fnic->stop_rx_link_events);
		goto drop;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	frame_elem = mempool_alloc(fnic->frame_elem_pool,
				   GFP_ATOMIC | __GFP_ZERO);
	if (!frame_elem) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Failed to allocate memory for frame elem");
		goto drop;
	}
	frame_elem->fp = fp;
	frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
	frame_elem->frame_len = bytes_written;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&frame_elem->links, &fnic->frame_queue);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	queue_work(fnic_event_queue, &fnic->frame_work);
	return;

drop:
	kfree(fp);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

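/*
 * Service all receive completion queues, up to rq_work_to_do entries
 * each, replenishing the RQs with fresh buffers unless receive
 * processing has been stopped.
 */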
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done && fnic->stop_rx_link_events != 1) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->host,
					     "fnic_alloc_rq_frame can't alloc frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	void *buf;
	u16 len;
	dma_addr_t pa;
	int ret;

	len = FNIC_FRAME_HT_ROOM;
	buf = kmalloc(len, GFP_ATOMIC);
	if (!buf) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Unable to allocate RQ buffer of size: %d\n", len);
		return -ENOMEM;
	}

	pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		ret = -ENOMEM;
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "PCI mapping failed with error %d\n", ret);
		goto free_buf;
	}

	fnic_queue_rq_desc(rq, buf, pa, len);
	return 0;

free_buf:
	kfree(buf);
	return ret;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	void *rq_buf = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);

	kfree(rq_buf);
	buf->os_buf = NULL;
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len)
{
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	int ret = 0;
	unsigned long flags;

	pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE);
	/* Do not queue a descriptor with an unusable DMA address */
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "DMA mapping of frame failed");
		return -ENOMEM;
	}

	if ((fnic_fc_trace_set_data(fnic->fnic_num,
				    FNIC_FC_SEND | 0x80, (char *) frame,
				    frame_len)) != 0) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic ctlr frame trace error");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		dma_unmap_single(&fnic->pdev->dev, pa, frame_len,
				 DMA_TO_DEVICE);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "vnic work queue descriptor is not available");
		ret = -1;
		goto fnic_send_frame_end;
	}

	/* hw inserts cos value */
	fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T,
			   0, fnic->vlan_id, 1, 1, 1);

fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return ret;
}

/**
 * fdls_send_fcoe_frame() - send a filled-in FC frame, filling in eth and
 * FCoE info.
 * @fnic: fnic instance
 * @frame: frame structure with FC payload filled in
 * @frame_size: length of the frame to be sent
 * @srcmac: source mac address
 * @dstmac: destination mac address
 *
 * This interface is used only in the non-fast path (login, fabric
 * registrations, etc.).
 *
 * Called with the fnic lock held.
 */
static int
fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size,
		     uint8_t *srcmac, uint8_t *dstmac)
{
	struct ethhdr *pethhdr;
	struct fcoe_hdr *pfcoe_hdr;
	struct fnic_frame_list *frame_elem;
	int len = frame_size;
	int ret;
	struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame +
			FNIC_ETH_FCOE_HDRS_OFFSET);

	pethhdr = (struct ethhdr *) frame;
	pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE);
	memcpy(pethhdr->h_source, srcmac, ETH_ALEN);
	memcpy(pethhdr->h_dest, dstmac, ETH_ALEN);

	pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr));
	pfcoe_hdr->fcoe_sof = FC_SOF_I3;

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	if ((fnic->state != FNIC_IN_FC_MODE)
	    && (fnic->state != FNIC_IN_ETH_MODE)) {
		frame_elem = mempool_alloc(fnic->frame_elem_pool,
					   GFP_ATOMIC | __GFP_ZERO);
		if (!frame_elem) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Failed to allocate memory for frame elem");
			return -ENOMEM;
		}

		FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			     "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n",
			     ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id),
			     fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
		frame_elem->fp = frame;
		frame_elem->frame_len = len;
		list_add_tail(&frame_elem->links, &fnic->tx_queue);
		return 0;
	}

	fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing");

	ret = fnic_send_frame(fnic, frame, len);
	return ret;
}

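/*
 * Select the source/destination MACs for an outgoing FCoE frame: the
 * learnt FPMA/FCF MAC pair once FLOGI has completed, otherwise the
 * hardware MAC and FCOE_ALL_FCF_MAC, then hand off to
 * fdls_send_fcoe_frame().
 */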
void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
			  int frame_size)
{
	struct fnic *fnic = iport->fnic;
	uint8_t *dstmac, *srcmac;

	/* If module unload is in-progress, don't send */
	if (fnic->in_remove)
		return;

	if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) {
		srcmac = iport->fpma;
		dstmac = iport->fcfmac;
	} else {
		srcmac = iport->hwmac;
		dstmac = FCOE_ALL_FCF_MAC;
	}

	fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac);
}

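/*
 * Send a FIP frame on the wire, dumping it for debug first. Returns -1
 * if module removal is in progress, otherwise the return value of
 * fnic_send_frame().
 */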
int
fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame,
		    int frame_size)
{
	struct fnic *fnic = iport->fnic;

	if (fnic->in_remove)
		return -1;

	fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing");
	return fnic_send_frame(fnic, frame, frame_size);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @work: pointer to work element
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, flush_work);
	struct fc_frame *fp;
	struct fnic_frame_list *cur_frame, *next;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Flush queued frames");

	list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
		fp = cur_frame->fp;
		list_del(&cur_frame->links);
		fnic_send_frame(fnic, fp, cur_frame->frame_len);
		mempool_free(cur_frame, fnic->frame_elem_pool);
	}
}

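/*
 * Register the Port_ID assigned by FLOGI with the firmware. The fnic
 * moves to FNIC_IN_ETH_TRANS_FC_MODE while the registration is in
 * flight, and the FPMA is marked as learnt on success.
 */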
int
fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
			  void *fp)
{
	struct fnic *fnic = iport->fnic;
	struct ethhdr *ethhdr;
	int ret;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id,
		     fp, fnic->state);

	if (fp) {
		ethhdr = (struct ethhdr *) fp;
		vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest);
	}

	/* Change state to reflect transition to FC mode */
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) {
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	} else {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Unexpected fnic state while processing FLOGI response\n");
		return -1;
	}

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);
	if (ret < 0) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "FLOGI registration error ret: %d fnic state: %d\n",
			     ret, fnic->state);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;

		return -1;
	}
	iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "FLOGI registration success\n");
	return 0;
}

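/*
 * Free every frame element on the given transmit queue along with the
 * frame it carries.
 */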
void fnic_free_txq(struct list_head *head)
{
	struct fnic_frame_list *cur_frame, *next;

	list_for_each_entry_safe(cur_frame, next, head, links) {
		list_del(&cur_frame->links);
		kfree(cur_frame->fp);
		kfree(cur_frame);
	}
}

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);
	mempool_free(buf->os_buf, fnic->frame_pool);
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

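/*
 * Service the work (transmit) completion queues, which follow the
 * receive completion queues in the cq array, and return the total work
 * done.
 */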
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count + i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	kfree(buf->os_buf);
	buf->os_buf = NULL;
}

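/*
 * Register a target port with the FC transport. The fnic lock is
 * dropped around fc_remote_port_add(); the returned rport is then
 * linked to the tport via the rport dd_data.
 */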
void
fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport,
		    unsigned long flags)
{
	struct fnic *fnic = iport->fnic;
	struct fc_rport *rport;
	struct fc_rport_identifiers ids;
	struct rport_dd_data_s *rdd_data;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Adding rport fcid: 0x%x", tport->fcid);

	ids.node_name = tport->wwnn;
	ids.port_name = tport->wwpn;
	ids.port_id = tport->fcid;
	ids.roles = FC_RPORT_ROLE_FCP_TARGET;

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	rport = fc_remote_port_add(fnic->host, 0, &ids);
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (!rport) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Failed to add rport for tport: 0x%x", tport->fcid);
		return;
	}

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Added rport fcid: 0x%x", tport->fcid);

	/* Mimic these assignments in queuecommand to avoid timing issues */
	rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
	rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
	rdd_data = rport->dd_data;
	rdd_data->tport = tport;
	rdd_data->iport = iport;
	tport->rport = rport;
	tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
}

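/*
 * Take a target port offline and free it. If an rport was registered,
 * the fnic lock is dropped around fc_remote_port_delete() and the
 * rport dd_data back-pointers are cleared; otherwise the tport timer
 * is stopped and the tport freed directly.
 */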
void
fnic_fdls_remove_tport(struct fnic_iport_s *iport,
		       struct fnic_tport_s *tport, unsigned long flags)
{
	struct fnic *fnic = iport->fnic;
	struct rport_dd_data_s *rdd_data;
	struct fc_rport *rport;

	if (!tport)
		return;

	fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE);
	rport = tport->rport;

	if (rport) {
		/*
		 * tport resource release will be done
		 * after fnic_terminate_rport_io()
		 */
		tport->flags |= FNIC_FDLS_TPORT_DELETED;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		/* Interface to scsi_fc_transport */
		fc_remote_port_delete(rport);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Deregistered and freed tport fcid: 0x%x from scsi transport fc",
			     tport->fcid);

		/*
		 * the dd_data is allocated by fc transport
		 * of size dd_fcrport_size
		 */
		rdd_data = rport->dd_data;
		rdd_data->tport = NULL;
		rdd_data->iport = NULL;
		list_del(&tport->links);
		kfree(tport);
	} else {
		fnic_del_tport_timer_sync(fnic, tport);
		list_del(&tport->links);
		kfree(tport);
	}
}

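/*
 * Mark every target port on the iport as offlining and remove it,
 * holding the fnic lock across the walk of the tport list.
 */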
void fnic_delete_fcp_tports(struct fnic *fnic)
{
	struct fnic_tport_s *tport, *next;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "removing fcp rport fcid: 0x%x", tport->fcid);
		fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
		fnic_del_tport_timer_sync(fnic, tport);
		fnic_fdls_remove_tport(&fnic->iport, tport, flags);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * fnic_tport_event_handler() - Handler for remote port events in the
 * tport_event_queue.
 * @work: Handle to the remote port being dequeued
 */
void fnic_tport_event_handler(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, tport_work);
	struct fnic_tport_event_s *cur_evt, *next;
	unsigned long flags;
	struct fnic_tport_s *tport;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
		tport = cur_evt->arg1;
		switch (cur_evt->event) {
		case TGT_EV_RPORT_ADD:
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Add rport event");
			if (tport->state == FDLS_TGT_STATE_READY) {
				fnic_fdls_add_tport(&fnic->iport,
					(struct fnic_tport_s *) cur_evt->arg1,
					flags);
			} else {
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "Target not ready. Add rport event dropped: 0x%x",
					     tport->fcid);
			}
			break;
		case TGT_EV_RPORT_DEL:
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Remove rport event");
			if (tport->state == FDLS_TGT_STATE_OFFLINING) {
				fnic_fdls_remove_tport(&fnic->iport,
					(struct fnic_tport_s *) cur_evt->arg1,
					flags);
			} else {
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "remove rport event dropped tport fcid: 0x%x",
					     tport->fcid);
			}
			break;
		case TGT_EV_TPORT_DELETE:
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Delete tport event");
			fdls_delete_tport(tport->iport, tport);
			break;
		default:
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Unknown tport event");
			break;
		}
		list_del(&cur_evt->links);
		kfree(cur_evt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

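/*
 * Discard all pending entries on the tport event list without
 * processing them.
 */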
void fnic_flush_tport_event_list(struct fnic *fnic)
{
	struct fnic_tport_event_s *cur_evt, *next;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
		list_del(&cur_evt->links);
		kfree(cur_evt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

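/*
 * Work function that issues a host reset for every fnic queued on the
 * global reset_fnic_list, dropping the list lock across each reset and
 * clearing the PC-RSCN handling flag once the reset returns.
 */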
void fnic_reset_work_handler(struct work_struct *work)
{
	struct fnic *cur_fnic, *next_fnic;
	unsigned long reset_fnic_list_lock_flags;
	int host_reset_ret_code;

	/*
	 * This is a single thread. It is per fnic module, not per fnic.
	 * All the fnics that need to be reset
	 * have been serialized via the reset fnic list.
	 */
	spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags);
	list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) {
		list_del(&cur_fnic->links);
		spin_unlock_irqrestore(&reset_fnic_list_lock,
				       reset_fnic_list_lock_flags);

		dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n",
			cur_fnic->fnic_num);
		host_reset_ret_code = fnic_host_reset(cur_fnic->host);
		dev_err(&cur_fnic->pdev->dev,
			"fnic: <%d>: returned from host reset with status: %d\n",
			cur_fnic->fnic_num, host_reset_ret_code);

		spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
		cur_fnic->pc_rscn_handling_status =
			PC_RSCN_HANDLING_NOT_IN_PROGRESS;
		spin_unlock_irqrestore(&cur_fnic->fnic_lock,
				       cur_fnic->lock_flags);

		spin_lock_irqsave(&reset_fnic_list_lock,
				  reset_fnic_list_lock_flags);
	}
	spin_unlock_irqrestore(&reset_fnic_list_lock,
			       reset_fnic_list_lock_flags);
}