xref: /linux/drivers/scsi/qedf/qedf_main.c (revision 88e45067a30918ebb4942120892963e2311330af)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  QLogic FCoE Offload Driver
4  *  Copyright (c) 2016-2018 Cavium Inc.
5  */
6 #include <linux/init.h>
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 #include <linux/device.h>
11 #include <linux/highmem.h>
12 #include <linux/crc32.h>
13 #include <linux/interrupt.h>
14 #include <linux/list.h>
15 #include <linux/kthread.h>
16 #include <linux/phylink.h>
17 #include <scsi/libfc.h>
18 #include <scsi/scsi_host.h>
19 #include <scsi/fc_frame.h>
20 #include <linux/if_ether.h>
21 #include <linux/if_vlan.h>
22 #include <linux/cpu.h>
23 #include "qedf.h"
24 #include "qedf_dbg.h"
25 #include <uapi/linux/pci_regs.h>
26 
27 const struct qed_fcoe_ops *qed_ops;
28 
29 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
30 static void qedf_remove(struct pci_dev *pdev);
31 static void qedf_shutdown(struct pci_dev *pdev);
32 static void qedf_schedule_recovery_handler(void *dev);
33 static void qedf_recovery_handler(struct work_struct *work);
34 static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);
35 
36 /*
37  * Driver module parameters.
38  */
39 static unsigned int qedf_dev_loss_tmo = 60;
40 module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
41 MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
42 	"remote ports (default 60)");
43 
44 uint qedf_debug = QEDF_LOG_INFO;
45 module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
46 MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
47 	" mask");
48 
49 static uint qedf_fipvlan_retries = 60;
50 module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
51 MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
52 	"before giving up (default 60)");
53 
54 static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
55 module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
56 MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
57 	"(default 1002).");
58 
59 static int qedf_default_prio = -1;
60 module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
61 MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
62 	" traffic (value between 0 and 7, default 3).");
63 
64 uint qedf_dump_frames;
65 module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
66 MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
67 	"(default off)");
68 
69 static uint qedf_queue_depth;
70 module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
71 MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
72 	"by the qedf driver. Default is 0 (use OS default).");
73 
74 uint qedf_io_tracing;
75 module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
77 	"into trace buffer. (default off).");
78 
79 static uint qedf_max_lun = MAX_FIBRE_LUNS;
80 module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
81 MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver "
82 	"supports. (default 0xffffffff)");
83 
84 uint qedf_link_down_tmo;
85 module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
86 MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
87 	"link is down by N seconds.");
88 
89 bool qedf_retry_delay;
90 module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
91 MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry "
92 	"delay handling (default off).");
93 
94 static bool qedf_dcbx_no_wait;
95 module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
96 MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
97 	"sending FIP VLAN requests on link up (Default: off).");
98 
99 static uint qedf_dp_module;
100 module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
101 MODULE_PARM_DESC(dp_module, " Bit flags controlling verbose printks, passed "
102 	"to the qed module during probe.");
103 
104 static uint qedf_dp_level = QED_LEVEL_NOTICE;
105 module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
106 MODULE_PARM_DESC(dp_level, " printk verbosity control passed to the qed "
107 	"module during probe (0-3, 0 being the most verbose).");
108 
109 static bool qedf_enable_recovery = true;
110 module_param_named(enable_recovery, qedf_enable_recovery,
111 		bool, S_IRUGO | S_IWUSR);
112 MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
113 		"interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");
114 
115 struct workqueue_struct *qedf_io_wq;
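/*
 * Usage sketch (illustrative, not from this file): the parameters above are
 * supplied at module load time, e.g.
 *
 *     modprobe qedf debug=1 fipvlan_retries=90 fallback_vlan=1002
 *
 * and the S_IWUSR ones (debug, dump_frames, io_tracing, retry_delay,
 * dcbx_no_wait, enable_recovery) can also be changed at runtime through
 * /sys/module/qedf/parameters/.
 */
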
116 
117 static struct fcoe_percpu_s qedf_global;
118 static DEFINE_SPINLOCK(qedf_global_lock);
119 
120 static struct kmem_cache *qedf_io_work_cache;
121 
122 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
123 {
124 	int vlan_id_tmp = 0;
125 
126 	vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
127 	qedf->vlan_id = vlan_id_tmp;
128 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
129 		  "Setting vlan_id=0x%04x prio=%d.\n",
130 		  vlan_id_tmp, qedf->prio);
131 }
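
/*
 * Worked example (illustrative): with the default priority 3 and the
 * fallback VLAN 1002 (0x3ea), the stored TCI is
 * (3 << VLAN_PRIO_SHIFT) | 0x3ea = 0x6000 | 0x03ea = 0x63ea, i.e. the
 * 802.1p priority occupies the top three bits of the 16-bit tag.
 */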
132 
133 /* Returns true if we have a valid vlan, false otherwise */
134 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
135 {
136 
137 	while (qedf->fipvlan_retries--) {
138 		/* Catch the case where the link goes down during FIP VLAN retries */
139 		if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
140 			QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
141 			return false;
142 		}
143 
144 		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
145 			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
146 			return false;
147 		}
148 
149 		if (qedf->vlan_id > 0) {
150 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
151 				  "vlan = 0x%x already set, calling ctlr_link_up.\n",
152 				  qedf->vlan_id);
153 			if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
154 				fcoe_ctlr_link_up(&qedf->ctlr);
155 			return true;
156 		}
157 
158 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
159 			   "Retry %d.\n", qedf->fipvlan_retries);
160 		init_completion(&qedf->fipvlan_compl);
161 		qedf_fcoe_send_vlan_req(qedf);
162 		wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
163 	}
164 
165 	return false;
166 }
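
/*
 * Note: each iteration of the loop above sends one FIP VLAN discovery
 * request and waits up to one second (1 * HZ) for the completion, so the
 * default fipvlan_retries of 60 probes for roughly a minute before the
 * caller falls back to qedf_fallback_vlan.
 */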
167 
168 static void qedf_handle_link_update(struct work_struct *work)
169 {
170 	struct qedf_ctx *qedf =
171 	    container_of(work, struct qedf_ctx, link_update.work);
172 	int rc;
173 
174 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
175 		  atomic_read(&qedf->link_state));
176 
177 	if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
178 		rc = qedf_initiate_fipvlan_req(qedf);
179 		if (rc)
180 			return;
181 
182 		if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
183 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
184 				  "Link is down, resetting vlan_id.\n");
185 			qedf->vlan_id = 0;
186 			return;
187 		}
188 
189 		/*
190 		 * If we get here then we never received a response to our
191 		 * fip vlan request so set the vlan_id to the default and
192 		 * tell FCoE that the link is up
193 		 */
194 		QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
195 			   "response, falling back to default VLAN %d.\n",
196 			   qedf_fallback_vlan);
197 		qedf_set_vlan_id(qedf, qedf_fallback_vlan);
198 
199 		/*
200 		 * Zero out data_src_addr so we'll update it with the new
201 		 * lport port_id
202 		 */
203 		eth_zero_addr(qedf->data_src_addr);
204 		fcoe_ctlr_link_up(&qedf->ctlr);
205 	} else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
206 		/*
207 		 * If we hit here and link_down_tmo_valid is still 1 it means
208 		 * that link_down_tmo timed out so set it to 0 to make sure any
209 		 * other readers have accurate state.
210 		 */
211 		atomic_set(&qedf->link_down_tmo_valid, 0);
212 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
213 		    "Calling fcoe_ctlr_link_down().\n");
214 		fcoe_ctlr_link_down(&qedf->ctlr);
215 		if (qedf_wait_for_upload(qedf) == false)
216 			QEDF_ERR(&qedf->dbg_ctx,
217 				 "Could not upload all sessions.\n");
218 		/* Reset the number of FIP VLAN retries */
219 		qedf->fipvlan_retries = qedf_fipvlan_retries;
220 	}
221 }
222 
223 #define	QEDF_FCOE_MAC_METHOD_GRANTED_MAC		1
224 #define QEDF_FCOE_MAC_METHOD_FCF_MAP			2
225 #define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC		3
226 static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
227 {
228 	u8 *granted_mac;
229 	struct fc_frame_header *fh = fc_frame_header_get(fp);
230 	u8 fc_map[3];
231 	int method = 0;
232 
233 	/* Get granted MAC address from FIP FLOGI payload */
234 	granted_mac = fr_cb(fp)->granted_mac;
235 
236 	/*
237 	 * We set the source MAC for FCoE traffic based on the Granted MAC
238 	 * address from the switch.
239 	 *
240 	 * If granted_mac is non-zero, we use that.
241 	 * If granted_mac is zeroed out, create the FCoE MAC based on
242 	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
243 	 * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
244 	 * d_id of the FLOGI frame.
245 	 */
246 	if (!is_zero_ether_addr(granted_mac)) {
247 		ether_addr_copy(qedf->data_src_addr, granted_mac);
247 		method = QEDF_FCOE_MAC_METHOD_GRANTED_MAC;
249 	} else if (qedf->ctlr.sel_fcf->fc_map != 0) {
250 		hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
251 		qedf->data_src_addr[0] = fc_map[0];
252 		qedf->data_src_addr[1] = fc_map[1];
253 		qedf->data_src_addr[2] = fc_map[2];
254 		qedf->data_src_addr[3] = fh->fh_d_id[0];
255 		qedf->data_src_addr[4] = fh->fh_d_id[1];
256 		qedf->data_src_addr[5] = fh->fh_d_id[2];
257 		method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
258 	} else {
259 		fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
260 		method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
261 	}
262 
263 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
264 	    "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
265 }
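
/*
 * Worked example (illustrative): in the FCF-map case, an fc_map of 0x0efc00
 * (the default FC-MAP) combined with a FLOGI d_id of 0x010203 produces the
 * MAC 0e:fc:00:01:02:03 - FC-MAP in the upper three bytes, FC_ID in the
 * lower three.
 */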
266 
267 static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
268 	void *arg)
269 {
270 	struct fc_exch *exch = fc_seq_exch(seq);
271 	struct fc_lport *lport = exch->lp;
272 	struct qedf_ctx *qedf = lport_priv(lport);
273 
274 	if (!qedf) {
275 		QEDF_ERR(NULL, "qedf is NULL.\n");
276 		return;
277 	}
278 
279 	/*
280 	 * If ERR_PTR is set then don't try to stat anything as it will cause
281 	 * a crash when we access fp.
282 	 */
283 	if (IS_ERR(fp)) {
284 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
285 		    "fp has IS_ERR() set.\n");
286 		goto skip_stat;
287 	}
288 
289 	/* Log stats for FLOGI reject */
290 	if (fc_frame_payload_op(fp) == ELS_LS_RJT)
291 		qedf->flogi_failed++;
292 	else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
293 		/* Set the source MAC we will use for FCoE traffic */
294 		qedf_set_data_src_addr(qedf, fp);
295 		qedf->flogi_pending = 0;
296 	}
297 
298 	/* Complete flogi_compl so we can proceed to sending ADISCs */
299 	complete(&qedf->flogi_compl);
300 
301 skip_stat:
302 	/* Report response to libfc */
303 	fc_lport_flogi_resp(seq, fp, lport);
304 }
305 
306 static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
307 	struct fc_frame *fp, unsigned int op,
308 	void (*resp)(struct fc_seq *,
309 	struct fc_frame *,
310 	void *),
311 	void *arg, u32 timeout)
312 {
313 	struct qedf_ctx *qedf = lport_priv(lport);
314 
315 	/*
316 	 * Intercept FLOGI for statistics purposes. Note we use the resp
317 	 * callback to tell if this is really a flogi.
318 	 */
319 	if (resp == fc_lport_flogi_resp) {
320 		qedf->flogi_cnt++;
321 		qedf->flogi_pending++;
322 
323 		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
324 			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
325 			qedf->flogi_pending = 0;
326 		}
327 
328 		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
329 			schedule_delayed_work(&qedf->stag_work, 2);
330 			return NULL;
331 		}
332 
333 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
334 		    arg, timeout);
335 	}
336 
337 	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
338 }
339 
340 int qedf_send_flogi(struct qedf_ctx *qedf)
341 {
342 	struct fc_lport *lport;
343 	struct fc_frame *fp;
344 
345 	lport = qedf->lport;
346 
347 	if (!lport->tt.elsct_send) {
348 		QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
349 		return -EINVAL;
350 	}
351 
352 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
353 	if (!fp) {
354 		QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
355 		return -ENOMEM;
356 	}
357 
358 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
359 	    "Sending FLOGI to reestablish session with switch.\n");
360 	lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
361 	    ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);
362 
363 	init_completion(&qedf->flogi_compl);
364 
365 	return 0;
366 }
367 
368 /*
369  * This function is called if link_down_tmo is in use.  If we get a link up and
370  * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
371  * sessions with targets.  Otherwise, just call fcoe_ctlr_link_up().
372  */
373 static void qedf_link_recovery(struct work_struct *work)
374 {
375 	struct qedf_ctx *qedf =
376 	    container_of(work, struct qedf_ctx, link_recovery.work);
377 	struct fc_lport *lport = qedf->lport;
378 	struct fc_rport_priv *rdata;
379 	bool rc;
380 	int retries = 30;
381 	int rval, i;
382 	struct list_head rdata_login_list;
383 
384 	INIT_LIST_HEAD(&rdata_login_list);
385 
386 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
387 	    "Link down tmo did not expire.\n");
388 
389 	/*
390 	 * Essentially reset the fcoe_ctlr here without affecting the state
391 	 * of the libfc structs.
392 	 */
393 	qedf->ctlr.state = FIP_ST_LINK_WAIT;
394 	fcoe_ctlr_link_down(&qedf->ctlr);
395 
396 	/*
397 	 * Bring the link up before we send the fipvlan request so libfcoe
398 	 * can select a new fcf in parallel
399 	 */
400 	fcoe_ctlr_link_up(&qedf->ctlr);
401 
402 	/* Since the link went down and came back up, verify which VLAN we're on */
403 	qedf->fipvlan_retries = qedf_fipvlan_retries;
404 	rc = qedf_initiate_fipvlan_req(qedf);
405 	/* If getting the VLAN fails, set the VLAN to the fallback one */
406 	if (!rc)
407 		qedf_set_vlan_id(qedf, qedf_fallback_vlan);
408 
409 	/*
410 	 * We need to wait for an FCF to be selected after the
411 	 * fcoe_ctlr_link_up call, otherwise the FLOGI will be rejected.
412 	 */
413 	while (retries > 0) {
414 		if (qedf->ctlr.sel_fcf) {
415 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
416 			    "FCF reselected, proceeding with FLOGI.\n");
417 			break;
418 		}
419 		msleep(500);
420 		retries--;
421 	}
422 
423 	if (retries < 1) {
424 		QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
425 		    "FCF selection.\n");
426 		return;
427 	}
428 
429 	rval = qedf_send_flogi(qedf);
430 	if (rval)
431 		return;
432 
433 	/* Wait for FLOGI completion before proceeding with sending ADISCs */
434 	i = wait_for_completion_timeout(&qedf->flogi_compl,
435 	    qedf->lport->r_a_tov);
436 	if (i == 0) {
437 		QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
438 		return;
439 	}
440 
441 	/*
442 	 * Call lport->tt.rport_login which will cause libfc to send an
443 	 * ADISC since the rport is in state ready.
444 	 */
445 	mutex_lock(&lport->disc.disc_mutex);
446 	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
447 		if (kref_get_unless_zero(&rdata->kref)) {
448 			fc_rport_login(rdata);
449 			kref_put(&rdata->kref, fc_rport_destroy);
450 		}
451 	}
452 	mutex_unlock(&lport->disc.disc_mutex);
453 }
454 
455 static void qedf_update_link_speed(struct qedf_ctx *qedf,
456 	struct qed_link_output *link)
457 {
458 	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
459 	struct fc_lport *lport = qedf->lport;
460 
461 	lport->link_speed = FC_PORTSPEED_UNKNOWN;
462 	lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
463 
464 	/* Set fc_host link speed */
465 	switch (link->speed) {
466 	case 10000:
467 		lport->link_speed = FC_PORTSPEED_10GBIT;
468 		break;
469 	case 25000:
470 		lport->link_speed = FC_PORTSPEED_25GBIT;
471 		break;
472 	case 40000:
473 		lport->link_speed = FC_PORTSPEED_40GBIT;
474 		break;
475 	case 50000:
476 		lport->link_speed = FC_PORTSPEED_50GBIT;
477 		break;
478 	case 100000:
479 		lport->link_speed = FC_PORTSPEED_100GBIT;
480 		break;
481 	case 20000:
482 		lport->link_speed = FC_PORTSPEED_20GBIT;
483 		break;
484 	default:
485 		lport->link_speed = FC_PORTSPEED_UNKNOWN;
486 		break;
487 	}
488 
489 	/*
490 	 * Set supported link speed by querying the supported
491 	 * capabilities of the link.
492 	 */
493 
494 	phylink_zero(sup_caps);
495 	phylink_set(sup_caps, 10000baseT_Full);
496 	phylink_set(sup_caps, 10000baseKX4_Full);
497 	phylink_set(sup_caps, 10000baseR_FEC);
498 	phylink_set(sup_caps, 10000baseCR_Full);
499 	phylink_set(sup_caps, 10000baseSR_Full);
500 	phylink_set(sup_caps, 10000baseLR_Full);
501 	phylink_set(sup_caps, 10000baseLRM_Full);
502 	phylink_set(sup_caps, 10000baseKR_Full);
503 
504 	if (linkmode_intersects(link->supported_caps, sup_caps))
505 		lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
506 
507 	phylink_zero(sup_caps);
508 	phylink_set(sup_caps, 25000baseKR_Full);
509 	phylink_set(sup_caps, 25000baseCR_Full);
510 	phylink_set(sup_caps, 25000baseSR_Full);
511 
512 	if (linkmode_intersects(link->supported_caps, sup_caps))
513 		lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
514 
515 	phylink_zero(sup_caps);
516 	phylink_set(sup_caps, 40000baseLR4_Full);
517 	phylink_set(sup_caps, 40000baseKR4_Full);
518 	phylink_set(sup_caps, 40000baseCR4_Full);
519 	phylink_set(sup_caps, 40000baseSR4_Full);
520 
521 	if (linkmode_intersects(link->supported_caps, sup_caps))
522 		lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
523 
524 	phylink_zero(sup_caps);
525 	phylink_set(sup_caps, 50000baseKR2_Full);
526 	phylink_set(sup_caps, 50000baseCR2_Full);
527 	phylink_set(sup_caps, 50000baseSR2_Full);
528 
529 	if (linkmode_intersects(link->supported_caps, sup_caps))
530 		lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
531 
532 	phylink_zero(sup_caps);
533 	phylink_set(sup_caps, 100000baseKR4_Full);
534 	phylink_set(sup_caps, 100000baseSR4_Full);
535 	phylink_set(sup_caps, 100000baseCR4_Full);
536 	phylink_set(sup_caps, 100000baseLR4_ER4_Full);
537 
538 	if (linkmode_intersects(link->supported_caps, sup_caps))
539 		lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
540 
541 	phylink_zero(sup_caps);
542 	phylink_set(sup_caps, 20000baseKR2_Full);
543 
544 	if (linkmode_intersects(link->supported_caps, sup_caps))
545 		lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
546 
547 	if (lport->host && lport->host->shost_data)
548 		fc_host_supported_speeds(lport->host) =
549 			lport->link_supported_speeds;
550 }
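
/*
 * Note: qed reports link->speed in Mbps, so e.g. 25000 maps to
 * FC_PORTSPEED_25GBIT above, and any rate without a case label is exposed
 * to the FC transport class as FC_PORTSPEED_UNKNOWN.
 */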
551 
552 static void qedf_bw_update(void *dev)
553 {
554 	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
555 	struct qed_link_output link;
556 
557 	/* Get the latest status of the link */
558 	qed_ops->common->get_link(qedf->cdev, &link);
559 
560 	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
561 		QEDF_ERR(&qedf->dbg_ctx,
562 			 "Ignore link update, driver getting unload.\n");
563 		return;
564 	}
565 
566 	if (link.link_up) {
567 		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
568 			qedf_update_link_speed(qedf, &link);
569 		else
570 			QEDF_ERR(&qedf->dbg_ctx,
571 				 "Ignore bw update, link is down.\n");
572 
573 	} else {
574 		QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
575 	}
576 }
577 
578 static void qedf_link_update(void *dev, struct qed_link_output *link)
579 {
580 	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
581 
582 	/*
583 	 * Prevent a race where we are removing the module while we get a
584 	 * link update from qed.
585 	 */
586 	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
587 		QEDF_ERR(&qedf->dbg_ctx,
588 			 "Ignore link update, driver getting unload.\n");
589 		return;
590 	}
591 
592 	if (link->link_up) {
593 		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
594 			QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
595 			    "Ignoring link up event as link is already up.\n");
596 			return;
597 		}
598 		QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
599 		    link->speed / 1000);
600 
601 		/* Cancel any pending link down work */
602 		cancel_delayed_work(&qedf->link_update);
603 
604 		atomic_set(&qedf->link_state, QEDF_LINK_UP);
605 		qedf_update_link_speed(qedf, link);
606 
607 		if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
608 		    qedf_dcbx_no_wait) {
609 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
610 			     "DCBx done.\n");
611 			if (atomic_read(&qedf->link_down_tmo_valid) > 0)
612 				queue_delayed_work(qedf->link_update_wq,
613 				    &qedf->link_recovery, 0);
614 			else
615 				queue_delayed_work(qedf->link_update_wq,
616 				    &qedf->link_update, 0);
617 			atomic_set(&qedf->link_down_tmo_valid, 0);
618 		}
619 
620 	} else {
621 		QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
622 
623 		atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
624 		atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
625 		/*
626 		 * Flag that we're waiting for the link to come back up before
627 		 * informing the fcoe layer of the event.
628 		 */
629 		if (qedf_link_down_tmo > 0) {
630 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
631 			    "Starting link down tmo.\n");
632 			atomic_set(&qedf->link_down_tmo_valid, 1);
633 		}
634 		qedf->vlan_id = 0;
635 		qedf_update_link_speed(qedf, link);
636 		queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
637 		    qedf_link_down_tmo * HZ);
638 	}
639 }
640 
641 
642 static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
643 {
644 	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
645 	u8 tmp_prio;
646 
647 	QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
648 	    "prio=%d.\n", get->operational.valid, get->operational.enabled,
649 	    get->operational.app_prio.fcoe);
650 
651 	if (get->operational.enabled && get->operational.valid) {
652 		/* If DCBX was already negotiated on link up then just exit */
653 		if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
654 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
655 			    "DCBX already set on link up.\n");
656 			return;
657 		}
658 
659 		atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
660 
661 		/*
662 		 * Set the 8021q priority in the following manner:
663 		 *
664 		 * 1. If a modparam is set use that
665 		 * 2. If the value is not between 0..7 use the default
666 		 * 3. Use the priority we get from the DCBX app tag
667 		 */
668 		tmp_prio = get->operational.app_prio.fcoe;
669 		if (qedf_default_prio > -1)
670 			qedf->prio = qedf_default_prio;
671 		else if (tmp_prio > 7) {
672 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
673 			    "FIP/FCoE prio %d out of range, setting to %d.\n",
674 			    tmp_prio, QEDF_DEFAULT_PRIO);
675 			qedf->prio = QEDF_DEFAULT_PRIO;
676 		} else
677 			qedf->prio = tmp_prio;
678 
679 		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
680 		    !qedf_dcbx_no_wait) {
681 			if (atomic_read(&qedf->link_down_tmo_valid) > 0)
682 				queue_delayed_work(qedf->link_update_wq,
683 				    &qedf->link_recovery, 0);
684 			else
685 				queue_delayed_work(qedf->link_update_wq,
686 				    &qedf->link_update, 0);
687 			atomic_set(&qedf->link_down_tmo_valid, 0);
688 		}
689 	}
690 
691 }
692 
693 static u32 qedf_get_login_failures(void *cookie)
694 {
695 	struct qedf_ctx *qedf;
696 
697 	qedf = (struct qedf_ctx *)cookie;
698 	return qedf->flogi_failed;
699 }
700 
701 static struct qed_fcoe_cb_ops qedf_cb_ops = {
702 	{
703 		.link_update = qedf_link_update,
704 		.bw_update = qedf_bw_update,
705 		.schedule_recovery_handler = qedf_schedule_recovery_handler,
706 		.dcbx_aen = qedf_dcbx_handler,
707 		.get_generic_tlv_data = qedf_get_generic_tlv_data,
708 		.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
709 		.schedule_hw_err_handler = qedf_schedule_hw_err_handler,
710 	}
711 };
712 
713 /*
714  * Various transport templates.
715  */
716 
717 static struct scsi_transport_template *qedf_fc_transport_template;
718 static struct scsi_transport_template *qedf_fc_vport_transport_template;
719 
720 /*
721  * SCSI EH handlers
722  */
723 static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
724 {
725 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
726 	struct fc_lport *lport;
727 	struct qedf_ctx *qedf;
728 	struct qedf_ioreq *io_req;
729 	struct fc_rport_libfc_priv *rp = rport->dd_data;
730 	struct fc_rport_priv *rdata;
731 	struct qedf_rport *fcport = NULL;
732 	int rc = FAILED;
733 	int wait_count = 100;
734 	int refcount = 0;
735 	int rval;
736 	int got_ref = 0;
737 
738 	lport = shost_priv(sc_cmd->device->host);
739 	qedf = (struct qedf_ctx *)lport_priv(lport);
740 
741 	/* rport and tgt are allocated together, so tgt should be non-NULL */
742 	fcport = (struct qedf_rport *)&rp[1];
743 	rdata = fcport->rdata;
744 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
745 		QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
746 		rc = SUCCESS;
747 		goto out;
748 	}
749 
750 
751 	io_req = qedf_priv(sc_cmd)->io_req;
752 	if (!io_req) {
753 		QEDF_ERR(&qedf->dbg_ctx,
754 			 "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
755 			 sc_cmd, sc_cmd->cmnd[0],
756 			 rdata->ids.port_id);
757 		rc = SUCCESS;
758 		goto drop_rdata_kref;
759 	}
760 
761 	rval = kref_get_unless_zero(&io_req->refcount);	/* ID: 005 */
762 	if (rval)
763 		got_ref = 1;
764 
765 	/* If we got a valid io_req, confirm it belongs to this sc_cmd. */
766 	if (!rval || io_req->sc_cmd != sc_cmd) {
767 		QEDF_ERR(&qedf->dbg_ctx,
768 			 "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
769 			 io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
770 
771 		goto drop_rdata_kref;
772 	}
773 
774 	if (fc_remote_port_chkready(rport)) {
775 		refcount = kref_read(&io_req->refcount);
776 		QEDF_ERR(&qedf->dbg_ctx,
777 			 "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
778 			 io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
779 			 refcount, rdata->ids.port_id);
780 
781 		goto drop_rdata_kref;
782 	}
783 
784 	rc = fc_block_rport(rport);
785 	if (rc)
786 		goto drop_rdata_kref;
787 
788 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
789 		QEDF_ERR(&qedf->dbg_ctx,
790 			 "Connection uploading, xid=0x%x., port_id=%06x\n",
791 			 io_req->xid, rdata->ids.port_id);
792 		while (io_req->sc_cmd && (wait_count != 0)) {
793 			msleep(100);
794 			wait_count--;
795 		}
796 		if (wait_count) {
797 			QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
798 			rc = SUCCESS;
799 		} else {
800 			QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
801 			rc = FAILED;
802 		}
803 		goto drop_rdata_kref;
804 	}
805 
806 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
807 		QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
808 		goto drop_rdata_kref;
809 	}
810 
811 	QEDF_ERR(&qedf->dbg_ctx,
812 		 "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
813 		 io_req, sc_cmd, io_req->xid, io_req->fp_idx,
814 		 rdata->ids.port_id);
815 
816 	if (qedf->stop_io_on_error) {
817 		qedf_stop_all_io(qedf);
818 		rc = SUCCESS;
819 		goto drop_rdata_kref;
820 	}
821 
822 	init_completion(&io_req->abts_done);
823 	rval = qedf_initiate_abts(io_req, true);
824 	if (rval) {
825 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
826 		/*
827 		 * If we fail to queue the ABTS then return this command to
828 		 * the SCSI layer as it will own and free the xid
829 		 */
830 		rc = SUCCESS;
831 		qedf_scsi_done(qedf, io_req, DID_ERROR);
832 		goto drop_rdata_kref;
833 	}
834 
835 	wait_for_completion(&io_req->abts_done);
836 
837 	if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
838 	    io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
839 	    io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
840 		/*
841 		 * If we get a response to the abort, this is success from
842 		 * the perspective that all references to the command have
843 		 * been removed from the driver and firmware
844 		 */
845 		rc = SUCCESS;
846 	} else {
847 		/* If the abort and cleanup failed then return a failure */
848 		rc = FAILED;
849 	}
850 
851 	if (rc == SUCCESS)
852 		QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
853 			  io_req->xid);
854 	else
855 		QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
856 			  io_req->xid);
857 
858 drop_rdata_kref:
859 	kref_put(&rdata->kref, fc_rport_destroy);
860 out:
861 	if (got_ref)
862 		kref_put(&io_req->refcount, qedf_release_cmd);
863 	return rc;
864 }
865 
866 static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
867 {
868 	struct scsi_target *starget = scsi_target(sc_cmd->device);
869 	struct fc_rport *rport = starget_to_rport(starget);
870 
871 	QEDF_ERR(NULL, "TARGET RESET Issued...");
872 	return qedf_initiate_tmf(rport, 0, FCP_TMF_TGT_RESET);
873 }
874 
875 static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
876 {
877 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
878 
879 	QEDF_ERR(NULL, "LUN RESET Issued...\n");
880 	return qedf_initiate_tmf(rport, sc_cmd->device->lun, FCP_TMF_LUN_RESET);
881 }
882 
883 bool qedf_wait_for_upload(struct qedf_ctx *qedf)
884 {
885 	struct qedf_rport *fcport;
886 	int wait_cnt = 120;
887 
888 	while (wait_cnt--) {
889 		if (atomic_read(&qedf->num_offloads))
890 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
891 				  "Waiting for all uploads to complete num_offloads = 0x%x.\n",
892 				  atomic_read(&qedf->num_offloads));
893 		else
894 			return true;
895 		msleep(500);
896 	}
897 
898 	rcu_read_lock();
899 	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
900 		if (test_bit(QEDF_RPORT_SESSION_READY,
901 			     &fcport->flags)) {
902 			if (fcport->rdata)
903 				QEDF_ERR(&qedf->dbg_ctx,
904 					 "Waiting for fcport %p portid=%06x.\n",
905 					 fcport, fcport->rdata->ids.port_id);
906 		} else {
907 			QEDF_ERR(&qedf->dbg_ctx,
908 				 "Waiting for fcport %p.\n", fcport);
909 		}
910 	}
911 
912 	rcu_read_unlock();
913 	return false;
914 }
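
/*
 * Note: the polling loop above checks num_offloads every 500 ms for up to
 * 120 iterations, i.e. roughly 60 seconds, before listing the fcports that
 * are still offloaded and returning false.
 */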
915 
916 /* Performs soft reset of qedf_ctx by simulating a link down/up */
917 void qedf_ctx_soft_reset(struct fc_lport *lport)
918 {
919 	struct qedf_ctx *qedf;
920 	struct qed_link_output if_link;
921 
922 	qedf = lport_priv(lport);
923 
924 	if (lport->vport) {
925 		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
926 		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
927 		return;
928 	}
929 
930 	qedf->flogi_pending = 0;
931 	/* For host reset, essentially do a soft link up/down */
932 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
933 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
934 		  "Queuing link down work.\n");
935 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
936 	    0);
937 
938 	if (qedf_wait_for_upload(qedf) == false) {
939 		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
940 		WARN_ON(atomic_read(&qedf->num_offloads));
941 	}
942 
943 	/* Before setting link up query physical link state */
944 	qed_ops->common->get_link(qedf->cdev, &if_link);
945 	/* Bail if the physical link is not up */
946 	if (!if_link.link_up) {
947 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
948 			  "Physical link is not up.\n");
949 		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
950 		return;
951 	}
952 	/* Flush and wait to make sure link down is processed */
953 	flush_delayed_work(&qedf->link_update);
954 	msleep(500);
955 
956 	atomic_set(&qedf->link_state, QEDF_LINK_UP);
957 	qedf->vlan_id = 0;
958 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
959 		  "Queue link up work.\n");
960 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
961 	    0);
962 	clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
963 }
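
/*
 * Note: the soft reset above is effectively a link bounce: queue link-down
 * work, wait for all sessions to upload, re-check the physical link, then
 * queue link-up work so FIP VLAN discovery and FLOGI start over.
 */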
964 
965 /* Reset the host by gracefully logging out and then logging back in */
966 static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
967 {
968 	struct fc_lport *lport;
969 	struct qedf_ctx *qedf;
970 
971 	lport = shost_priv(sc_cmd->device->host);
972 	qedf = lport_priv(lport);
973 
974 	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
975 	    test_bit(QEDF_UNLOADING, &qedf->flags))
976 		return FAILED;
977 
978 	QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
979 
980 	qedf_ctx_soft_reset(lport);
981 
982 	return SUCCESS;
983 }
984 
985 static int qedf_sdev_configure(struct scsi_device *sdev,
986 			       struct queue_limits *lim)
987 {
988 	if (qedf_queue_depth) {
989 		scsi_change_queue_depth(sdev, qedf_queue_depth);
990 	}
991 
992 	return 0;
993 }
994 
995 static const struct scsi_host_template qedf_host_template = {
996 	.module 	= THIS_MODULE,
997 	.name 		= QEDF_MODULE_NAME,
998 	.this_id 	= -1,
999 	.cmd_per_lun	= 32,
1000 	.max_sectors 	= 0xffff,
1001 	.queuecommand 	= qedf_queuecommand,
1002 	.shost_groups	= qedf_host_groups,
1003 	.eh_abort_handler	= qedf_eh_abort,
1004 	.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
1005 	.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
1006 	.eh_host_reset_handler  = qedf_eh_host_reset,
1007 	.sdev_configure	= qedf_sdev_configure,
1008 	.dma_boundary = QED_HW_DMA_BOUNDARY,
1009 	.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
1010 	.can_queue = FCOE_PARAMS_NUM_TASKS,
1011 	.change_queue_depth = scsi_change_queue_depth,
1012 	.cmd_size = sizeof(struct qedf_cmd_priv),
1013 };
1014 
1015 static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1016 {
1017 	int rc;
1018 
1019 	spin_lock(&qedf_global_lock);
1020 	rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
1021 	spin_unlock(&qedf_global_lock);
1022 
1023 	return rc;
1024 }
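
/*
 * Note: qedf_global is a single fcoe_percpu_s shared by all qedf instances,
 * so the page used to append the CRC/EOF trailer is serialized with
 * qedf_global_lock rather than a per-adapter lock.
 */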
1025 
1026 static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
1027 {
1028 	struct qedf_rport *fcport;
1029 	struct fc_rport_priv *rdata;
1030 
1031 	rcu_read_lock();
1032 	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
1033 		rdata = fcport->rdata;
1034 		if (rdata == NULL)
1035 			continue;
1036 		if (rdata->ids.port_id == port_id) {
1037 			rcu_read_unlock();
1038 			return fcport;
1039 		}
1040 	}
1041 	rcu_read_unlock();
1042 
1043 	/* Return NULL to caller to let them know fcport was not found */
1044 	return NULL;
1045 }
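
/*
 * Note: the lookup above runs under rcu_read_lock() only and the returned
 * fcport is not reference-counted, so callers such as qedf_xmit() must
 * re-check QEDF_RPORT_SESSION_READY before using the session.
 */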
1046 
1047 /* Transmits an ELS frame over an offloaded session */
1048 static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
1049 {
1050 	struct fc_frame_header *fh;
1051 	int rc = 0;
1052 
1053 	fh = fc_frame_header_get(fp);
1054 	if ((fh->fh_type == FC_TYPE_ELS) &&
1055 	    (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
1056 		switch (fc_frame_payload_op(fp)) {
1057 		case ELS_ADISC:
1058 			qedf_send_adisc(fcport, fp);
1059 			rc = 1;
1060 			break;
1061 		}
1062 	}
1063 
1064 	return rc;
1065 }
1066 
1067 /*
1068  * qedf_xmit - qedf FCoE frame transmit function
1069  */
1070 static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
1071 {
1072 	struct fc_lport		*base_lport;
1073 	struct qedf_ctx		*qedf;
1074 	struct ethhdr		*eh;
1075 	struct fcoe_crc_eof	*cp;
1076 	struct sk_buff		*skb;
1077 	struct fc_frame_header	*fh;
1078 	struct fcoe_hdr		*hp;
1079 	u8			sof, eof;
1080 	u32			crc;
1081 	unsigned int		hlen, tlen, elen;
1082 	int			wlen;
1083 	struct fc_lport *tmp_lport;
1084 	struct fc_lport *vn_port = NULL;
1085 	struct qedf_rport *fcport;
1086 	int rc;
1087 	u16 vlan_tci = 0;
1088 
1089 	qedf = (struct qedf_ctx *)lport_priv(lport);
1090 
1091 	fh = fc_frame_header_get(fp);
1092 	skb = fp_skb(fp);
1093 
1094 	/* Filter out traffic to other NPIV ports on the same host */
1095 	if (lport->vport)
1096 		base_lport = shost_priv(vport_to_shost(lport->vport));
1097 	else
1098 		base_lport = lport;
1099 
1100 	/* Flag if the destination is the base port */
1101 	if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
1102 		vn_port = base_lport;
1103 	} else {
1104 		/* Go through the list of vports attached to the base_lport
1105 		 * and see if we have a match with the destination address.
1106 		 */
1107 		list_for_each_entry(tmp_lport, &base_lport->vports, list) {
1108 			if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
1109 				vn_port = tmp_lport;
1110 				break;
1111 			}
1112 		}
1113 	}
1114 	if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
1115 		struct fc_rport_priv *rdata = NULL;
1116 
1117 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
1118 		    "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
1119 		kfree_skb(skb);
1120 		rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
1121 		if (rdata) {
1122 			rdata->retries = lport->max_rport_retry_count;
1123 			kref_put(&rdata->kref, fc_rport_destroy);
1124 		}
1125 		return -EINVAL;
1126 	}
1127 	/* End NPIV filtering */
1128 
1129 	if (!qedf->ctlr.sel_fcf) {
1130 		kfree_skb(skb);
1131 		return 0;
1132 	}
1133 
1134 	if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
1135 		QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
1136 		kfree_skb(skb);
1137 		return 0;
1138 	}
1139 
1140 	if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
1141 		QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
1142 		kfree_skb(skb);
1143 		return 0;
1144 	}
1145 
1146 	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
1147 		if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
1148 			return 0;
1149 	}
1150 
1151 	/* Check to see if this needs to be sent on an offloaded session */
1152 	fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
1153 
1154 	if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1155 		rc = qedf_xmit_l2_frame(fcport, fp);
1156 		/*
1157 		 * If the frame was successfully sent over the middle path
1158 		 * then do not try to also send it over the LL2 path
1159 		 */
1160 		if (rc)
1161 			return 0;
1162 	}
1163 
1164 	sof = fr_sof(fp);
1165 	eof = fr_eof(fp);
1166 
1167 	elen = sizeof(struct ethhdr);
1168 	hlen = sizeof(struct fcoe_hdr);
1169 	tlen = sizeof(struct fcoe_crc_eof);
1170 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1171 
1172 	skb->ip_summed = CHECKSUM_NONE;
1173 	crc = fcoe_fc_crc(fp);
1174 
1175 	/* copy port crc and eof to the skb buff */
1176 	if (skb_is_nonlinear(skb)) {
1177 		skb_frag_t *frag;
1178 
1179 		if (qedf_get_paged_crc_eof(skb, tlen)) {
1180 			kfree_skb(skb);
1181 			return -ENOMEM;
1182 		}
1183 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1184 		cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
1185 	} else {
1186 		cp = skb_put(skb, tlen);
1187 	}
1188 
1189 	memset(cp, 0, sizeof(*cp));
1190 	cp->fcoe_eof = eof;
1191 	cp->fcoe_crc32 = cpu_to_le32(~crc);
1192 	if (skb_is_nonlinear(skb)) {
1193 		kunmap_atomic(cp);
1194 		cp = NULL;
1195 	}
1196 
1197 
1198 	/* adjust skb network/transport offsets to match mac/fcoe/port */
1199 	skb_push(skb, elen + hlen);
1200 	skb_reset_mac_header(skb);
1201 	skb_reset_network_header(skb);
1202 	skb->mac_len = elen;
1203 	skb->protocol = htons(ETH_P_FCOE);
1204 
1205 	/*
1206 	 * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
1207 	 * for FIP/FCoE traffic.
1208 	 */
1209 	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
1210 
1211 	/* fill up mac and fcoe headers */
1212 	eh = eth_hdr(skb);
1213 	eh->h_proto = htons(ETH_P_FCOE);
1214 	if (qedf->ctlr.map_dest)
1215 		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1216 	else
1217 		/* insert GW address */
1218 		ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
1219 
1220 	/* Set the source MAC address */
1221 	ether_addr_copy(eh->h_source, qedf->data_src_addr);
1222 
1223 	hp = (struct fcoe_hdr *)(eh + 1);
1224 	memset(hp, 0, sizeof(*hp));
1225 	if (FC_FCOE_VER)
1226 		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1227 	hp->fcoe_sof = sof;
1228 
1229 	/*update tx stats */
1230 	this_cpu_inc(lport->stats->TxFrames);
1231 	this_cpu_add(lport->stats->TxWords, wlen);
1232 
1233 	/* Get VLAN ID from skb for printing purposes */
1234 	__vlan_hwaccel_get_tag(skb, &vlan_tci);
1235 
1236 	/* send down to lld */
1237 	fr_dev(fp) = lport;
1238 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
1239 	    "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
1240 	    ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
1241 	    vlan_tci);
1242 	if (qedf_dump_frames)
1243 		print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
1244 		    1, skb->data, skb->len, false);
1245 	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
1246 	if (rc) {
1247 		QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
1248 		kfree_skb(skb);
1249 		return rc;
1250 	}
1251 
1252 	return 0;
1253 }
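
/*
 * Resulting non-offloaded frame layout (illustrative):
 *
 *   [ ethhdr | fcoe_hdr (SOF) | FC frame | fcoe_crc_eof (CRC32, EOF) ]
 *
 * with the 802.1Q tag carried out of band in the skb via
 * __vlan_hwaccel_put_tag() rather than inserted into the buffer here.
 */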
1254 
1255 static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1256 {
1257 	int rval = 0;
1258 	u32 *pbl;
1259 	dma_addr_t page;
1260 	int num_pages;
1261 
1262 	/* Calculate appropriate queue and PBL sizes */
1263 	fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
1264 	fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
1265 	fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
1266 	    sizeof(void *);
1267 	fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
1268 
1269 	fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1270 					&fcport->sq_dma, GFP_KERNEL);
1271 	if (!fcport->sq) {
1272 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
1273 		rval = 1;
1274 		goto out;
1275 	}
1276 
1277 	fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
1278 					    fcport->sq_pbl_size,
1279 					    &fcport->sq_pbl_dma, GFP_KERNEL);
1280 	if (!fcport->sq_pbl) {
1281 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
1282 		rval = 1;
1283 		goto out_free_sq;
1284 	}
1285 
1286 	/* Create PBL */
1287 	num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
1288 	page = fcport->sq_dma;
1289 	pbl = (u32 *)fcport->sq_pbl;
1290 
1291 	while (num_pages--) {
1292 		*pbl = U64_LO(page);
1293 		pbl++;
1294 		*pbl = U64_HI(page);
1295 		pbl++;
1296 		page += QEDF_PAGE_SIZE;
1297 	}
1298 
1299 	return rval;
1300 
1301 out_free_sq:
1302 	dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
1303 	    fcport->sq_dma);
1304 out:
1305 	return rval;
1306 }
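
/*
 * Note: the PBL built above is an array of 64-bit page addresses stored as
 * {low 32 bits, high 32 bits} pairs, one entry per QEDF_PAGE_SIZE page of
 * the send queue; a two-page SQ, for example, yields two {U64_LO, U64_HI}
 * pairs.
 */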
1307 
1308 static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1309 {
1310 	if (fcport->sq_pbl)
1311 		dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
1312 		    fcport->sq_pbl, fcport->sq_pbl_dma);
1313 	if (fcport->sq)
1314 		dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1315 		    fcport->sq, fcport->sq_dma);
1316 }
1317 
1318 static int qedf_offload_connection(struct qedf_ctx *qedf,
1319 	struct qedf_rport *fcport)
1320 {
1321 	struct qed_fcoe_params_offload conn_info;
1322 	u32 port_id;
1323 	int rval;
1324 	uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
1325 
1326 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
1327 		   "portid=%06x.\n", fcport->rdata->ids.port_id);
1328 	rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
1329 	    &fcport->fw_cid, &fcport->p_doorbell);
1330 	if (rval) {
1331 		QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
1332 			   "for portid=%06x.\n", fcport->rdata->ids.port_id);
1333 		rval = 1; /* For some reason qed returns 0 on failure here */
1334 		goto out;
1335 	}
1336 
1337 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
1338 		   "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
1339 		   fcport->fw_cid, fcport->handle);
1340 
1341 	memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));
1342 
1343 	/* Fill in the offload connection info */
1344 	conn_info.sq_pbl_addr = fcport->sq_pbl_dma;
1345 
1346 	conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
1347 	conn_info.sq_next_page_addr =
1348 	    (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
1349 
1350 	/* Need to use our FCoE MAC for the offload session */
1351 	ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
1352 
1353 	ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
1354 
1355 	conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
1356 	conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
1357 	conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
1358 	conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
1359 
1360 	/* Set VLAN data */
1361 	conn_info.vlan_tag = qedf->vlan_id <<
1362 	    FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
1363 	conn_info.vlan_tag |=
1364 	    qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
1365 	conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
1366 	    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
1367 
1368 	/* Set host port source id */
1369 	port_id = fc_host_port_id(qedf->lport->host);
1370 	fcport->sid = port_id;
1371 	conn_info.s_id.addr_hi = (port_id & 0x000000FF);
1372 	conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
1373 	conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
1374 
1375 	conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;
1376 
1377 	/* Set remote port destination id */
1378 	port_id = fcport->rdata->rport->port_id;
1379 	conn_info.d_id.addr_hi = (port_id & 0x000000FF);
1380 	conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
1381 	conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;
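
	/*
	 * Worked example (illustrative): a 24-bit FC_ID such as 0x112233 is
	 * split byte-wise above, giving addr_hi = 0x33, addr_mid = 0x22 and
	 * addr_lo = 0x11 for both the source and destination IDs.
	 */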
1382 
1383 	conn_info.def_q_idx = 0; /* Default index for send queue? */
1384 
1385 	/* Set FC-TAPE specific flags if needed */
1386 	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1387 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
1388 		    "Enable CONF, REC for portid=%06x.\n",
1389 		    fcport->rdata->ids.port_id);
1390 		conn_info.flags |= 1 <<
1391 		    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
1392 		conn_info.flags |=
1393 		    ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
1394 		    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
1395 	}
1396 
1397 	rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
1398 	if (rval) {
1399 		QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
1400 			   "for portid=%06x.\n", fcport->rdata->ids.port_id);
1401 		goto out_free_conn;
1402 	} else
1403 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
1404 			   "succeeded portid=%06x total_sqe=%d.\n",
1405 			   fcport->rdata->ids.port_id, total_sqe);
1406 
1407 	spin_lock_init(&fcport->rport_lock);
1408 	atomic_set(&fcport->free_sqes, total_sqe);
1409 	return 0;
1410 out_free_conn:
1411 	qed_ops->release_conn(qedf->cdev, fcport->handle);
1412 out:
1413 	return rval;
1414 }
1415 
1416 #define QEDF_TERM_BUFF_SIZE		10
1417 static void qedf_upload_connection(struct qedf_ctx *qedf,
1418 	struct qedf_rport *fcport)
1419 {
1420 	void *term_params;
1421 	dma_addr_t term_params_dma;
1422 
1423 	/* Term params needs to be a DMA-coherent buffer as qed shares the
1424 	 * physical DMA address with the firmware. The buffer may be used in
1425 	 * the receive path so we may eventually have to move this.
1426 	 */
1427 	term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
1428 		&term_params_dma, GFP_KERNEL);
1429 	if (!term_params)
1430 		return;
1431 
1432 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
1433 		   "port_id=%06x.\n", fcport->rdata->ids.port_id);
1434 
1435 	qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
1436 	qed_ops->release_conn(qedf->cdev, fcport->handle);
1437 
1438 	dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
1439 	    term_params_dma);
1440 }
1441 
1442 static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
1443 	struct qedf_rport *fcport)
1444 {
1445 	struct fc_rport_priv *rdata = fcport->rdata;
1446 
1447 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
1448 	    fcport->rdata->ids.port_id);
1449 
1450 	/* Flush any remaining i/o's before we upload the connection */
1451 	qedf_flush_active_ios(fcport, -1);
1452 
1453 	if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
1454 		qedf_upload_connection(qedf, fcport);
1455 	qedf_free_sq(qedf, fcport);
1456 	fcport->rdata = NULL;
1457 	fcport->qedf = NULL;
1458 	kref_put(&rdata->kref, fc_rport_destroy);
1459 }
1460 
1461 /*
1462  * This event_callback is called after successful completion of libfc
1463  * initiated target login. qedf can proceed with initiating the session
1464  * establishment.
1465  */
1466 static void qedf_rport_event_handler(struct fc_lport *lport,
1467 				struct fc_rport_priv *rdata,
1468 				enum fc_rport_event event)
1469 {
1470 	struct qedf_ctx *qedf = lport_priv(lport);
1471 	struct fc_rport *rport = rdata->rport;
1472 	struct fc_rport_libfc_priv *rp;
1473 	struct qedf_rport *fcport;
1474 	u32 port_id;
1475 	int rval;
1476 	unsigned long flags;
1477 
1478 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
1479 		   "port_id = 0x%x\n", event, rdata->ids.port_id);
1480 
1481 	switch (event) {
1482 	case RPORT_EV_READY:
1483 		if (!rport) {
1484 			QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
1485 			break;
1486 		}
1487 
1488 		rp = rport->dd_data;
1489 		fcport = (struct qedf_rport *)&rp[1];
1490 		fcport->qedf = qedf;
1491 
1492 		if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
1493 			QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
1494 			    "portid=0x%x as max number of offloaded sessions "
1495 			    "reached.\n", rdata->ids.port_id);
1496 			return;
1497 		}
1498 
1499 		/*
1500 		 * Don't try to offload the session again. Can happen when we
1501 		 * get an ADISC
1502 		 */
1503 		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1504 			QEDF_WARN(&(qedf->dbg_ctx), "Session already "
1505 				   "offloaded, portid=0x%x.\n",
1506 				   rdata->ids.port_id);
1507 			return;
1508 		}
1509 
1510 		if (rport->port_id == FC_FID_DIR_SERV) {
1511 			/*
1512 			 * qedf_rport structure doesn't exist for
1513 			 * directory server.
1514 			 * We should not come here, as lport will
1515 			 * take care of fabric login
1516 			 */
1517 			QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
1518 			    "exist for dir server port_id=%x\n",
1519 			    rdata->ids.port_id);
1520 			break;
1521 		}
1522 
1523 		if (rdata->spp_type != FC_TYPE_FCP) {
1524 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1525 			    "Not offloading since spp type isn't FCP\n");
1526 			break;
1527 		}
1528 		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
1529 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1530 			    "Not FCP target so not offloading\n");
1531 			break;
1532 		}
1533 
1534 		/* Initial reference held on entry, so this can't fail */
1535 		kref_get(&rdata->kref);
1536 		fcport->rdata = rdata;
1537 		fcport->rport = rport;
1538 
1539 		rval = qedf_alloc_sq(qedf, fcport);
1540 		if (rval) {
1541 			qedf_cleanup_fcport(qedf, fcport);
1542 			break;
1543 		}
1544 
1545 		/* Set device type */
1546 		if (rdata->flags & FC_RP_FLAGS_RETRY &&
1547 		    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
1548 		    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
1549 			fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
1550 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1551 			    "portid=%06x is a TAPE device.\n",
1552 			    rdata->ids.port_id);
1553 		} else {
1554 			fcport->dev_type = QEDF_RPORT_TYPE_DISK;
1555 		}
1556 
1557 		rval = qedf_offload_connection(qedf, fcport);
1558 		if (rval) {
1559 			qedf_cleanup_fcport(qedf, fcport);
1560 			break;
1561 		}
1562 
1563 		/* Add fcport to the qedf_ctx list of offloaded ports */
1564 		spin_lock_irqsave(&qedf->hba_lock, flags);
1565 		list_add_rcu(&fcport->peers, &qedf->fcports);
1566 		spin_unlock_irqrestore(&qedf->hba_lock, flags);
1567 
1568 		/*
1569 		 * Set the session ready bit to let everyone know that this
1570 		 * connection is ready for I/O
1571 		 */
1572 		set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
1573 		atomic_inc(&qedf->num_offloads);
1574 
1575 		break;
1576 	case RPORT_EV_LOGO:
1577 	case RPORT_EV_FAILED:
1578 	case RPORT_EV_STOP:
1579 		port_id = rdata->ids.port_id;
1580 		if (port_id == FC_FID_DIR_SERV)
1581 			break;
1582 
1583 		if (rdata->spp_type != FC_TYPE_FCP) {
1584 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1585 			    "No action since spp type isn't FCP\n");
1586 			break;
1587 		}
1588 		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
1589 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1590 			    "Not FCP target so no action\n");
1591 			break;
1592 		}
1593 
1594 		if (!rport) {
1595 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1596 			    "port_id=%x - rport notcreated Yet!!\n", port_id);
1597 			break;
1598 		}
1599 		rp = rport->dd_data;
1600 		/*
1601 		 * Perform session upload. Note that rdata->peers is already
1602 		 * removed from disc->rports list before we get this event.
1603 		 */
1604 		fcport = (struct qedf_rport *)&rp[1];
1605 
1606 		spin_lock_irqsave(&fcport->rport_lock, flags);
1607 		/* Only free this fcport if it is offloaded already */
1608 		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
1609 		    !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1610 		    &fcport->flags)) {
1611 			set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1612 				&fcport->flags);
1613 			spin_unlock_irqrestore(&fcport->rport_lock, flags);
1614 			qedf_cleanup_fcport(qedf, fcport);
1615 			/*
1616 			 * Remove fcport from the qedf_ctx list of offloaded
1617 			 * ports
1618 			 */
1619 			spin_lock_irqsave(&qedf->hba_lock, flags);
1620 			list_del_rcu(&fcport->peers);
1621 			spin_unlock_irqrestore(&qedf->hba_lock, flags);
1622 
1623 			clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1624 			    &fcport->flags);
1625 			atomic_dec(&qedf->num_offloads);
1626 		} else {
1627 			spin_unlock_irqrestore(&fcport->rport_lock, flags);
1628 		}
1629 		break;
1630 
1631 	case RPORT_EV_NONE:
1632 		break;
1633 	}
1634 }
1635 
1636 static void qedf_abort_io(struct fc_lport *lport)
1637 {
1638 	/* NO-OP but need to fill in the template */
1639 }
1640 
1641 static void qedf_fcp_cleanup(struct fc_lport *lport)
1642 {
1643 	/*
1644 	 * NO-OP but need to fill in template to prevent a NULL
1645 	 * function pointer dereference during link down. I/Os
1646 	 * will be flushed when port is uploaded.
1647 	 */
1648 }
1649 
1650 static struct libfc_function_template qedf_lport_template = {
1651 	.frame_send		= qedf_xmit,
1652 	.fcp_abort_io		= qedf_abort_io,
1653 	.fcp_cleanup		= qedf_fcp_cleanup,
1654 	.rport_event_callback	= qedf_rport_event_handler,
1655 	.elsct_send		= qedf_elsct_send,
1656 };
1657 
1658 static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
1659 {
1660 	fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
1661 
1662 	qedf->ctlr.send = qedf_fip_send;
1663 	qedf->ctlr.get_src_addr = qedf_get_src_mac;
1664 	ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
1665 }
1666 
1667 static void qedf_setup_fdmi(struct qedf_ctx *qedf)
1668 {
1669 	struct fc_lport *lport = qedf->lport;
1670 	u8 buf[8];
1671 	int pos;
1672 	uint32_t i;
1673 
1674 	/*
1675 	 * fdmi_enabled needs to be set for libfc
1676 	 * to execute FDMI registration
1677 	 */
1678 	lport->fdmi_enabled = 1;
1679 
1680 	/*
1681 	 * Set up the necessary fc_host attributes that will be used to fill
1682 	 * in the FDMI information.
1683 	 */
1684 
1685 	/* Get the PCI-e Device Serial Number Capability */
1686 	pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
1687 	if (pos) {
1688 		pos += 4;
1689 		for (i = 0; i < 8; i++)
1690 			pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
1691 
1692 		snprintf(fc_host_serial_number(lport->host),
1693 		    FC_SERIAL_NUMBER_SIZE,
1694 		    "%02X%02X%02X%02X%02X%02X%02X%02X",
1695 		    buf[7], buf[6], buf[5], buf[4],
1696 		    buf[3], buf[2], buf[1], buf[0]);
1697 	} else
1698 		snprintf(fc_host_serial_number(lport->host),
1699 		    FC_SERIAL_NUMBER_SIZE, "Unknown");
1700 
1701 	snprintf(fc_host_manufacturer(lport->host),
1702 	    FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
1703 
1704 	if (qedf->pdev->device == QL45xxx) {
1705 		snprintf(fc_host_model(lport->host),
1706 			FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
1707 
1708 		snprintf(fc_host_model_description(lport->host),
1709 			FC_SYMBOLIC_NAME_SIZE, "%s",
1710 			"Marvell FastLinQ QL45xxx FCoE Adapter");
1711 	}
1712 
1713 	if (qedf->pdev->device == QL41xxx) {
1714 		snprintf(fc_host_model(lport->host),
1715 			FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
1716 
1717 		snprintf(fc_host_model_description(lport->host),
1718 			FC_SYMBOLIC_NAME_SIZE, "%s",
1719 			"Marvell FastLinQ QL41xxx FCoE Adapter");
1720 	}
1721 
1722 	snprintf(fc_host_hardware_version(lport->host),
1723 	    FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
1724 
1725 	snprintf(fc_host_driver_version(lport->host),
1726 	    FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
1727 
1728 	snprintf(fc_host_firmware_version(lport->host),
1729 	    FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
1730 	    FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1731 	    FW_ENGINEERING_VERSION);
1732 
1733 	snprintf(fc_host_vendor_identifier(lport->host),
1734 		FC_VENDOR_IDENTIFIER, "%s", "Marvell");
1735 
1736 }
1737 
1738 static int qedf_lport_setup(struct qedf_ctx *qedf)
1739 {
1740 	struct fc_lport *lport = qedf->lport;
1741 
1742 	lport->link_up = 0;
1743 	lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1744 	lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1745 	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1746 	    FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1747 	lport->boot_time = jiffies;
1748 	lport->e_d_tov = 2 * 1000;
1749 	lport->r_a_tov = 10 * 1000;
1750 
1751 	/* Set NPIV support */
1752 	lport->does_npiv = 1;
1753 	fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
1754 
1755 	fc_set_wwnn(lport, qedf->wwnn);
1756 	fc_set_wwpn(lport, qedf->wwpn);
1757 
1758 	if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
1759 		QEDF_ERR(&qedf->dbg_ctx,
1760 			 "fcoe_libfc_config failed.\n");
1761 		return -ENOMEM;
1762 	}
1763 
1764 	/* Allocate the exchange manager */
1765 	fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
1766 			  0xfffe, NULL);
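	/*
	 * Note: the min_xid/max_xid arguments above reserve XIDs in the
	 * range [FCOE_PARAMS_NUM_TASKS, 0xfffe] for libfc-managed
	 * exchanges, keeping them clear of the task IDs used by
	 * offloaded I/O.
	 */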
1767 
1768 	if (fc_lport_init_stats(lport))
1769 		return -ENOMEM;
1770 
1771 	/* Finish lport config */
1772 	fc_lport_config(lport);
1773 
1774 	/* Set max frame size */
1775 	fc_set_mfs(lport, QEDF_MFS);
1776 	fc_host_maxframe_size(lport->host) = lport->mfs;
1777 
1778 	/* Set default dev_loss_tmo based on module parameter */
1779 	fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
1780 
1781 	/* Set symbolic node name */
1782 	if (qedf->pdev->device == QL45xxx)
1783 		snprintf(fc_host_symbolic_name(lport->host), 256,
1784 			"Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
1785 
1786 	if (qedf->pdev->device == QL41xxx)
1787 		snprintf(fc_host_symbolic_name(lport->host), 256,
1788 			"Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
1789 
1790 	qedf_setup_fdmi(qedf);
1791 
1792 	return 0;
1793 }
1794 
1795 /*
1796  * NPIV functions
1797  */
1798 
1799 static int qedf_vport_libfc_config(struct fc_vport *vport,
1800 	struct fc_lport *lport)
1801 {
1802 	lport->link_up = 0;
1803 	lport->qfull = 0;
1804 	lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1805 	lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1806 	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1807 	    FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1808 	lport->boot_time = jiffies;
1809 	lport->e_d_tov = 2 * 1000;
1810 	lport->r_a_tov = 10 * 1000;
1811 	lport->does_npiv = 1; /* Temporary until we add NPIV support */
1812 
1813 	/* Allocate stats for vport */
1814 	if (fc_lport_init_stats(lport))
1815 		return -ENOMEM;
1816 
1817 	/* Finish lport config */
1818 	fc_lport_config(lport);
1819 
1820 	/* offload related configuration */
1821 	lport->crc_offload = 0;
1822 	lport->seq_offload = 0;
1823 	lport->lro_enabled = 0;
1824 	lport->lro_xid = 0;
1825 	lport->lso_max = 0;
1826 
1827 	return 0;
1828 }
1829 
1830 static int qedf_vport_create(struct fc_vport *vport, bool disabled)
1831 {
1832 	struct Scsi_Host *shost = vport_to_shost(vport);
1833 	struct fc_lport *n_port = shost_priv(shost);
1834 	struct fc_lport *vn_port;
1835 	struct qedf_ctx *base_qedf = lport_priv(n_port);
1836 	struct qedf_ctx *vport_qedf;
1837 
1838 	char buf[32];
1839 	int rc = 0;
1840 
1841 	rc = fcoe_validate_vport_create(vport);
1842 	if (rc) {
1843 		fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1844 		QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
1845 			   "WWPN (0x%s) already exists.\n", buf);
1846 		return rc;
1847 	}
1848 
1849 	if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
1850 		QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
1851 			   "because link is not up.\n");
1852 		return -EIO;
1853 	}
1854 
1855 	vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
1856 	if (!vn_port) {
1857 		QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
1858 			   "for vport.\n");
1859 		return -ENOMEM;
1860 	}
1861 
1862 	fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1863 	QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
1864 	    buf);
1865 
1866 	/* Copy some fields from base_qedf */
1867 	vport_qedf = lport_priv(vn_port);
1868 	memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
1869 
1870 	/* Set qedf data specific to this vport */
1871 	vport_qedf->lport = vn_port;
1872 	/* Use same hba_lock as base_qedf */
1873 	vport_qedf->hba_lock = base_qedf->hba_lock;
1874 	vport_qedf->pdev = base_qedf->pdev;
1875 	vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
1876 	init_completion(&vport_qedf->flogi_compl);
1877 	INIT_LIST_HEAD(&vport_qedf->fcports);
1878 	INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
1879 
1880 	rc = qedf_vport_libfc_config(vport, vn_port);
1881 	if (rc) {
1882 		QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
1883 		    "for lport stats.\n");
1884 		goto err;
1885 	}
1886 
1887 	fc_set_wwnn(vn_port, vport->node_name);
1888 	fc_set_wwpn(vn_port, vport->port_name);
1889 	vport_qedf->wwnn = vn_port->wwnn;
1890 	vport_qedf->wwpn = vn_port->wwpn;
1891 
1892 	vn_port->host->transportt = qedf_fc_vport_transport_template;
1893 	vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
1894 	vn_port->host->max_lun = qedf_max_lun;
1895 	vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
1896 	vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
1897 	vn_port->host->max_id = QEDF_MAX_SESSIONS;
1898 
1899 	rc = scsi_add_host(vn_port->host, &vport->dev);
1900 	if (rc) {
1901 		QEDF_WARN(&base_qedf->dbg_ctx,
1902 			  "Error adding Scsi_Host rc=0x%x.\n", rc);
1903 		goto err;
1904 	}
1905 
1906 	/* Set default dev_loss_tmo based on module parameter */
1907 	fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
1908 
1909 	/* Init libfc stuffs */
1910 	memcpy(&vn_port->tt, &qedf_lport_template,
1911 		sizeof(qedf_lport_template));
1912 	fc_exch_init(vn_port);
1913 	fc_elsct_init(vn_port);
1914 	fc_lport_init(vn_port);
1915 	fc_disc_init(vn_port);
1916 	fc_disc_config(vn_port, vn_port);
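	/*
	 * Note: this open-coded init sequence mirrors what
	 * fcoe_libfc_config() does for the physical lport; instead of
	 * allocating its own exchange manager, the vport shares the
	 * N_Port's via fc_exch_mgr_list_clone() below.
	 */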
1917 
1918 
1919 	/* Allocate the exchange manager */
1920 	shost = vport_to_shost(vport);
1921 	n_port = shost_priv(shost);
1922 	fc_exch_mgr_list_clone(n_port, vn_port);
1923 
1924 	/* Set max frame size */
1925 	fc_set_mfs(vn_port, QEDF_MFS);
1926 
1927 	fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
1928 
1929 	if (disabled) {
1930 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
1931 	} else {
1932 		vn_port->boot_time = jiffies;
1933 		fc_fabric_login(vn_port);
1934 		fc_vport_setlink(vn_port);
1935 	}
1936 
1937 	/* Set symbolic node name */
1938 	if (base_qedf->pdev->device == QL45xxx)
1939 		snprintf(fc_host_symbolic_name(vn_port->host), 256,
1940 			 "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
1941 
1942 	if (base_qedf->pdev->device == QL41xxx)
1943 		snprintf(fc_host_symbolic_name(vn_port->host), 256,
1944 			 "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
1945 
1946 	/* Set supported speed */
1947 	fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
1948 
1949 	/* Set speed */
1950 	vn_port->link_speed = n_port->link_speed;
1951 
1952 	/* Set port type */
1953 	fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
1954 
1955 	/* Set maxframe size */
1956 	fc_host_maxframe_size(vn_port->host) = n_port->mfs;
1957 
1958 	QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
1959 		   vn_port);
1960 
1961 	/* Set up debug context for vport */
1962 	vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
1963 	vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
1964 
1965 	return 0;
1966 
1967 err:
1968 	scsi_host_put(vn_port->host);
1969 	return rc;
1970 }
1971 
1972 static int qedf_vport_destroy(struct fc_vport *vport)
1973 {
1974 	struct Scsi_Host *shost = vport_to_shost(vport);
1975 	struct fc_lport *n_port = shost_priv(shost);
1976 	struct fc_lport *vn_port = vport->dd_data;
1977 	struct qedf_ctx *qedf = lport_priv(vn_port);
1978 
1979 	if (!qedf) {
1980 		QEDF_ERR(NULL, "qedf is NULL.\n");
1981 		goto out;
1982 	}
1983 
1984 	/* Set unloading bit on vport qedf_ctx to prevent more I/O */
1985 	set_bit(QEDF_UNLOADING, &qedf->flags);
1986 
1987 	mutex_lock(&n_port->lp_mutex);
1988 	list_del(&vn_port->list);
1989 	mutex_unlock(&n_port->lp_mutex);
1990 
1991 	fc_fabric_logoff(vn_port);
1992 	fc_lport_destroy(vn_port);
1993 
1994 	/* Detach from scsi-ml */
1995 	fc_remove_host(vn_port->host);
1996 	scsi_remove_host(vn_port->host);
1997 
1998 	/*
1999 	 * Only try to release the exchange manager if the vn_port
2000 	 * configuration is complete.
2001 	 */
2002 	if (vn_port->state == LPORT_ST_READY)
2003 		fc_exch_mgr_free(vn_port);
2004 
2005 	/* Free memory used by statistical counters */
2006 	fc_lport_free_stats(vn_port);
2007 
2008 	/* Release Scsi_Host */
2009 	scsi_host_put(vn_port->host);
2010 
2011 out:
2012 	return 0;
2013 }
2014 
2015 static int qedf_vport_disable(struct fc_vport *vport, bool disable)
2016 {
2017 	struct fc_lport *lport = vport->dd_data;
2018 
2019 	if (disable) {
2020 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2021 		fc_fabric_logoff(lport);
2022 	} else {
2023 		lport->boot_time = jiffies;
2024 		fc_fabric_login(lport);
2025 		fc_vport_setlink(lport);
2026 	}
2027 	return 0;
2028 }
2029 
2030 /*
2031  * During removal we need to wait for all the vports associated with a port
2032  * to be destroyed so we avoid a race condition where libfc is still trying
2033  * to reap vports while the driver remove function has already reaped the
2034  * driver contexts associated with the physical port.
2035  */
2036 static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
2037 {
2038 	struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
2039 
2040 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2041 	    "Entered.\n");
2042 	while (fc_host->npiv_vports_inuse > 0) {
2043 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2044 		    "Waiting for all vports to be reaped.\n");
2045 		msleep(1000);
2046 	}
2047 }
2048 
2049 /**
2050  * qedf_fcoe_reset - Reset the FCoE link (fc_host LIP handler)
2051  *
2052  * @shost: shost the reset is from
2053  *
2054  * Returns: always 0
2055  */
2056 static int qedf_fcoe_reset(struct Scsi_Host *shost)
2057 {
2058 	struct fc_lport *lport = shost_priv(shost);
2059 
2060 	qedf_ctx_soft_reset(lport);
2061 	return 0;
2062 }
2063 
2064 static void qedf_get_host_port_id(struct Scsi_Host *shost)
2065 {
2066 	struct fc_lport *lport = shost_priv(shost);
2067 
2068 	fc_host_port_id(shost) = lport->port_id;
2069 }
2070 
2071 static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
2072 	*shost)
2073 {
2074 	struct fc_host_statistics *qedf_stats;
2075 	struct fc_lport *lport = shost_priv(shost);
2076 	struct qedf_ctx *qedf = lport_priv(lport);
2077 	struct qed_fcoe_stats *fw_fcoe_stats;
2078 
2079 	qedf_stats = fc_get_host_stats(shost);
2080 
2081 	/* We don't collect offload stats for specific NPIV ports */
2082 	if (lport->vport)
2083 		goto out;
2084 
2085 	fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
2086 	if (!fw_fcoe_stats) {
2087 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
2088 		    "fw_fcoe_stats.\n");
2089 		goto out;
2090 	}
2091 
2092 	mutex_lock(&qedf->stats_mutex);
2093 
2094 	/* Query firmware for offload stats */
2095 	qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
2096 
2097 	/*
2098 	 * The expectation is that we add our offload stats to the stats
2099 	 * being maintained by libfc each time the fc_get_host_stats callback
2100 	 * is invoked. The additions are not carried over for each call to
2101 	 * the fc_get_host_stats callback.
2102 	 */
2103 	qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
2104 	    fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
2105 	    fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
2106 	qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
2107 	    fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
2108 	    fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
2109 	qedf_stats->fcp_input_megabytes +=
2110 	    do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
2111 	qedf_stats->fcp_output_megabytes +=
2112 	    do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
2113 	qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
2114 	qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
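	/*
	 * Note: do_div() divides its 64-bit dividend in place and returns
	 * the remainder, so the two calls above add the byte remainders to
	 * the megabyte counters and leave fcoe_rx_byte_cnt/fcoe_tx_byte_cnt
	 * holding megabyte quotients; the rx_words/tx_words sums therefore
	 * operate on the already-divided values.
	 */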
2115 	qedf_stats->invalid_crc_count +=
2116 	    fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
2117 	qedf_stats->dumped_frames =
2118 	    fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
2119 	qedf_stats->error_frames +=
2120 	    fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
2121 	qedf_stats->fcp_input_requests += qedf->input_requests;
2122 	qedf_stats->fcp_output_requests += qedf->output_requests;
2123 	qedf_stats->fcp_control_requests += qedf->control_requests;
2124 	qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
2125 	qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
2126 
2127 	mutex_unlock(&qedf->stats_mutex);
2128 	kfree(fw_fcoe_stats);
2129 out:
2130 	return qedf_stats;
2131 }
2132 
2133 static struct fc_function_template qedf_fc_transport_fn = {
2134 	.show_host_node_name = 1,
2135 	.show_host_port_name = 1,
2136 	.show_host_supported_classes = 1,
2137 	.show_host_supported_fc4s = 1,
2138 	.show_host_active_fc4s = 1,
2139 	.show_host_maxframe_size = 1,
2140 
2141 	.get_host_port_id = qedf_get_host_port_id,
2142 	.show_host_port_id = 1,
2143 	.show_host_supported_speeds = 1,
2144 	.get_host_speed = fc_get_host_speed,
2145 	.show_host_speed = 1,
2146 	.show_host_port_type = 1,
2147 	.get_host_port_state = fc_get_host_port_state,
2148 	.show_host_port_state = 1,
2149 	.show_host_symbolic_name = 1,
2150 
2151 	/*
2152 	 * Tell FC transport to allocate enough space to store the backpointer
2153 	 * for the associated qedf_rport struct.
2154 	 */
2155 	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
2156 				sizeof(struct qedf_rport)),
2157 	.show_rport_maxframe_size = 1,
2158 	.show_rport_supported_classes = 1,
2159 	.show_host_fabric_name = 1,
2160 	.show_starget_node_name = 1,
2161 	.show_starget_port_name = 1,
2162 	.show_starget_port_id = 1,
2163 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2164 	.show_rport_dev_loss_tmo = 1,
2165 	.get_fc_host_stats = qedf_fc_get_host_stats,
2166 	.issue_fc_host_lip = qedf_fcoe_reset,
2167 	.vport_create = qedf_vport_create,
2168 	.vport_delete = qedf_vport_destroy,
2169 	.vport_disable = qedf_vport_disable,
2170 	.bsg_request = fc_lport_bsg_request,
2171 };
2172 
2173 static struct fc_function_template qedf_fc_vport_transport_fn = {
2174 	.show_host_node_name = 1,
2175 	.show_host_port_name = 1,
2176 	.show_host_supported_classes = 1,
2177 	.show_host_supported_fc4s = 1,
2178 	.show_host_active_fc4s = 1,
2179 	.show_host_maxframe_size = 1,
2180 	.show_host_port_id = 1,
2181 	.show_host_supported_speeds = 1,
2182 	.get_host_speed = fc_get_host_speed,
2183 	.show_host_speed = 1,
2184 	.show_host_port_type = 1,
2185 	.get_host_port_state = fc_get_host_port_state,
2186 	.show_host_port_state = 1,
2187 	.show_host_symbolic_name = 1,
2188 	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
2189 				sizeof(struct qedf_rport)),
2190 	.show_rport_maxframe_size = 1,
2191 	.show_rport_supported_classes = 1,
2192 	.show_host_fabric_name = 1,
2193 	.show_starget_node_name = 1,
2194 	.show_starget_port_name = 1,
2195 	.show_starget_port_id = 1,
2196 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2197 	.show_rport_dev_loss_tmo = 1,
2198 	.get_fc_host_stats = fc_get_host_stats,
2199 	.issue_fc_host_lip = qedf_fcoe_reset,
2200 	.bsg_request = fc_lport_bsg_request,
2201 };
2202 
2203 static bool qedf_fp_has_work(struct qedf_fastpath *fp)
2204 {
2205 	struct qedf_ctx *qedf = fp->qedf;
2206 	struct global_queue *que;
2207 	struct qed_sb_info *sb_info = fp->sb_info;
2208 	struct status_block *sb = sb_info->sb_virt;
2209 	u16 prod_idx;
2210 
2211 	/* Get the pointer to the global CQ this completion is on */
2212 	que = qedf->global_queues[fp->sb_id];
2213 
2214 	/* Be sure all responses have been written to PI */
2215 	rmb();
2216 
2217 	/* Get the current firmware producer index */
2218 	prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
2219 
2220 	return (que->cq_prod_idx != prod_idx);
2221 }
2222 
2223 /*
2224  * Interrupt handler code.
2225  */
2226 
2227 /* Process completion queue and copy CQE contents for deferred processing
2228  *
2229  * Return true if we should wake the I/O thread, false if not.
2230  */
2231 static bool qedf_process_completions(struct qedf_fastpath *fp)
2232 {
2233 	struct qedf_ctx *qedf = fp->qedf;
2234 	struct qed_sb_info *sb_info = fp->sb_info;
2235 	struct status_block *sb = sb_info->sb_virt;
2236 	struct global_queue *que;
2237 	u16 prod_idx;
2238 	struct fcoe_cqe *cqe;
2239 	struct qedf_io_work *io_work;
2240 	unsigned int cpu;
2241 	struct qedf_ioreq *io_req = NULL;
2242 	u16 xid;
2243 	u16 new_cqes;
2244 	u32 comp_type;
2245 
2246 	/* Get the current firmware producer index */
2247 	prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
2248 
2249 	/* Get the pointer to the global CQ this completion is on */
2250 	que = qedf->global_queues[fp->sb_id];
2251 
2252 	/* Calculate the amount of new elements since last processing */
2253 	new_cqes = (prod_idx >= que->cq_prod_idx) ?
2254 	    (prod_idx - que->cq_prod_idx) :
2255 	    0x10000 - que->cq_prod_idx + prod_idx;
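	/*
	 * Example: with a 16-bit index, if the last seen cq_prod_idx was
	 * 0xfffe and the new prod_idx is 0x0001, the wrapped count is
	 * 0x10000 - 0xfffe + 0x0001 = 3 new CQEs.
	 */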
2256 
2257 	/* Save producer index */
2258 	que->cq_prod_idx = prod_idx;
2259 
2260 	while (new_cqes) {
2261 		fp->completions++;
2262 		cqe = &que->cq[que->cq_cons_idx];
2263 
2264 		comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2265 		    FCOE_CQE_CQE_TYPE_MASK;
2266 
2267 		/*
2268 		 * Process unsolicited CQEs directly in the interrupt handler
2269 		 * since we need the fastpath ID
2270 		 */
2271 		if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
2272 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2273 			   "Unsolicited CQE.\n");
2274 			qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
2275 			/*
2276 			 * Don't add a work list item.  Increment the
2277 			 * consumer index and move on.
2278 			 */
2279 			goto inc_idx;
2280 		}
2281 
2282 		xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2283 		io_req = &qedf->cmd_mgr->cmds[xid];
2284 
2285 		/*
2286 		 * Figure out which percpu thread we should queue this I/O
2287 		 * on.
2288 		 */
2289 		if (!io_req)
2290 			/* If there is no io_req associated with this CQE
2291 			 * just queue it on CPU 0
2292 			 */
2293 			cpu = 0;
2294 		else {
2295 			cpu = io_req->cpu;
2296 			io_req->int_cpu = smp_processor_id();
2297 		}
2298 
2299 		io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2300 		if (!io_work) {
2301 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2302 				   "work for I/O completion.\n");
2303 			continue;
2304 		}
2305 		memset(io_work, 0, sizeof(struct qedf_io_work));
2306 
2307 		INIT_WORK(&io_work->work, qedf_fp_io_handler);
2308 
2309 		/* Copy contents of CQE for deferred processing */
2310 		memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2311 
2312 		io_work->qedf = fp->qedf;
2313 		io_work->fp = NULL; /* Only used for unsolicited frames */
2314 
2315 		queue_work_on(cpu, qedf_io_wq, &io_work->work);
2316 
2317 inc_idx:
2318 		que->cq_cons_idx++;
2319 		if (que->cq_cons_idx == fp->cq_num_entries)
2320 			que->cq_cons_idx = 0;
2321 		new_cqes--;
2322 	}
2323 
2324 	return true;
2325 }
2326 
2327 
2328 /* MSI-X fastpath handler code */
2329 static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
2330 {
2331 	struct qedf_fastpath *fp = dev_id;
2332 
2333 	if (!fp) {
2334 		QEDF_ERR(NULL, "fp is null.\n");
2335 		return IRQ_HANDLED;
2336 	}
2337 	if (!fp->sb_info) {
2338 		QEDF_ERR(NULL, "fp->sb_info is null.\n");
2339 		return IRQ_HANDLED;
2340 	}
2341 
2342 	/*
2343 	 * Disable interrupts for this status block while we process new
2344 	 * completions
2345 	 */
2346 	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
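	/*
	 * The loop below follows the usual lost-interrupt avoidance
	 * pattern: process completions, publish the latest status block
	 * index, then re-check for work before re-arming the IGU so a
	 * CQE that arrived in the window is not missed.
	 */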
2347 
2348 	while (1) {
2349 		qedf_process_completions(fp);
2350 
2351 		if (qedf_fp_has_work(fp) == 0) {
2352 			/* Update the sb information */
2353 			qed_sb_update_sb_idx(fp->sb_info);
2354 
2355 			/* Check for more work */
2356 			rmb();
2357 
2358 			if (qedf_fp_has_work(fp) == 0) {
2359 				/* Re-enable interrupts */
2360 				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
2361 				return IRQ_HANDLED;
2362 			}
2363 		}
2364 	}
2365 
2366 	/* Do we ever want to break out of above loop? */
2367 	return IRQ_HANDLED;
2368 }
2369 
2370 /* simd handler for MSI/INTa */
2371 static void qedf_simd_int_handler(void *cookie)
2372 {
2373 	/* Cookie is qedf_ctx struct */
2374 	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2375 
2376 	QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
2377 }
2378 
2379 #define QEDF_SIMD_HANDLER_NUM		0
2380 static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
2381 {
2382 	int i;
2383 	u16 vector_idx = 0;
2384 	u32 vector;
2385 
2386 	if (qedf->int_info.msix_cnt) {
2387 		for (i = 0; i < qedf->int_info.used_cnt; i++) {
2388 			vector_idx = i * qedf->dev_info.common.num_hwfns +
2389 				qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2390 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2391 				  "Freeing IRQ #%d vector_idx=%d.\n",
2392 				  i, vector_idx);
2393 			vector = qedf->int_info.msix[vector_idx].vector;
2394 			synchronize_irq(vector);
2395 			irq_set_affinity_hint(vector, NULL);
2396 			irq_set_affinity_notifier(vector, NULL);
2397 			free_irq(vector, &qedf->fp_array[i]);
2398 		}
2399 	} else
2400 		qed_ops->common->simd_handler_clean(qedf->cdev,
2401 		    QEDF_SIMD_HANDLER_NUM);
2402 
2403 	qedf->int_info.used_cnt = 0;
2404 	qed_ops->common->set_fp_int(qedf->cdev, 0);
2405 }
2406 
2407 static int qedf_request_msix_irq(struct qedf_ctx *qedf)
2408 {
2409 	int i, rc, cpu;
2410 	u16 vector_idx = 0;
2411 	u32 vector;
2412 
2413 	cpu = cpumask_first(cpu_online_mask);
2414 	for (i = 0; i < qedf->num_queues; i++) {
2415 		vector_idx = i * qedf->dev_info.common.num_hwfns +
2416 			qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2417 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2418 			  "Requesting IRQ #%d vector_idx=%d.\n",
2419 			  i, vector_idx);
2420 		vector = qedf->int_info.msix[vector_idx].vector;
2421 		rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
2422 				 &qedf->fp_array[i]);
2423 
2424 		if (rc) {
2425 			QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
2426 			qedf_sync_free_irqs(qedf);
2427 			return rc;
2428 		}
2429 
2430 		qedf->int_info.used_cnt++;
2431 		rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
2432 		cpu = cpumask_next(cpu, cpu_online_mask);
2433 	}
2434 
2435 	return 0;
2436 }
2437 
2438 static int qedf_setup_int(struct qedf_ctx *qedf)
2439 {
2440 	int rc = 0;
2441 
2442 	/*
2443 	 * Learn interrupt configuration
2444 	 */
2445 	rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
2446 	if (rc <= 0)
2447 		return 0;
2448 
2449 	rc  = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
2450 	if (rc)
2451 		return 0;
2452 
2453 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "msix_cnt = "
2454 		   "0x%x, num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
2455 		   num_online_cpus());
2456 
2457 	if (qedf->int_info.msix_cnt)
2458 		return qedf_request_msix_irq(qedf);
2459 
2460 	qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
2461 	    QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
2462 	qedf->int_info.used_cnt = 1;
2463 
2464 	QEDF_ERR(&qedf->dbg_ctx,
2465 		 "Cannot load driver due to a lack of MSI-X vectors.\n");
2466 	return -EINVAL;
2467 }
2468 
2469 /* Main function for libfc frame reception */
2470 static void qedf_recv_frame(struct qedf_ctx *qedf,
2471 	struct sk_buff *skb)
2472 {
2473 	u32 fr_len;
2474 	struct fc_lport *lport;
2475 	struct fc_frame_header *fh;
2476 	struct fcoe_crc_eof crc_eof;
2477 	struct fc_frame *fp;
2478 	u8 *mac = NULL;
2479 	u8 *dest_mac = NULL;
2480 	struct fcoe_hdr *hp;
2481 	struct qedf_rport *fcport;
2482 	struct fc_lport *vn_port;
2483 	u32 f_ctl;
2484 
2485 	lport = qedf->lport;
2486 	if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
2487 		QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
2488 		kfree_skb(skb);
2489 		return;
2490 	}
2491 
2492 	if (skb_is_nonlinear(skb))
2493 		skb_linearize(skb);
2494 	mac = eth_hdr(skb)->h_source;
2495 	dest_mac = eth_hdr(skb)->h_dest;
2496 
2497 	/* Pull the header */
2498 	hp = (struct fcoe_hdr *)skb->data;
2499 	fh = (struct fc_frame_header *) skb_transport_header(skb);
2500 	skb_pull(skb, sizeof(struct fcoe_hdr));
2501 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
2502 
2503 	fp = (struct fc_frame *)skb;
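	/*
	 * Note: libfc's struct fc_frame embeds a struct sk_buff as its
	 * first member, which is what makes this cast legal; the fr_*()
	 * accessors below fill in the libfc control block of the skb.
	 */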
2504 	fc_frame_init(fp);
2505 	fr_dev(fp) = lport;
2506 	fr_sof(fp) = hp->fcoe_sof;
2507 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
2508 		QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
2509 		kfree_skb(skb);
2510 		return;
2511 	}
2512 	fr_eof(fp) = crc_eof.fcoe_eof;
2513 	fr_crc(fp) = crc_eof.fcoe_crc32;
2514 	if (pskb_trim(skb, fr_len)) {
2515 		QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
2516 		kfree_skb(skb);
2517 		return;
2518 	}
2519 
2520 	fh = fc_frame_header_get(fp);
2521 
2522 	/*
2523 	 * Invalid frame filters.
2524 	 */
2525 
2526 	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
2527 	    fh->fh_type == FC_TYPE_FCP) {
2528 		/* Drop FCP data. We don't handle it in the L2 path. */
2529 		kfree_skb(skb);
2530 		return;
2531 	}
2532 	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
2533 	    fh->fh_type == FC_TYPE_ELS) {
2534 		switch (fc_frame_payload_op(fp)) {
2535 		case ELS_LOGO:
2536 			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
2537 				/* drop non-FIP LOGO */
2538 				kfree_skb(skb);
2539 				return;
2540 			}
2541 			break;
2542 		}
2543 	}
2544 
2545 	if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
2546 		/* Drop incoming ABTS */
2547 		kfree_skb(skb);
2548 		return;
2549 	}
2550 
2551 	if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
2552 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2553 		    "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
2554 		kfree_skb(skb);
2555 		return;
2556 	}
2557 
2558 	if (qedf->ctlr.state) {
2559 		if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
2560 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2561 			    "Wrong source address: mac:%pM dest_addr:%pM.\n",
2562 			    mac, qedf->ctlr.dest_addr);
2563 			kfree_skb(skb);
2564 			return;
2565 		}
2566 	}
2567 
2568 	vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
2569 
2570 	/*
2571 	 * If the destination ID from the frame header does not match what we
2572 	 * have on record for lport and the search for a NPIV port came up
2573 	 * empty then this is not addressed to our port so simply drop it.
2574 	 */
2575 	if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
2576 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2577 			  "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
2578 			  lport->port_id, ntoh24(fh->fh_d_id));
2579 		kfree_skb(skb);
2580 		return;
2581 	}
2582 
2583 	f_ctl = ntoh24(fh->fh_f_ctl);
2584 	if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
2585 	    (f_ctl & FC_FC_EX_CTX)) {
2586 		/* Drop incoming ABTS response that has both SEQ/EX CTX set */
2587 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2588 			  "Dropping ABTS response as both SEQ/EX CTX set.\n");
2589 		kfree_skb(skb);
2590 		return;
2591 	}
2592 
2593 	/*
2594 	 * If a connection is uploading, drop incoming FCoE frames as there
2595 	 * is a small window where we could try to return a frame while libfc
2596 	 * is trying to clean things up.
2597 	 */
2598 
2599 	/* Get fcport associated with d_id if it exists */
2600 	fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
2601 
2602 	if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
2603 	    &fcport->flags)) {
2604 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2605 		    "Connection uploading, dropping fp=%p.\n", fp);
2606 		kfree_skb(skb);
2607 		return;
2608 	}
2609 
2610 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
2611 	    "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
2612 	    ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2613 	    fh->fh_type);
2614 	if (qedf_dump_frames)
2615 		print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
2616 		    1, skb->data, skb->len, false);
2617 	fc_exch_recv(lport, fp);
2618 }
2619 
2620 static void qedf_ll2_process_skb(struct work_struct *work)
2621 {
2622 	struct qedf_skb_work *skb_work =
2623 	    container_of(work, struct qedf_skb_work, work);
2624 	struct qedf_ctx *qedf = skb_work->qedf;
2625 	struct sk_buff *skb = skb_work->skb;
2626 	struct ethhdr *eh;
2627 
2628 	if (!qedf) {
2629 		QEDF_ERR(NULL, "qedf is NULL\n");
2630 		goto err_out;
2631 	}
2632 
2633 	eh = (struct ethhdr *)skb->data;
2634 
2635 	/* Undo VLAN encapsulation */
2636 	if (eh->h_proto == htons(ETH_P_8021Q)) {
2637 		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
2638 		eh = skb_pull(skb, VLAN_HLEN);
2639 		skb_reset_mac_header(skb);
2640 	}
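	/*
	 * Note: the memmove() above shifts the two MAC addresses
	 * (ETH_ALEN * 2 bytes) down the buffer, overwriting the 4-byte
	 * 802.1Q tag, so after skb_pull() the frame begins with an
	 * untagged Ethernet header.
	 */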
2641 
2642 	/*
2643 	 * Process either a FIP frame or FCoE frame based on the
2644 	 * protocol value.  If it's not either just drop the
2645 	 * frame.
2646 	 */
2647 	if (eh->h_proto == htons(ETH_P_FIP)) {
2648 		qedf_fip_recv(qedf, skb);
2649 		goto out;
2650 	} else if (eh->h_proto == htons(ETH_P_FCOE)) {
2651 		__skb_pull(skb, ETH_HLEN);
2652 		qedf_recv_frame(qedf, skb);
2653 		goto out;
2654 	} else
2655 		goto err_out;
2656 
2657 err_out:
2658 	kfree_skb(skb);
2659 out:
2660 	kfree(skb_work);
2661 	return;
2662 }
2663 
2664 static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
2665 	u32 arg1, u32 arg2)
2666 {
2667 	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2668 	struct qedf_skb_work *skb_work;
2669 
2670 	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
2671 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2672 			  "Dropping frame as link state is down.\n");
2673 		kfree_skb(skb);
2674 		return 0;
2675 	}
2676 
2677 	skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
2678 	if (!skb_work) {
2679 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
2680 			   "dropping frame.\n");
2681 		kfree_skb(skb);
2682 		return 0;
2683 	}
2684 
2685 	INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
2686 	skb_work->skb = skb;
2687 	skb_work->qedf = qedf;
2688 	queue_work(qedf->ll2_recv_wq, &skb_work->work);
2689 
2690 	return 0;
2691 }
2692 
2693 static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
2694 	.rx_cb = qedf_ll2_rx,
2695 	.tx_cb = NULL,
2696 };
2697 
2698 /* Main thread to process I/O completions */
2699 void qedf_fp_io_handler(struct work_struct *work)
2700 {
2701 	struct qedf_io_work *io_work =
2702 	    container_of(work, struct qedf_io_work, work);
2703 	u32 comp_type;
2704 
2705 	/*
2706 	 * Deferred part of unsolicited CQE sends
2707 	 * frame to libfc.
2708 	 */
2709 	comp_type = (io_work->cqe.cqe_data >>
2710 	    FCOE_CQE_CQE_TYPE_SHIFT) &
2711 	    FCOE_CQE_CQE_TYPE_MASK;
2712 	if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
2713 	    io_work->fp)
2714 		fc_exch_recv(io_work->qedf->lport, io_work->fp);
2715 	else
2716 		qedf_process_cqe(io_work->qedf, &io_work->cqe);
2717 
2718 	kfree(io_work);
2719 }
2720 
2721 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
2722 	struct qed_sb_info *sb_info, u16 sb_id)
2723 {
2724 	struct status_block *sb_virt;
2725 	dma_addr_t sb_phys;
2726 	int ret;
2727 
2728 	sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
2729 	    sizeof(struct status_block), &sb_phys, GFP_KERNEL);
2730 
2731 	if (!sb_virt) {
2732 		QEDF_ERR(&qedf->dbg_ctx,
2733 			 "Status block allocation failed for id = %d.\n",
2734 			 sb_id);
2735 		return -ENOMEM;
2736 	}
2737 
2738 	ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
2739 	    sb_id, QED_SB_TYPE_STORAGE);
2740 
2741 	if (ret) {
2742 		dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys);
2743 		QEDF_ERR(&qedf->dbg_ctx,
2744 			 "Status block initialization failed (0x%x) for id = %d.\n",
2745 			 ret, sb_id);
2746 		return ret;
2747 	}
2748 
2749 	return 0;
2750 }
2751 
2752 static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
2753 {
2754 	if (sb_info->sb_virt)
2755 		dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
2756 		    (void *)sb_info->sb_virt, sb_info->sb_phys);
2757 }
2758 
2759 static void qedf_destroy_sb(struct qedf_ctx *qedf)
2760 {
2761 	int id;
2762 	struct qedf_fastpath *fp = NULL;
2763 
2764 	for (id = 0; id < qedf->num_queues; id++) {
2765 		fp = &(qedf->fp_array[id]);
2766 		if (fp->sb_id == QEDF_SB_ID_NULL)
2767 			break;
2768 		qedf_free_sb(qedf, fp->sb_info);
2769 		kfree(fp->sb_info);
2770 	}
2771 	kfree(qedf->fp_array);
2772 }
2773 
2774 static int qedf_prepare_sb(struct qedf_ctx *qedf)
2775 {
2776 	int id;
2777 	struct qedf_fastpath *fp;
2778 	int ret = 0;
2779 
2780 	qedf->fp_array =
2781 	    kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
2782 		GFP_KERNEL);
2783 
2784 	if (!qedf->fp_array) {
2785 		QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
2786 			  "failed.\n");
2787 		return -ENOMEM;
2788 	}
2789 
2790 	for (id = 0; id < qedf->num_queues; id++) {
2791 		fp = &(qedf->fp_array[id]);
2792 		fp->sb_id = QEDF_SB_ID_NULL;
2793 		fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
2794 		if (!fp->sb_info) {
2795 			QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
2796 				  "allocation failed.\n");
2797 			ret = -ENOMEM;
			goto err;
2798 		}
2799 		ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
2800 		if (ret) {
2801 			QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
2802 				  "initialization failed.\n");
2803 			goto err;
2804 		}
2805 		fp->sb_id = id;
2806 		fp->qedf = qedf;
2807 		fp->cq_num_entries =
2808 		    qedf->global_queues[id]->cq_mem_size /
2809 		    sizeof(struct fcoe_cqe);
2810 	}
2811 err:
2812 	return ret;
2813 }
2814 
2815 void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
2816 {
2817 	u16 xid;
2818 	struct qedf_ioreq *io_req;
2819 	struct qedf_rport *fcport;
2820 	u32 comp_type;
2821 	u8 io_comp_type;
2822 	unsigned long flags;
2823 
2824 	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2825 	    FCOE_CQE_CQE_TYPE_MASK;
2826 
2827 	xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2828 	io_req = &qedf->cmd_mgr->cmds[xid];
2829 
2830 	/* Completion not for a valid I/O anymore so just return */
2831 	if (!io_req) {
2832 		QEDF_ERR(&qedf->dbg_ctx,
2833 			 "io_req is NULL for xid=0x%x.\n", xid);
2834 		return;
2835 	}
2836 
2837 	fcport = io_req->fcport;
2838 
2839 	if (fcport == NULL) {
2840 		QEDF_ERR(&qedf->dbg_ctx,
2841 			 "fcport is NULL for xid=0x%x io_req=%p.\n",
2842 			 xid, io_req);
2843 		return;
2844 	}
2845 
2846 	/*
2847 	 * Check that fcport is offloaded.  If it isn't then the spinlock
2848 	 * isn't valid and shouldn't be taken. We should just return.
2849 	 */
2850 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2851 		QEDF_ERR(&qedf->dbg_ctx,
2852 			 "Session not offloaded yet, fcport = %p.\n", fcport);
2853 		return;
2854 	}
2855 
2856 	spin_lock_irqsave(&fcport->rport_lock, flags);
2857 	io_comp_type = io_req->cmd_type;
2858 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
2859 
2860 	switch (comp_type) {
2861 	case FCOE_GOOD_COMPLETION_CQE_TYPE:
2862 		atomic_inc(&fcport->free_sqes);
2863 		switch (io_comp_type) {
2864 		case QEDF_SCSI_CMD:
2865 			qedf_scsi_completion(qedf, cqe, io_req);
2866 			break;
2867 		case QEDF_ELS:
2868 			qedf_process_els_compl(qedf, cqe, io_req);
2869 			break;
2870 		case QEDF_TASK_MGMT_CMD:
2871 			qedf_process_tmf_compl(qedf, cqe, io_req);
2872 			break;
2873 		case QEDF_SEQ_CLEANUP:
2874 			qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
2875 			break;
2876 		}
2877 		break;
2878 	case FCOE_ERROR_DETECTION_CQE_TYPE:
2879 		atomic_inc(&fcport->free_sqes);
2880 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2881 		    "Error detect CQE.\n");
2882 		qedf_process_error_detect(qedf, cqe, io_req);
2883 		break;
2884 	case FCOE_EXCH_CLEANUP_CQE_TYPE:
2885 		atomic_inc(&fcport->free_sqes);
2886 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2887 		    "Cleanup CQE.\n");
2888 		qedf_process_cleanup_compl(qedf, cqe, io_req);
2889 		break;
2890 	case FCOE_ABTS_CQE_TYPE:
2891 		atomic_inc(&fcport->free_sqes);
2892 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2893 		    "Abort CQE.\n");
2894 		qedf_process_abts_compl(qedf, cqe, io_req);
2895 		break;
2896 	case FCOE_DUMMY_CQE_TYPE:
2897 		atomic_inc(&fcport->free_sqes);
2898 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2899 		    "Dummy CQE.\n");
2900 		break;
2901 	case FCOE_LOCAL_COMP_CQE_TYPE:
2902 		atomic_inc(&fcport->free_sqes);
2903 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2904 		    "Local completion CQE.\n");
2905 		break;
2906 	case FCOE_WARNING_CQE_TYPE:
2907 		atomic_inc(&fcport->free_sqes);
2908 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2909 		    "Warning CQE.\n");
2910 		qedf_process_warning_compl(qedf, cqe, io_req);
2911 		break;
2912 	case MAX_FCOE_CQE_TYPE:
2913 		atomic_inc(&fcport->free_sqes);
2914 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2915 		    "Max FCoE CQE.\n");
2916 		break;
2917 	default:
2918 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2919 		    "Default CQE.\n");
2920 		break;
2921 	}
2922 }
2923 
2924 static void qedf_free_bdq(struct qedf_ctx *qedf)
2925 {
2926 	int i;
2927 
2928 	if (qedf->bdq_pbl_list)
2929 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2930 		    qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
2931 
2932 	if (qedf->bdq_pbl)
2933 		dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
2934 		    qedf->bdq_pbl, qedf->bdq_pbl_dma);
2935 
2936 	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2937 		if (qedf->bdq[i].buf_addr) {
2938 			dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
2939 			    qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
2940 		}
2941 	}
2942 }
2943 
2944 static void qedf_free_global_queues(struct qedf_ctx *qedf)
2945 {
2946 	int i;
2947 	struct global_queue **gl = qedf->global_queues;
2948 
2949 	for (i = 0; i < qedf->num_queues; i++) {
2950 		if (!gl[i])
2951 			continue;
2952 
2953 		if (gl[i]->cq)
2954 			dma_free_coherent(&qedf->pdev->dev,
2955 			    gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
2956 		if (gl[i]->cq_pbl)
2957 			dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
2958 			    gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
2959 
2960 		kfree(gl[i]);
2961 	}
2962 
2963 	qedf_free_bdq(qedf);
2964 }
2965 
2966 static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2967 {
2968 	int i;
2969 	struct scsi_bd *pbl;
2970 	u64 *list;
2971 
2972 	/* Alloc dma memory for BDQ buffers */
2973 	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2974 		qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
2975 		    QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
2976 		if (!qedf->bdq[i].buf_addr) {
2977 			QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
2978 			    "buffer %d.\n", i);
2979 			return -ENOMEM;
2980 		}
2981 	}
2982 
2983 	/* Alloc dma memory for BDQ page buffer list */
2984 	qedf->bdq_pbl_mem_size =
2985 	    QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
2986 	qedf->bdq_pbl_mem_size =
2987 	    ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
2988 
2989 	qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
2990 	    qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
2991 	if (!qedf->bdq_pbl) {
2992 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
2993 		return -ENOMEM;
2994 	}
2995 
2996 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2997 		  "BDQ PBL addr=0x%p dma=%pad\n",
2998 		  qedf->bdq_pbl, &qedf->bdq_pbl_dma);
2999 
3000 	/*
3001 	 * Populate BDQ PBL with physical and virtual address of individual
3002 	 * BDQ buffers
3003 	 */
3004 	pbl = (struct scsi_bd *)qedf->bdq_pbl;
3005 	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
3006 		pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
3007 		pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
3008 		pbl->opaque.fcoe_opaque.hi = 0;
3009 		/* Opaque lo data is an index into the BDQ array */
3010 		pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
3011 		pbl++;
3012 	}
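	/*
	 * Example: PBL entry i now carries buffer i's DMA address split
	 * into address.hi/address.lo, with i itself stored in
	 * opaque.fcoe_opaque.lo so a completion can identify which BDQ
	 * buffer a received frame landed in.
	 */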
3013 
3014 	/* Allocate list of PBL pages */
3015 	qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
3016 						QEDF_PAGE_SIZE,
3017 						&qedf->bdq_pbl_list_dma,
3018 						GFP_KERNEL);
3019 	if (!qedf->bdq_pbl_list) {
3020 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
3021 		return -ENOMEM;
3022 	}
3023 
3024 	/*
3025 	 * Now populate PBL list with pages that contain pointers to the
3026 	 * individual buffers.
3027 	 */
3028 	qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
3029 	    QEDF_PAGE_SIZE;
3030 	list = (u64 *)qedf->bdq_pbl_list;
3031 	for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
3032 		*list = qedf->bdq_pbl_dma;
3033 		list++;
3034 	}
3035 
3036 	return 0;
3037 }
3038 
3039 static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
3040 {
3041 	u32 *list;
3042 	int i;
3043 	int status;
3044 	u32 *pbl;
3045 	dma_addr_t page;
3046 	int num_pages;
3047 
3048 	/* Allocate and map CQs, RQs */
3049 	/*
3050 	 * Number of global queues (CQ / RQ). This should
3051 	 * be <= number of available MSIX vectors for the PF
3052 	 */
3053 	if (!qedf->num_queues) {
3054 		QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
3055 		return -ENOMEM;
3056 	}
3057 
3058 	/*
3059 	 * Make sure we allocated the PBL that will contain the physical
3060 	 * addresses of our queues
3061 	 */
3062 	if (!qedf->p_cpuq) {
3063 		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
3064 		return -EINVAL;
3065 	}
3066 
3067 	qedf->global_queues = kzalloc((sizeof(struct global_queue *)
3068 	    * qedf->num_queues), GFP_KERNEL);
3069 	if (!qedf->global_queues) {
3070 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
3071 			  "queues array ptr memory\n");
3072 		return -ENOMEM;
3073 	}
3074 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3075 		   "qedf->global_queues=%p.\n", qedf->global_queues);
3076 
3077 	/* Allocate DMA coherent buffers for BDQ */
3078 	status = qedf_alloc_bdq(qedf);
3079 	if (status) {
3080 		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
3081 		goto mem_alloc_failure;
3082 	}
3083 
3084 	/* Allocate a CQ and an associated PBL for each MSI-X vector */
3085 	for (i = 0; i < qedf->num_queues; i++) {
3086 		qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
3087 		    GFP_KERNEL);
3088 		if (!qedf->global_queues[i]) {
3089 			QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
3090 				   "global queue %d.\n", i);
3091 			status = -ENOMEM;
3092 			goto mem_alloc_failure;
3093 		}
3094 
3095 		qedf->global_queues[i]->cq_mem_size =
3096 		    FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
3097 		qedf->global_queues[i]->cq_mem_size =
3098 		    ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
3099 
3100 		qedf->global_queues[i]->cq_pbl_size =
3101 		    (qedf->global_queues[i]->cq_mem_size /
3102 		    PAGE_SIZE) * sizeof(void *);
3103 		qedf->global_queues[i]->cq_pbl_size =
3104 		    ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
3105 
3106 		qedf->global_queues[i]->cq =
3107 		    dma_alloc_coherent(&qedf->pdev->dev,
3108 				       qedf->global_queues[i]->cq_mem_size,
3109 				       &qedf->global_queues[i]->cq_dma,
3110 				       GFP_KERNEL);
3111 
3112 		if (!qedf->global_queues[i]->cq) {
3113 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
3114 			status = -ENOMEM;
3115 			goto mem_alloc_failure;
3116 		}
3117 
3118 		qedf->global_queues[i]->cq_pbl =
3119 		    dma_alloc_coherent(&qedf->pdev->dev,
3120 				       qedf->global_queues[i]->cq_pbl_size,
3121 				       &qedf->global_queues[i]->cq_pbl_dma,
3122 				       GFP_KERNEL);
3123 
3124 		if (!qedf->global_queues[i]->cq_pbl) {
3125 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
3126 			status = -ENOMEM;
3127 			goto mem_alloc_failure;
3128 		}
3129 
3130 		/* Create PBL */
3131 		num_pages = qedf->global_queues[i]->cq_mem_size /
3132 		    QEDF_PAGE_SIZE;
3133 		page = qedf->global_queues[i]->cq_dma;
3134 		pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
3135 
3136 		while (num_pages--) {
3137 			*pbl = U64_LO(page);
3138 			pbl++;
3139 			*pbl = U64_HI(page);
3140 			pbl++;
3141 			page += QEDF_PAGE_SIZE;
3142 		}
3143 		/* Set the initial consumer index for cq */
3144 		qedf->global_queues[i]->cq_cons_idx = 0;
3145 	}
3146 
3147 	list = (u32 *)qedf->p_cpuq;
3148 
3149 	/*
3150 	 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
3151 	 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc.  Each PBL pointer points
3152 	 * to the physical address which contains an array of pointers to
3153 	 * the physical addresses of the specific queue pages.
3154 	 */
3155 	for (i = 0; i < qedf->num_queues; i++) {
3156 		*list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
3157 		list++;
3158 		*list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
3159 		list++;
3160 		*list = U64_LO(0);
3161 		list++;
3162 		*list = U64_HI(0);
3163 		list++;
3164 	}
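	/*
	 * Example: with two queues the array reads CQ0 PBL lo/hi, 0, 0,
	 * CQ1 PBL lo/hi, 0, 0; the zero pairs are the unused RQ PBL
	 * slots.
	 */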
3165 
3166 	return 0;
3167 
3168 mem_alloc_failure:
3169 	qedf_free_global_queues(qedf);
3170 	return status;
3171 }
3172 
3173 static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
3174 {
3175 	u8 sq_num_pbl_pages;
3176 	u32 sq_mem_size;
3177 	u32 cq_mem_size;
3178 	u32 cq_num_entries;
3179 	int rval;
3180 
3181 	/*
3182 	 * The number of completion queues/fastpath interrupts/status blocks
3183 	 * we allocate is the minimum of:
3184 	 *
3185 	 * Number of CPUs
3186 	 * Number allocated by qed for our PCI function
3187 	 */
3188 	qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
3189 
3190 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
3191 		   qedf->num_queues);
3192 
3193 	qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
3194 	    qedf->num_queues * sizeof(struct qedf_glbl_q_params),
3195 	    &qedf->hw_p_cpuq, GFP_KERNEL);
3196 
3197 	if (!qedf->p_cpuq) {
3198 		QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
3199 		return 1;
3200 	}
3201 
3202 	rval = qedf_alloc_global_queues(qedf);
3203 	if (rval) {
3204 		QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
3205 			  "failed.\n");
3206 		return 1;
3207 	}
3208 
3209 	/* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
3210 	sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
3211 	sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
3212 	sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);
3213 
3214 	/* Calculate CQ num entries */
3215 	cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
3216 	cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
3217 	cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
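	/*
	 * Illustrative arithmetic (assumed sizes): with a 32-byte CQE and
	 * FCOE_PARAMS_CQ_NUM_ENTRIES = 1000, cq_mem_size is 32000 rounded
	 * up to 32768 (a 4 KB page multiple), giving cq_num_entries = 1024.
	 */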
3218 
3219 	memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
3220 
3221 	/* Setup the value for fcoe PF */
3222 	qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
3223 	qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
3224 	qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
3225 	    (u64)qedf->hw_p_cpuq;
3226 	qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
3227 
3228 	qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
3229 
3230 	qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
3231 	qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
3232 
3233 	/* log_page_size: 12 for 4KB pages */
3234 	qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
3235 
3236 	qedf->pf_params.fcoe_pf_params.mtu = 9000;
3237 	qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
3238 	qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
3239 
3240 	/* BDQ address and size */
3241 	qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
3242 	    qedf->bdq_pbl_list_dma;
3243 	qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
3244 	    qedf->bdq_pbl_list_num_entries;
3245 	qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
3246 
3247 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3248 	    "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
3249 	    qedf->bdq_pbl_list,
3250 	    qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
3251 	    qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
3252 
3253 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3254 	    "cq_num_entries=%d.\n",
3255 	    qedf->pf_params.fcoe_pf_params.cq_num_entries);
3256 
3257 	return 0;
3258 }
3259 
3260 /* Free DMA coherent memory for array of queue pointers we pass to qed */
3261 static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
3262 {
3263 	size_t size = 0;
3264 
3265 	if (qedf->p_cpuq) {
3266 		size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
3267 		dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
3268 		    qedf->hw_p_cpuq);
3269 	}
3270 
3271 	qedf_free_global_queues(qedf);
3272 
3273 	kfree(qedf->global_queues);
3274 }
3275 
3276 /*
3277  * PCI driver functions
3278  */
3279 
3280 static const struct pci_device_id qedf_pci_tbl[] = {
3281 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
3282 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
3283 	{0}
3284 };
3285 MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);
3286 
3287 static struct pci_driver qedf_pci_driver = {
3288 	.name = QEDF_MODULE_NAME,
3289 	.id_table = qedf_pci_tbl,
3290 	.probe = qedf_probe,
3291 	.remove = qedf_remove,
3292 	.shutdown = qedf_shutdown,
3293 	.suspend = qedf_suspend,
3294 };
3295 
3296 static int __qedf_probe(struct pci_dev *pdev, int mode)
3297 {
3298 	int rc = -EINVAL;
3299 	struct fc_lport *lport;
3300 	struct qedf_ctx *qedf = NULL;
3301 	struct Scsi_Host *host;
3302 	bool is_vf = false;
3303 	struct qed_ll2_params params;
3304 	char host_buf[20];
3305 	struct qed_link_params link_params;
3306 	int status;
3307 	void *task_start, *task_end;
3308 	struct qed_slowpath_params slowpath_params;
3309 	struct qed_probe_params qed_params;
3310 	u16 retry_cnt = 10;
3311 
3312 	/*
3313 	 * When doing error recovery we didn't reap the lport so don't try
3314 	 * to reallocate it.
3315 	 */
3316 retry_probe:
3317 	if (mode == QEDF_MODE_RECOVERY)
3318 		msleep(2000);
3319 
3320 	if (mode != QEDF_MODE_RECOVERY) {
3321 		lport = libfc_host_alloc(&qedf_host_template,
3322 		    sizeof(struct qedf_ctx));
3323 
3324 		if (!lport) {
3325 			QEDF_ERR(NULL, "Could not allocate lport.\n");
3326 			rc = -ENOMEM;
3327 			goto err0;
3328 		}
3329 
3330 		fc_disc_init(lport);
3331 
3332 		/* Initialize qedf_ctx */
3333 		qedf = lport_priv(lport);
3334 		set_bit(QEDF_PROBING, &qedf->flags);
3335 		qedf->lport = lport;
3336 		qedf->ctlr.lp = lport;
3337 		qedf->pdev = pdev;
3338 		qedf->dbg_ctx.pdev = pdev;
3339 		qedf->dbg_ctx.host_no = lport->host->host_no;
3340 		spin_lock_init(&qedf->hba_lock);
3341 		INIT_LIST_HEAD(&qedf->fcports);
3342 		qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
3343 		atomic_set(&qedf->num_offloads, 0);
3344 		qedf->stop_io_on_error = false;
3345 		pci_set_drvdata(pdev, qedf);
3346 		init_completion(&qedf->fipvlan_compl);
3347 		mutex_init(&qedf->stats_mutex);
3348 		mutex_init(&qedf->flush_mutex);
3349 		qedf->flogi_pending = 0;
3350 
3351 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
3352 		   "QLogic FastLinQ FCoE Module qedf %s, "
3353 		   "FW %d.%d.%d.%d\n", QEDF_VERSION,
3354 		   FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
3355 		   FW_ENGINEERING_VERSION);
3356 	} else {
3357 		/* Init pointers during recovery */
3358 		qedf = pci_get_drvdata(pdev);
3359 		set_bit(QEDF_PROBING, &qedf->flags);
3360 		lport = qedf->lport;
3361 	}
3362 
3363 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
3364 
3365 	host = lport->host;
3366 
3367 	/* Allocate mempool for qedf_io_work structs */
3368 	qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
3369 	    qedf_io_work_cache);
3370 	if (qedf->io_mempool == NULL) {
3371 		QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
3372 		goto err1;
3373 	}
3374 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
3375 	    qedf->io_mempool);
3376 
3377 	qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
3378 					       1, qedf->lport->host->host_no);
3379 	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
3380 	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
3381 	INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
3382 	INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
3383 	qedf->fipvlan_retries = qedf_fipvlan_retries;
3384 	/* Set a default prio in case DCBX doesn't converge */
3385 	if (qedf_default_prio > -1) {
3386 		/*
3387 		 * This is the case where we pass a modparam in so we want to
3388 		 * honor it even if dcbx doesn't converge.
3389 		 */
3390 		qedf->prio = qedf_default_prio;
3391 	} else
3392 		qedf->prio = QEDF_DEFAULT_PRIO;
3393 
3394 	/*
3395 	 * Common probe. Takes care of basic hardware init and pci_*
3396 	 * functions.
3397 	 */
3398 	memset(&qed_params, 0, sizeof(qed_params));
3399 	qed_params.protocol = QED_PROTOCOL_FCOE;
3400 	qed_params.dp_module = qedf_dp_module;
3401 	qed_params.dp_level = qedf_dp_level;
3402 	qed_params.is_vf = is_vf;
3403 	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
3404 	if (!qedf->cdev) {
3405 		if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
3406 			QEDF_ERR(&qedf->dbg_ctx,
3407 				"Retrying hardware init, %d attempts left.\n", retry_cnt);
3408 			retry_cnt--;
3409 			goto retry_probe;
3410 		}
3411 		QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
3412 		rc = -ENODEV;
3413 		goto err1;
3414 	}
3415 
3416 	/* Learn information crucial for qedf to progress */
3417 	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3418 	if (rc) {
3419 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
3420 		goto err1;
3421 	}
3422 
3423 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
3424 		  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
3425 		  qedf->dev_info.common.num_hwfns,
3426 		  qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
3427 
3428 	/* Queue allocation code should come here.
3429 	 * The order should be:
3430 	 * 	slowpath_start
3431 	 * 	status block allocation
3432 	 *	interrupt registration (to get min number of queues)
3433 	 *	set_fcoe_pf_param
3434 	 *	qed_sp_fcoe_func_start
3435 	 */
3436 	rc = qedf_set_fcoe_pf_param(qedf);
3437 	if (rc) {
3438 		QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
3439 		goto err2;
3440 	}
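	/*
	 * First of two update_pf_params() calls; it is called again after
	 * slowpath_start (see the comment there).
	 */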
3441 	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3442 
3443 	/* Learn information crucial for qedf to progress */
3444 	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3445 	if (rc) {
3446 		QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
3447 		goto err2;
3448 	}
3449 
3450 	if (mode != QEDF_MODE_RECOVERY) {
3451 		qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
3452 		if (IS_ERR(qedf->devlink)) {
3453 			QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
3454 			rc = PTR_ERR(qedf->devlink);
3455 			qedf->devlink = NULL;
3456 			goto err2;
3457 		}
3458 	}
3459 
3460 	/* Record BDQ producer doorbell addresses */
3461 	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
3462 	qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
3463 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3464 	    "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
3465 	    qedf->bdq_secondary_prod);
3466 
3467 	qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
3468 
3469 	rc = qedf_prepare_sb(qedf);
3470 	if (rc) {
3471 
3472 		QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
3473 		goto err2;
3474 	}
3475 
3476 	/* Start the Slowpath-process */
3477 	memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
3478 	slowpath_params.int_mode = QED_INT_MODE_MSIX;
3479 	slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
3480 	slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
3481 	slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
3482 	slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
3483 	strscpy(slowpath_params.name, "qedf", sizeof(slowpath_params.name));
3484 	rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
3485 	if (rc) {
3486 		QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
3487 		goto err2;
3488 	}
3489 
3490 	/*
3491 	 * update_pf_params needs to be called before and after slowpath
3492 	 * start
3493 	 */
3494 	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3495 
3496 	/* Setup interrupts */
3497 	rc = qedf_setup_int(qedf);
3498 	if (rc) {
3499 		QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
3500 		goto err3;
3501 	}
3502 
3503 	rc = qed_ops->start(qedf->cdev, &qedf->tasks);
3504 	if (rc) {
3505 		QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
3506 		goto err4;
3507 	}
3508 	task_start = qedf_get_task_mem(&qedf->tasks, 0);
3509 	task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
3510 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
3511 		   "end=%p block_size=%u.\n", task_start, task_end,
3512 		   qedf->tasks.size);
3513 
3514 	/*
3515 	 * We need to write the number of BDs in the BDQ we've preallocated so
3516 	 * the f/w will do a prefetch and we'll get an unsolicited CQE when a
3517 	 * packet arrives.
3518 	 */
3519 	qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
3520 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3521 	    "Writing %d to primary and secondary BDQ doorbell registers.\n",
3522 	    qedf->bdq_prod_idx);
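	/*
	 * The readw() after each doorbell writew() below flushes the
	 * posted MMIO write so the new producer index reaches the hardware
	 * before we continue.
	 */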
3523 	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
3524 	readw(qedf->bdq_primary_prod);
3525 	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
3526 	readw(qedf->bdq_secondary_prod);
3527 
3528 	qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3529 
3530 	/* Now that the dev_info struct has been filled in, set the MAC
3531 	 * address.
3532 	 */
3533 	ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
3534 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
3535 		   qedf->mac);
3536 
3537 	/*
3538 	 * Set the WWNN and WWPN in the following way:
3539 	 *
3540 	 * If the info we get from qed is non-zero then use that to set the
3541 	 * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
3542 	 * on the MAC address.
3543 	 */
3544 	if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
3545 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3546 		    "Setting WWPN and WWNN from qed dev_info.\n");
3547 		qedf->wwnn = qedf->dev_info.wwnn;
3548 		qedf->wwpn = qedf->dev_info.wwpn;
3549 	} else {
3550 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3551 		    "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
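		/*
		 * Scheme 1 derives the node name and scheme 2 the port
		 * name from the MAC address (see fcoe_wwn_from_mac()).
		 */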
3552 		qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
3553 		qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
3554 	}
3555 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,  "WWNN=%016llx "
3556 		   "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
3557 
3558 	sprintf(host_buf, "host_%d", host->host_no);
3559 	qed_ops->common->set_name(qedf->cdev, host_buf);
3560 
3561 	/* Allocate cmd mgr */
3562 	qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
3563 	if (!qedf->cmd_mgr) {
3564 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
3565 		rc = -ENOMEM;
3566 		goto err5;
3567 	}
3568 
3569 	if (mode != QEDF_MODE_RECOVERY) {
3570 		host->transportt = qedf_fc_transport_template;
3571 		host->max_lun = qedf_max_lun;
3572 		host->max_cmd_len = QEDF_MAX_CDB_LEN;
3573 		host->max_id = QEDF_MAX_SESSIONS;
3574 		host->can_queue = FCOE_PARAMS_NUM_TASKS;
3575 		rc = scsi_add_host(host, &pdev->dev);
3576 		if (rc) {
3577 			QEDF_WARN(&qedf->dbg_ctx,
3578 				  "Error adding Scsi_Host rc=0x%x.\n", rc);
3579 			goto err6;
3580 		}
3581 	}
3582 
3583 	memset(&params, 0, sizeof(params));
3584 	params.mtu = QEDF_LL2_BUF_SIZE;
3585 	ether_addr_copy(params.ll2_mac_address, qedf->mac);
3586 
3587 	/* Start LL2 processing thread */
3588 	qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
3589 					    host->host_no);
3590 	if (!qedf->ll2_recv_wq) {
3591 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate LL2 workqueue.\n");
3592 		rc = -ENOMEM;
3593 		goto err7;
3594 	}
3595 
3596 #ifdef CONFIG_DEBUG_FS
3597 	qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
3598 			    qedf_dbg_fops);
3599 #endif
3600 
3601 	/* Start LL2 */
3602 	qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
3603 	rc = qed_ops->ll2->start(qedf->cdev, &params);
3604 	if (rc) {
3605 		QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
3606 		goto err7;
3607 	}
3608 	set_bit(QEDF_LL2_STARTED, &qedf->flags);
3609 
3610 	/* No FIP/FCoE VLAN selected yet; FIP VLAN discovery will set it */
3611 	qedf->vlan_id = 0;
3612 
3613 	/*
3614 	 * No need to setup fcoe_ctlr or fc_lport objects during recovery since
3615 	 * they were not reaped during the unload process.
3616 	 */
3617 	if (mode != QEDF_MODE_RECOVERY) {
3618 		/* Setup embedded fcoe controller */
3619 		qedf_fcoe_ctlr_setup(qedf);
3620 
3621 		/* Setup lport */
3622 		rc = qedf_lport_setup(qedf);
3623 		if (rc) {
3624 			QEDF_ERR(&(qedf->dbg_ctx),
3625 			    "qedf_lport_setup failed.\n");
3626 			goto err7;
3627 		}
3628 	}
3629 
3630 	qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
3631 				WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
3632 	if (!qedf->timer_work_queue) {
3633 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate timer "
3634 			  "workqueue.\n");
3635 		rc = -ENOMEM;
3636 		goto err7;
3637 	}
3638 
3639 	/* DPC workqueue is not reaped during recovery unload */
3640 	if (mode != QEDF_MODE_RECOVERY) {
3641 		sprintf(host_buf, "qedf_%u_dpc",
3642 		    qedf->lport->host->host_no);
3643 		qedf->dpc_wq =
3644 			alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
3645 	}
3646 	INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
3647 
3648 	/*
3649 	 * GRC dump and sysfs parameters are not reaped during the recovery
3650 	 * unload process.
3651 	 */
3652 	if (mode != QEDF_MODE_RECOVERY) {
3653 		qedf->grcdump_size =
3654 		    qed_ops->common->dbg_all_data_size(qedf->cdev);
3655 		if (qedf->grcdump_size) {
3656 			rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
3657 			    qedf->grcdump_size);
3658 			if (rc) {
3659 				QEDF_ERR(&(qedf->dbg_ctx),
3660 				    "GRC Dump buffer alloc failed.\n");
3661 				qedf->grcdump = NULL;
3662 			}
3663 
3664 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3665 			    "grcdump: addr=%p, size=%u.\n",
3666 			    qedf->grcdump, qedf->grcdump_size);
3667 		}
3668 		qedf_create_sysfs_ctx_attr(qedf);
3669 
3670 		/* Initialize I/O tracing for this adapter */
3671 		spin_lock_init(&qedf->io_trace_lock);
3672 		qedf->io_trace_idx = 0;
3673 	}
3674 
3675 	init_completion(&qedf->flogi_compl);
3676 
3677 	status = qed_ops->common->update_drv_state(qedf->cdev, true);
3678 	if (status)
3679 		QEDF_ERR(&(qedf->dbg_ctx),
3680 			"Failed to send drv state to MFW.\n");
3681 
3682 	memset(&link_params, 0, sizeof(struct qed_link_params));
3683 	link_params.link_up = true;
3684 	status = qed_ops->common->set_link(qedf->cdev, &link_params);
3685 	if (status)
3686 		QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
3687 
3688 	/*
	 * Start/restart discovery. In recovery the lport and fcoe_ctlr were
	 * not reaped, so a simulated link-up is enough to kick things off;
	 * on a fresh probe, perform a full fabric login.
	 */
3689 	if (mode == QEDF_MODE_RECOVERY)
3690 		fcoe_ctlr_link_up(&qedf->ctlr);
3691 	else
3692 		fc_fabric_login(lport);
3693 
3694 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
3695 
3696 	clear_bit(QEDF_PROBING, &qedf->flags);
3697 
3698 	/* All good */
3699 	return 0;
3700 
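/*
 * Error unwind: each label releases what was acquired after the one below
 * it -- LL2 workqueue and SCSI host (err7), cmd mgr (err6), FCoE function
 * stop (err5), PF params and IRQs (err4), slowpath stop (err3), qed
 * common remove (err2), Scsi_Host reference (err1).
 */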
3701 err7:
3702 	if (qedf->ll2_recv_wq)
3703 		destroy_workqueue(qedf->ll2_recv_wq);
3704 	fc_remove_host(qedf->lport->host);
3705 	scsi_remove_host(qedf->lport->host);
3706 #ifdef CONFIG_DEBUG_FS
3707 	qedf_dbg_host_exit(&(qedf->dbg_ctx));
3708 #endif
3709 err6:
3710 	qedf_cmd_mgr_free(qedf->cmd_mgr);
3711 err5:
3712 	qed_ops->stop(qedf->cdev);
3713 err4:
3714 	qedf_free_fcoe_pf_param(qedf);
3715 	qedf_sync_free_irqs(qedf);
3716 err3:
3717 	qed_ops->common->slowpath_stop(qedf->cdev);
3718 err2:
3719 	qed_ops->common->remove(qedf->cdev);
3720 err1:
3721 	scsi_host_put(lport->host);
3722 err0:
3723 	return rc;
3724 }
3725 
3726 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3727 {
3728 	return __qedf_probe(pdev, QEDF_MODE_NORMAL);
3729 }
3730 
3731 static void __qedf_remove(struct pci_dev *pdev, int mode)
3732 {
3733 	struct qedf_ctx *qedf;
3734 	int rc;
3735 	int cnt = 0;
3736 
3737 	if (!pdev) {
3738 		QEDF_ERR(NULL, "pdev is NULL.\n");
3739 		return;
3740 	}
3741 
3742 	qedf = pci_get_drvdata(pdev);
3743 
3744 	/*
3745 	 * Prevent race where we're in board disable work and then try to
3746 	 * rmmod the module.
3747 	 */
3748 	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
3749 		QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
3750 		return;
3751 	}
3752 
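/*
 * If an STAG change (soft context reset) is in flight, poll for up to
 * ~2.5 seconds (5 x 500ms) for it to finish before tearing down.
 */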
3753 stag_in_prog:
3754 	if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
3755 		QEDF_ERR(&qedf->dbg_ctx, "STAG change in progress, cnt=%d.\n", cnt);
3756 		cnt++;
3757 
3758 		if (cnt < 5) {
3759 			msleep(500);
3760 			goto stag_in_prog;
3761 		}
3762 	}
3763 
3764 	if (mode != QEDF_MODE_RECOVERY)
3765 		set_bit(QEDF_UNLOADING, &qedf->flags);
3766 
3767 	/* Logoff the fabric to upload all connections */
3768 	if (mode == QEDF_MODE_RECOVERY)
3769 		fcoe_ctlr_link_down(&qedf->ctlr);
3770 	else
3771 		fc_fabric_logoff(qedf->lport);
3772 
3773 	if (!qedf_wait_for_upload(qedf))
3774 		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
3775 
3776 #ifdef CONFIG_DEBUG_FS
3777 	qedf_dbg_host_exit(&(qedf->dbg_ctx));
3778 #endif
3779 
3780 	/* Stop any link update handling */
3781 	cancel_delayed_work_sync(&qedf->link_update);
3782 	destroy_workqueue(qedf->link_update_wq);
3783 	qedf->link_update_wq = NULL;
3784 
3785 	if (qedf->timer_work_queue)
3786 		destroy_workqueue(qedf->timer_work_queue);
3787 
3788 	/* Stop Light L2 */
3789 	clear_bit(QEDF_LL2_STARTED, &qedf->flags);
3790 	qed_ops->ll2->stop(qedf->cdev);
3791 	if (qedf->ll2_recv_wq)
3792 		destroy_workqueue(qedf->ll2_recv_wq);
3793 
3794 	/* Stop fastpath */
3795 	qedf_sync_free_irqs(qedf);
3796 	qedf_destroy_sb(qedf);
3797 
3798 	/*
3799 	 * During recovery don't destroy OS constructs that represent the
3800 	 * physical port.
3801 	 */
3802 	if (mode != QEDF_MODE_RECOVERY) {
3803 		qedf_free_grc_dump_buf(&qedf->grcdump);
3804 		qedf_remove_sysfs_ctx_attr(qedf);
3805 
3806 		/* Remove all SCSI/libfc/libfcoe structures */
3807 		fcoe_ctlr_destroy(&qedf->ctlr);
3808 		fc_lport_destroy(qedf->lport);
3809 		fc_remove_host(qedf->lport->host);
3810 		scsi_remove_host(qedf->lport->host);
3811 	}
3812 
3813 	qedf_cmd_mgr_free(qedf->cmd_mgr);
3814 
3815 	if (mode != QEDF_MODE_RECOVERY) {
3816 		fc_exch_mgr_free(qedf->lport);
3817 		fc_lport_free_stats(qedf->lport);
3818 
3819 		/* Wait for all vports to be reaped */
3820 		qedf_wait_for_vport_destroy(qedf);
3821 	}
3822 
3823 	/*
3824 	 * Now that all connections have been uploaded we can stop the
3825 	 * rest of the qed operations
3826 	 */
3827 	qed_ops->stop(qedf->cdev);
3828 
3829 	if (mode != QEDF_MODE_RECOVERY) {
3830 		if (qedf->dpc_wq) {
3831 			/* Stop general DPC handling */
3832 			destroy_workqueue(qedf->dpc_wq);
3833 			qedf->dpc_wq = NULL;
3834 		}
3835 	}
3836 
3837 	/* Final shutdown for the board */
3838 	qedf_free_fcoe_pf_param(qedf);
3839 	if (mode != QEDF_MODE_RECOVERY) {
3840 		qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3841 		pci_set_drvdata(pdev, NULL);
3842 	}
3843 
3844 	rc = qed_ops->common->update_drv_state(qedf->cdev, false);
3845 	if (rc)
3846 		QEDF_ERR(&(qedf->dbg_ctx),
3847 			"Failed to send drv state to MFW.\n");
3848 
3849 	if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
3850 		qed_ops->common->devlink_unregister(qedf->devlink);
3851 		qedf->devlink = NULL;
3852 	}
3853 
3854 	qed_ops->common->slowpath_stop(qedf->cdev);
3855 	qed_ops->common->remove(qedf->cdev);
3856 
3857 	mempool_destroy(qedf->io_mempool);
3858 
3859 	/* Only reap the Scsi_host on a real removal */
3860 	if (mode != QEDF_MODE_RECOVERY)
3861 		scsi_host_put(qedf->lport->host);
3862 }
3863 
3864 static void qedf_remove(struct pci_dev *pdev)
3865 {
3866 	/* Check to make sure this function wasn't already disabled */
3867 	if (!atomic_read(&pdev->enable_cnt))
3868 		return;
3869 
3870 	__qedf_remove(pdev, QEDF_MODE_NORMAL);
3871 }
3872 
3873 void qedf_wq_grcdump(struct work_struct *work)
3874 {
3875 	struct qedf_ctx *qedf =
3876 	    container_of(work, struct qedf_ctx, grcdump_work.work);
3877 
3878 	QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
3879 	qedf_capture_grc_dump(qedf);
3880 }
3881 
3882 void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
3883 {
3884 	struct qedf_ctx *qedf = dev;
3885 
3886 	QEDF_ERR(&(qedf->dbg_ctx),
3887 			"Hardware error handler scheduled, event=%d.\n",
3888 			err_type);
3889 
3890 	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
3891 		QEDF_ERR(&(qedf->dbg_ctx),
3892 				"Already in recovery, not scheduling board disable work.\n");
3893 		return;
3894 	}
3895 
3896 	switch (err_type) {
3897 	case QED_HW_ERR_FAN_FAIL:
3898 		schedule_delayed_work(&qedf->board_disable_work, 0);
3899 		break;
3900 	case QED_HW_ERR_MFW_RESP_FAIL:
3901 	case QED_HW_ERR_HW_ATTN:
3902 	case QED_HW_ERR_DMAE_FAIL:
3903 	case QED_HW_ERR_FW_ASSERT:
3904 		/* Prevent HW attentions from being reasserted */
3905 		qed_ops->common->attn_clr_enable(qedf->cdev, true);
3906 		break;
3907 	case QED_HW_ERR_RAMROD_FAIL:
3908 		/* Prevent HW attentions from being reasserted */
3909 		qed_ops->common->attn_clr_enable(qedf->cdev, true);
3910 
3911 		if (qedf_enable_recovery && qedf->devlink)
3912 			qed_ops->common->report_fatal_error(qedf->devlink,
3913 				err_type);
3914 
3915 		break;
3916 	default:
3917 		break;
3918 	}
3919 }
3920 
3921 /*
3922  * Protocol TLV handler
3923  */
3924 void qedf_get_protocol_tlv_data(void *dev, void *data)
3925 {
3926 	struct qedf_ctx *qedf = dev;
3927 	struct qed_mfw_tlv_fcoe *fcoe = data;
3928 	struct fc_lport *lport;
3929 	struct Scsi_Host *host;
3930 	struct fc_host_attrs *fc_host;
3931 	struct fc_host_statistics *hst;
3932 
3933 	if (!qedf) {
3934 		QEDF_ERR(NULL, "qedf is null.\n");
3935 		return;
3936 	}
3937 
3938 	if (test_bit(QEDF_PROBING, &qedf->flags)) {
3939 		QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
3940 		return;
3941 	}
3942 
3943 	lport = qedf->lport;
3944 	host = lport->host;
3945 	fc_host = shost_to_fc_host(host);
3946 
3947 	/* Force a refresh of the fc_host stats including offload stats */
3948 	hst = qedf_fc_get_host_stats(host);
3949 
3950 	fcoe->qos_pri_set = true;
3951 	fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
3952 
3953 	fcoe->ra_tov_set = true;
3954 	fcoe->ra_tov = lport->r_a_tov;
3955 
3956 	fcoe->ed_tov_set = true;
3957 	fcoe->ed_tov = lport->e_d_tov;
3958 
3959 	fcoe->npiv_state_set = true;
3960 	fcoe->npiv_state = 1; /* NPIV always enabled */
3961 
3962 	fcoe->num_npiv_ids_set = true;
3963 	fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
3964 
3965 	/* Certain attributes we only want to set if we've selected an FCF */
3966 	if (qedf->ctlr.sel_fcf) {
3967 		fcoe->switch_name_set = true;
3968 		u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
3969 	}
3970 
3971 	fcoe->port_state_set = true;
3972 	/* For qedf we're either link down or fabric attach */
3973 	if (lport->link_up)
3974 		fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
3975 	else
3976 		fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
3977 
3978 	fcoe->link_failures_set = true;
3979 	fcoe->link_failures = (u16)hst->link_failure_count;
3980 
3981 	fcoe->fcoe_txq_depth_set = true;
3982 	fcoe->fcoe_rxq_depth_set = true;
3983 	fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
3984 	fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
3985 
3986 	fcoe->fcoe_rx_frames_set = true;
3987 	fcoe->fcoe_rx_frames = hst->rx_frames;
3988 
3989 	fcoe->fcoe_tx_frames_set = true;
3990 	fcoe->fcoe_tx_frames = hst->tx_frames;
3991 
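	/* fc_host stats are kept in (decimal) megabytes; the TLVs want bytes. */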
3992 	fcoe->fcoe_rx_bytes_set = true;
3993 	fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
3994 
3995 	fcoe->fcoe_tx_bytes_set = true;
3996 	fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
3997 
3998 	fcoe->crc_count_set = true;
3999 	fcoe->crc_count = hst->invalid_crc_count;
4000 
4001 	fcoe->tx_abts_set = true;
4002 	fcoe->tx_abts = hst->fcp_packet_aborts;
4003 
4004 	fcoe->tx_lun_rst_set = true;
4005 	fcoe->tx_lun_rst = qedf->lun_resets;
4006 
4007 	fcoe->abort_task_sets_set = true;
4008 	fcoe->abort_task_sets = qedf->packet_aborts;
4009 
4010 	fcoe->scsi_busy_set = true;
4011 	fcoe->scsi_busy = qedf->busy;
4012 
4013 	fcoe->scsi_tsk_full_set = true;
4014 	fcoe->scsi_tsk_full = qedf->task_set_fulls;
4015 }
4016 
4017 /* Deferred work function to perform soft context reset on STAG change */
4018 void qedf_stag_change_work(struct work_struct *work)
4019 {
4020 	struct qedf_ctx *qedf =
4021 	    container_of(work, struct qedf_ctx, stag_work.work);
4022 
4023 	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
4024 		QEDF_ERR(&qedf->dbg_ctx,
4025 			 "Already in recovery, not performing software context reset.\n");
4026 		return;
4027 	}
4028 
4029 	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
4030 		QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
4031 		return;
4032 	}
4033 
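	/*
	 * Flag the STAG change so __qedf_remove() will wait for the soft
	 * context reset to finish; the flag is expected to be cleared once
	 * the reset completes.
	 */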
4034 	set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
4035 
4036 	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.\n",
4037 			dev_name(&qedf->pdev->dev), __func__, __LINE__,
4038 			qedf->dbg_ctx.host_no);
4039 	qedf_ctx_soft_reset(qedf->lport);
4040 }
4041 
4042 static void qedf_shutdown(struct pci_dev *pdev)
4043 {
4044 	__qedf_remove(pdev, QEDF_MODE_NORMAL);
4045 }
4046 
4047 static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
4048 {
4049 	struct qedf_ctx *qedf;
4050 
4051 	if (!pdev) {
4052 		QEDF_ERR(NULL, "pdev is NULL.\n");
4053 		return -ENODEV;
4054 	}
4055 
4056 	qedf = pci_get_drvdata(pdev);
4057 
4058 	QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
4059 
4060 	return -EPERM;
4061 }
4062 
4063 /*
4064  * Recovery handler code
4065  */
4066 static void qedf_schedule_recovery_handler(void *dev)
4067 {
4068 	struct qedf_ctx *qedf = dev;
4069 
4070 	QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
4071 	schedule_delayed_work(&qedf->recovery_work, 0);
4072 }
4073 
4074 static void qedf_recovery_handler(struct work_struct *work)
4075 {
4076 	struct qedf_ctx *qedf =
4077 	    container_of(work, struct qedf_ctx, recovery_work.work);
4078 
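	/*
	 * test_and_set_bit() makes this idempotent: only one recovery runs
	 * at a time.
	 */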
4079 	if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
4080 		return;
4081 
4082 	/*
4083 	 * Call common_ops->recovery_prolog to allow the MFW to quiesce
4084 	 * any PCI transactions.
4085 	 */
4086 	qed_ops->common->recovery_prolog(qedf->cdev);
4087 
4088 	QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
4089 	__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
4090 	/*
4091 	 * Reset link and dcbx to down state since we will not get a link down
4092 	 * event from the MFW but calling __qedf_remove will essentially be a
4093 	 * link down event.
4094 	 */
4095 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
4096 	atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
4097 	__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
4098 	clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
4099 	QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
4100 }
4101 
4102 /* Generic TLV data callback */
4103 void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
4104 {
4105 	struct qedf_ctx *qedf;
4106 
4107 	if (!dev) {
4108 		QEDF_INFO(NULL, QEDF_LOG_EVT,
4109 			  "dev is NULL so ignoring get_generic_tlv_data request.\n");
4110 		return;
4111 	}
4112 	qedf = (struct qedf_ctx *)dev;
4113 
4114 	memset(data, 0, sizeof(struct qed_generic_tlvs));
4115 	ether_addr_copy(data->mac[0], qedf->mac);
4116 }
4117 
4118 /*
4119  * Module Init/Remove
4120  */
4121 
4122 static int __init qedf_init(void)
4123 {
4124 	int ret;
4125 
4126 	/* If debug=1 passed, set the default log mask */
4127 	if (qedf_debug == QEDF_LOG_DEFAULT)
4128 		qedf_debug = QEDF_DEFAULT_LOG_MASK;
4129 
4130 	/*
4131 	 * Check that default prio for FIP/FCoE traffic is between 0..7 if a
4132 	 * value has been set
4133 	 */
4134 	if (qedf_default_prio > -1)
4135 		if (qedf_default_prio > 7) {
4136 			qedf_default_prio = QEDF_DEFAULT_PRIO;
4137 			QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
4138 			    QEDF_DEFAULT_PRIO);
4139 		}
4140 
4141 	/* Print driver banner */
4142 	QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
4143 		   QEDF_VERSION);
4144 
4145 	/* Create kmem_cache for qedf_io_work structs */
4146 	qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
4147 	    sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
4148 	if (qedf_io_work_cache == NULL) {
4149 		QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
4150 		goto err1;
4151 	}
4152 	QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
4153 	    qedf_io_work_cache);
4154 
4155 	qed_ops = qed_get_fcoe_ops();
4156 	if (!qed_ops) {
4157 		QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
4158 		goto err1;
4159 	}
4160 
4161 #ifdef CONFIG_DEBUG_FS
4162 	qedf_dbg_init("qedf");
4163 #endif
4164 
4165 	qedf_fc_transport_template =
4166 	    fc_attach_transport(&qedf_fc_transport_fn);
4167 	if (!qedf_fc_transport_template) {
4168 		QEDF_ERR(NULL, "Could not register with FC transport\n");
4169 		goto err2;
4170 	}
4171 
4172 	qedf_fc_vport_transport_template =
4173 		fc_attach_transport(&qedf_fc_vport_transport_fn);
4174 	if (!qedf_fc_vport_transport_template) {
4175 		QEDF_ERR(NULL, "Could not register vport template with FC "
4176 			  "transport\n");
4177 		goto err3;
4178 	}
4179 
4180 	qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
4181 	if (!qedf_io_wq) {
4182 		QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
4183 		goto err4;
4184 	}
4185 
4186 	qedf_cb_ops.get_login_failures = qedf_get_login_failures;
4187 
4188 	ret = pci_register_driver(&qedf_pci_driver);
4189 	if (ret) {
4190 		QEDF_ERR(NULL, "Failed to register driver\n");
4191 		goto err5;
4192 	}
4193 
4194 	return 0;
4195 
4196 err5:
4197 	destroy_workqueue(qedf_io_wq);
4198 err4:
4199 	fc_release_transport(qedf_fc_vport_transport_template);
4200 err3:
4201 	fc_release_transport(qedf_fc_transport_template);
4202 err2:
4203 #ifdef CONFIG_DEBUG_FS
4204 	qedf_dbg_exit();
4205 #endif
4206 	qed_put_fcoe_ops();
4207 err1:
4208 	return -EINVAL;
4209 }
4210 
4211 static void __exit qedf_cleanup(void)
4212 {
4213 	pci_unregister_driver(&qedf_pci_driver);
4214 
4215 	destroy_workqueue(qedf_io_wq);
4216 
4217 	fc_release_transport(qedf_fc_vport_transport_template);
4218 	fc_release_transport(qedf_fc_transport_template);
4219 #ifdef CONFIG_DEBUG_FS
4220 	qedf_dbg_exit();
4221 #endif
4222 	qed_put_fcoe_ops();
4223 
4224 	kmem_cache_destroy(qedf_io_work_cache);
4225 }
4226 
4227 MODULE_LICENSE("GPL");
4228 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
4229 MODULE_AUTHOR("QLogic Corporation");
4230 MODULE_VERSION(QEDF_VERSION);
4231 module_init(qedf_init);
4232 module_exit(qedf_cleanup);
4233