// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc_frame.h>

#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fdls.h"
#include "fdls_fc.h"

#define PCI_DEVICE_ID_CISCO_FNIC	0x0045

/* Timer to poll notification area for events. Used for MSI interrupts */
#define FNIC_NOTIFY_TIMER_PERIOD	(2 * HZ)

static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
static struct kmem_cache *fdls_frame_cache;
static struct kmem_cache *fdls_frame_elem_cache;
static LIST_HEAD(fnic_list);
static DEFINE_SPINLOCK(fnic_list_lock);
static DEFINE_IDA(fnic_ida);

struct work_struct reset_fnic_work;
LIST_HEAD(reset_fnic_list);
DEFINE_SPINLOCK(reset_fnic_list_lock);

/* Supported devices by fnic module */
static const struct pci_device_id fnic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
	{ 0, }
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
	      "Joseph R. Eykholt <jeykholt@cisco.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, fnic_id_table);

unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");

unsigned int fnic_fdmi_support = 1;
module_param(fnic_fdmi_support, int, 0644);
MODULE_PARM_DESC(fnic_fdmi_support, "FDMI support");

static unsigned int fnic_tgt_id_binding = 1;
module_param(fnic_tgt_id_binding, uint, 0644);
MODULE_PARM_DESC(fnic_tgt_id_binding,
		 "Target ID binding (0 for none. 1 for binding by WWPN (default))");

unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
module_param(io_completions, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time");

unsigned int fnic_trace_max_pages = 16;
module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
					"for fnic trace buffer");

unsigned int fnic_fc_trace_max_pages = 64;
module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_fc_trace_max_pages,
		 "Total allocated memory pages for fc trace buffer");

static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");

unsigned int pc_rscn_handling_feature_flag = PC_RSCN_HANDLING_FEATURE_ON;
module_param(pc_rscn_handling_feature_flag, uint, 0644);
MODULE_PARM_DESC(pc_rscn_handling_feature_flag,
		 "PCRSCN handling (0 for none. 1 to handle PCRSCN (default))");

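/*
 * Example (hypothetical values): parameters can be given at load time,
 * e.g. "modprobe fnic fnic_max_qdepth=64 fnic_log_level=1"; those marked
 * writable (0644/S_IWUSR) can also be changed at runtime through
 * /sys/module/fnic/parameters/.
 */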
struct workqueue_struct *reset_fnic_work_queue;
struct workqueue_struct *fnic_fip_queue;

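/*
 * Per-sdev init: reject the device if its remote port is missing or not
 * ready, otherwise apply the module-configurable per-LUN queue depth.
 */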
static int fnic_sdev_init(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	scsi_change_queue_depth(sdev, fnic_max_qdepth);
	return 0;
}

static const struct scsi_host_template fnic_host_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.queuecommand = fnic_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = fnic_abort_cmd,
	.eh_device_reset_handler = fnic_device_reset,
	.eh_host_reset_handler = fnic_eh_host_reset_handler,
	.sdev_init = fnic_sdev_init,
	.change_queue_depth = scsi_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = 3,
	.can_queue = FNIC_DFLT_IO_REQ,
	.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0xffff,
	.shost_groups = fnic_host_groups,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct fnic_cmd_priv),
	.map_queues = fnic_mq_map_queues_cpus,
};

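/* A dev_loss_tmo of zero is not allowed; clamp it to the 1 second minimum. */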
static void
fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

static void fnic_get_host_speed(struct Scsi_Host *shost);
static struct scsi_transport_template *fnic_fc_transport;
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
static void fnic_reset_host_stats(struct Scsi_Host *);

static struct fc_function_template fnic_fc_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fnic_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fnic_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.show_rport_dev_loss_tmo = 1,
	.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
	.issue_fc_host_lip = fnic_issue_fc_host_lip,
	.get_fc_host_stats = fnic_get_stats,
	.reset_fc_host_stats = fnic_reset_host_stats,
	.dd_fcrport_size = sizeof(struct rport_dd_data_s),
	.terminate_rport_io = fnic_terminate_rport_io,
	.bsg_request = NULL,
};

static void fnic_get_host_speed(struct Scsi_Host *shost)
{
	struct fnic *fnic = *((struct fnic **) shost_priv(shost));
	u32 port_speed = vnic_dev_port_speed(fnic->vdev);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				  "port_speed: %d Mbps", port_speed);
	atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed);

	/* Add in other values as they get defined in fw */
	switch (port_speed) {
	case DCEM_PORTSPEED_1G:
		fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
		break;
	case DCEM_PORTSPEED_2G:
		fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
		break;
	case DCEM_PORTSPEED_4G:
		fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
		break;
	case DCEM_PORTSPEED_8G:
		fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
		break;
	case DCEM_PORTSPEED_10G:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_16G:
		fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_32G:
		fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_50G:
		fc_host_speed(shost) = FC_PORTSPEED_50GBIT;
		break;
	case DCEM_PORTSPEED_64G:
		fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
		break;
	case DCEM_PORTSPEED_128G:
		fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
		break;
	default:
		FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					  "Unknown FC speed: %d Mbps", port_speed);
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		break;
	}
}

/* Placeholder function */
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
{
	int ret;
	struct fnic *fnic = *((struct fnic **) shost_priv(host));
	struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats;
	struct vnic_stats *vs;
	unsigned long flags;

	if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
		return stats;
	fnic->stats_time = jiffies;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (ret) {
		FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
					  "fnic: Get vnic stats failed: 0x%x", ret);
		return stats;
	}
	vs = fnic->stats;
	stats->tx_frames = vs->tx.tx_unicast_frames_ok;
	stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
	stats->rx_frames = vs->rx.rx_unicast_frames_ok;
	stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
	stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
	stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
	stats->invalid_crc_count = vs->rx.rx_crc_errors;
	stats->seconds_since_last_reset =
		(jiffies - fnic->stats_reset_time) / HZ;
	stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
	stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
	return stats;
}

/*
 * fnic_dump_fchost_stats
 * note : dumps fc_statistics into system logs
 */
void fnic_dump_fchost_stats(struct Scsi_Host *host,
				struct fc_host_statistics *stats)
{
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: seconds since last reset = %llu\n",
			stats->seconds_since_last_reset);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: tx frames		= %llu\n",
			stats->tx_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: tx words		= %llu\n",
			stats->tx_words);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: rx frames		= %llu\n",
			stats->rx_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: rx words		= %llu\n",
			stats->rx_words);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: lip count		= %llu\n",
			stats->lip_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: nos count		= %llu\n",
			stats->nos_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: error frames		= %llu\n",
			stats->error_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: dumped frames	= %llu\n",
			stats->dumped_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: link failure count	= %llu\n",
			stats->link_failure_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: loss of sync count	= %llu\n",
			stats->loss_of_sync_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: loss of signal count	= %llu\n",
			stats->loss_of_signal_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: prim seq protocol err count = %llu\n",
			stats->prim_seq_protocol_err_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: invalid tx word count= %llu\n",
			stats->invalid_tx_word_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: invalid crc count	= %llu\n",
			stats->invalid_crc_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: fcp input requests	= %llu\n",
			stats->fcp_input_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: fcp output requests	= %llu\n",
			stats->fcp_output_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: fcp control requests	= %llu\n",
			stats->fcp_control_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: fcp input megabytes	= %llu\n",
			stats->fcp_input_megabytes);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
			"fnic: fcp output megabytes	= %llu\n",
			stats->fcp_output_megabytes);
}

/*
 * fnic_reset_host_stats : clears host stats
 * note : called when reset_statistics set under sysfs dir
 */
static void fnic_reset_host_stats(struct Scsi_Host *host)
{
	int ret;
	struct fnic *fnic = *((struct fnic **) shost_priv(host));
	struct fc_host_statistics *stats;
	unsigned long flags;

	/* dump current stats, before clearing them */
	stats = fnic_get_stats(host);
	fnic_dump_fchost_stats(host, stats);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	ret = vnic_dev_stats_clear(fnic->vdev);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (ret) {
		FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
				"fnic: Reset vnic stats failed 0x%x", ret);
		return;
	}
	fnic->stats_reset_time = jiffies;
	memset(stats, 0, sizeof(*stats));
}

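/* Log any error status latched in the WQ, RQ and copy WQ hardware controls. */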
void fnic_log_q_error(struct fnic *fnic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		error_status = ioread32(&fnic->wq[i].ctrl->error_status);
		if (error_status)
			dev_err(&fnic->pdev->dev, "WQ[%d] error_status %d\n", i, error_status);
	}

	for (i = 0; i < fnic->rq_count; i++) {
		error_status = ioread32(&fnic->rq[i].ctrl->error_status);
		if (error_status)
			dev_err(&fnic->pdev->dev, "RQ[%d] error_status %d\n", i, error_status);
	}

	for (i = 0; i < fnic->wq_copy_count; i++) {
		error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status);
		if (error_status)
			dev_err(&fnic->pdev->dev, "CWQ[%d] error_status %d\n", i, error_status);
	}
}

void fnic_handle_link_event(struct fnic *fnic)
{
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	queue_work(fnic_event_queue, &fnic->link_work);
}

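/*
 * Tell the firmware where to post devcmd notifications: the INTx notify
 * interrupt offset, no interrupt (-1) for MSI where a timer polls the
 * notification area instead, or the vector past the copy WQ interrupts
 * for MSI-X.
 */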
static int fnic_notify_set(struct fnic *fnic)
{
	int err;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		err = vnic_dev_notify_set(fnic->vdev, -1);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base);
		break;
	default:
		dev_err(&fnic->pdev->dev,
			"Interrupt mode should be set up before devcmd notify set %d\n",
			vnic_dev_get_intr_mode(fnic->vdev));
		err = -1;
		break;
	}

	return err;
}

446 
fnic_notify_timer(struct timer_list * t)447 static void fnic_notify_timer(struct timer_list *t)
448 {
449 	struct fnic *fnic = from_timer(fnic, t, notify_timer);
450 
451 	fnic_handle_link_event(fnic);
452 	mod_timer(&fnic->notify_timer,
453 		  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
454 }
455 
fnic_notify_timer_start(struct fnic * fnic)456 static void fnic_notify_timer_start(struct fnic *fnic)
457 {
458 	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
459 	case VNIC_DEV_INTR_MODE_MSI:
460 		/*
461 		 * Schedule first timeout immediately. The driver is
462 		 * initiatialized and ready to look for link up notification
463 		 */
464 		mod_timer(&fnic->notify_timer, jiffies);
465 		break;
466 	default:
467 		/* Using intr for notification for INTx/MSI-X */
468 		break;
469 	}
470 }
471 
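/*
 * Issue a devcmd via start(), then poll finished() every 100 ms for up to
 * 2 seconds, making at least three attempts before returning -ETIMEDOUT.
 */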
static int fnic_dev_wait(struct vnic_dev *vdev,
			 int (*start)(struct vnic_dev *, int),
			 int (*finished)(struct vnic_dev *, int *),
			 int arg)
{
	unsigned long time;
	int done;
	int err;
	int count;

	count = 0;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete.
	 * Sometimes schedule_timeout_uninterruptible takes a long time
	 * to wake up, so we cannot rely only on the 2 second wait in the
	 * while loop. By adding a count, we make sure we try at least
	 * three times before returning -ETIMEDOUT.
	 */
	time = jiffies + (HZ * 2);
	do {
		err = finished(vdev, &done);
		count++;
		if (err)
			return err;
		if (done)
			return 0;
		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(time, jiffies) || (count < 3));

	return -ETIMEDOUT;
}

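/*
 * Quiesce the device: disable it and mask interrupts, disable all queues,
 * drain posted completions, clean residual WQ/RQ/copy-WQ buffers, and
 * free the driver mempools.
 */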
static int fnic_cleanup(struct fnic *fnic)
{
	unsigned int i;
	int err;
	int raw_wq_rq_counts;

	vnic_dev_disable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_mask(&fnic->intr[i]);

	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_disable(&fnic->rq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_disable(&fnic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_disable(&fnic->hw_copy_wq[i]);
		if (err)
			return err;
		raw_wq_rq_counts = fnic->raw_wq_count + fnic->rq_count;
		fnic_wq_copy_cmpl_handler(fnic, -1, i + raw_wq_rq_counts);
	}

	/* Clean up completed IOs and FCS frames */
	fnic_wq_cmpl_handler(fnic, -1);
	fnic_rq_cmpl_handler(fnic, -1);

	/* Clean up the IOs and FCS frames that have not completed */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_clean(&fnic->hw_copy_wq[i],
				   fnic_wq_copy_cleanup_handler);

	for (i = 0; i < fnic->cq_count; i++)
		vnic_cq_clean(&fnic->cq[i]);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_clean(&fnic->intr[i]);

	mempool_destroy(fnic->io_req_pool);
	mempool_destroy(fnic->frame_pool);
	mempool_destroy(fnic->frame_elem_pool);
	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
		mempool_destroy(fnic->io_sgl_pool[i]);

	return 0;
}

static void fnic_iounmap(struct fnic *fnic)
{
	if (fnic->bar0.vaddr)
		iounmap(fnic->bar0.vaddr);
}

static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
	vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}

static void fnic_scsi_init(struct fnic *fnic)
{
	struct Scsi_Host *host = fnic->host;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
			 host->host_no);

	host->transportt = fnic_fc_transport;
}

static void fnic_free_ioreq_tables_mq(struct fnic *fnic)
{
	int hwq;

	for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
		kfree(fnic->sw_copy_wq[hwq].io_req_table);
}

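/*
 * Size the SCSI host from the vNIC configuration, allocate the per-hwq
 * io_req lookup tables, and register the host with the SCSI midlayer.
 */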
static int fnic_scsi_drv_init(struct fnic *fnic)
{
	struct Scsi_Host *host = fnic->host;
	int err;
	struct pci_dev *pdev = fnic->pdev;
	struct fnic_iport_s *iport = &fnic->iport;
	int hwq;

	/* Configure maximum outstanding IO reqs */
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));

	fnic->fnic_max_tag_id = host->can_queue;
	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN;

	host->nr_hw_queues = fnic->wq_copy_count;

	dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu",
			host->can_queue, host->max_lun);

	dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
			host->max_id, host->max_cmd_len, host->nr_hw_queues);

	for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
		fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
		fnic->sw_copy_wq[hwq].io_req_table =
			kcalloc(fnic->sw_copy_wq[hwq].ioreq_table_size + 1,
				sizeof(struct fnic_io_req *), GFP_KERNEL);

		if (!fnic->sw_copy_wq[hwq].io_req_table) {
			fnic_free_ioreq_tables_mq(fnic);
			return -ENOMEM;
		}
	}

	dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
			fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);

	fnic_scsi_init(fnic);

	err = scsi_add_host(fnic->host, &pdev->dev);
	if (err) {
		dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n");
		return err;
	}
	fc_host_maxframe_size(fnic->host) = iport->max_payload_size;
	fc_host_dev_loss_tmo(fnic->host) =
		fnic->config.port_down_timeout / 1000;
	sprintf(fc_host_symbolic_name(fnic->host),
			DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
	fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT;
	fc_host_node_name(fnic->host) = iport->wwnn;
	fc_host_port_name(fnic->host) = iport->wwpn;
	fc_host_supported_classes(fnic->host) = FC_COS_CLASS3;
	memset(fc_host_supported_fc4s(fnic->host), 0,
		   sizeof(fc_host_supported_fc4s(fnic->host)));
	fc_host_supported_fc4s(fnic->host)[2] = 1;
	fc_host_supported_fc4s(fnic->host)[7] = 1;
	fc_host_supported_speeds(fnic->host) = 0;
	fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT;

	dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data);
	if (fnic->host->shost_data != NULL) {
		if (fnic_tgt_id_binding == 0) {
			dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n");
			fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE;
		} else {
			dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n");
			fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN;
		}
	}

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool) {
		scsi_remove_host(fnic->host);
		return -ENOMEM;
	}

	return 0;
}

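/*
 * blk-mq map_queues callback: distribute the hardware copy WQ queues
 * across CPUs following the device's IRQ affinity. Only meaningful with
 * MSI-X, where each copy WQ has its own vector.
 */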
void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
{
	struct fnic *fnic = *((struct fnic **) shost_priv(host));
	struct pci_dev *l_pdev = fnic->pdev;
	int intr_mode = fnic->config.intr_mode;
	struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];

	if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"intr_mode is not msix\n");
		return;
	}

	FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"qmap->nr_queues: %d\n", qmap->nr_queues);

	if (l_pdev == NULL) {
		FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
						"l_pdev is null\n");
		return;
	}

	blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);
}

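/*
 * PCI probe: allocate a fnic instance, map BAR0, open the vNIC device and
 * read its configuration, set up interrupts, queues and mempools, start
 * FDLS link services, and register the SCSI host.
 */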
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host = NULL;
	struct fnic *fnic;
	mempool_t *pool;
	struct fnic_iport_s *iport;
	int err = 0;
	int fnic_id = 0;
	int i;
	unsigned long flags;
	char *desc, *subsys_desc;
	int len;

	/*
	 * Allocate fnic
	 */
	fnic = kzalloc(sizeof(struct fnic), GFP_KERNEL);
	if (!fnic) {
		err = -ENOMEM;
		goto err_out_fnic_alloc;
	}

	iport = &fnic->iport;

	fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL);
	if (fnic_id < 0) {
		dev_err(&pdev->dev, "Unable to alloc fnic ID\n");
		err = fnic_id;
		goto err_out_ida_alloc;
	}

	fnic->pdev = pdev;
	fnic->fnic_num = fnic_id;

	/* Find model name from PCIe subsys ID */
	if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) {
		dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc);

		/* Update FDMI model */
		fnic->subsys_desc_len = strlen(subsys_desc);
		len = ARRAY_SIZE(fnic->subsys_desc);
		if (fnic->subsys_desc_len > len)
			fnic->subsys_desc_len = len;
		memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len);
		dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc);
	} else {
		fnic->subsys_desc_len = 0;
		dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown",
				pdev->subsystem_device);
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out_pci_enable_device;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&fnic->pdev->dev, "Cannot enable PCI resources, aborting\n");
		goto err_out_pci_request_regions;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 47-bit first, and
	 * fall back to 32-bit. Cisco VIC supports 47 bits only.
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&fnic->pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_out_set_dma_mask;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&fnic->pdev->dev, "BAR0 not memory-mappable, aborting.\n");
		err = -ENODEV;
		goto err_out_map_bar;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		dev_err(&fnic->pdev->dev, "Cannot memory-map BAR0 res hdr, aborting.\n");
		err = -ENODEV;
		goto err_out_fnic_map_bar;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		dev_err(&fnic->pdev->dev, "vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_dev_register;
	}

	err = vnic_dev_cmd_init(fnic->vdev);
	if (err) {
		dev_err(&fnic->pdev->dev, "vnic_dev_cmd_init() returns %d, aborting\n",
				err);
		goto err_out_dev_cmd_init;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
	if (err) {
		dev_err(&fnic->pdev->dev, "vNIC dev open failed, aborting.\n");
		goto err_out_dev_open;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		dev_err(&fnic->pdev->dev, "vNIC dev init failed, aborting.\n");
		goto err_out_dev_init;
	}

	err = vnic_dev_mac_addr(fnic->vdev, iport->hwmac);
	if (err) {
		dev_err(&fnic->pdev->dev, "vNIC get MAC addr failed\n");
		goto err_out_dev_mac_addr;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, iport->hwmac, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, aborting.\n");
		goto err_out_fnic_get_config;
	}

	switch (fnic->config.flags & 0xff0) {
	case VFCF_FC_INITIATOR:
		{
			host = scsi_host_alloc(&fnic_host_template,
					       sizeof(struct fnic *));
			if (!host) {
				dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n");
				err = -ENOMEM;
				goto err_out_scsi_host_alloc;
			}
			*((struct fnic **) shost_priv(host)) = fnic;

			fnic->host = host;
			fnic->role = FNIC_ROLE_FCP_INITIATOR;
			dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n",
					fnic->fnic_num);
		}
		break;
	default:
		dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num);
		err = -EINVAL;
		goto err_out_fnic_role;
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		dev_err(&fnic->pdev->dev, "Failed to set intr mode, aborting.\n");
		goto err_out_fnic_set_intr_mode;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		dev_err(&fnic->pdev->dev, "Failed to alloc vNIC resources, aborting.\n");
		goto err_out_fnic_alloc_vnic_res;
	}
	dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
			fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);

	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool) {
		err = -ENOMEM;
		goto err_out_free_resources;
	}
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool) {
		err = -ENOMEM;
		goto err_out_free_dflt_pool;
	}
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache);
	if (!pool) {
		err = -ENOMEM;
		goto err_out_fdls_frame_pool;
	}
	fnic->frame_pool = pool;

	pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM,
						fdls_frame_elem_cache);
	if (!pool) {
		err = -ENOMEM;
		goto err_out_fdls_frame_elem_pool;
	}
	fnic->frame_elem_pool = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		dev_info(&fnic->pdev->dev, "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, iport->hwmac);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_LIST_HEAD(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->vlan_list);
		timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0);
		timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0);
		timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0);
		timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0);
		fnic->set_vlan = fnic_set_vlan;
	} else {
		dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n");
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		dev_err(&fnic->pdev->dev, "Failed to alloc notify buffer, aborting.\n");
		goto err_out_fnic_notify_set;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);

	/* allocate RQ buffers and post them to RQ */
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc frame\n");
			goto err_out_alloc_rq_buf;
		}
	}

	init_completion(&fnic->reset_completion_wait);

	/* Start local port initialization */
	iport->max_flogi_retries = fnic->config.flogi_retries;
	iport->max_plogi_retries = fnic->config.plogi_retries;
	iport->plogi_timeout = fnic->config.plogi_timeout;
	iport->service_params =
		(FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS |
		 FNIC_FCP_SP_CONF_CMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		iport->service_params |= FNIC_FCP_SP_RETRY;

	iport->boot_time = jiffies;
	iport->e_d_tov = fnic->config.ed_tov;
	iport->r_a_tov = fnic->config.ra_tov;
	iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT;
	iport->wwpn = fnic->config.port_wwn;
	iport->wwnn = fnic->config.node_wwn;

	iport->max_payload_size = fnic->config.maxdatafieldsize;

	if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) ||
		(iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) ||
		((iport->max_payload_size % 4) != 0)) {
		iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN;
	}

	iport->flags |= FNIC_FIRST_LINK_UP;

	timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback,
				0);

	fnic->stats_reset_time = jiffies;

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	INIT_WORK(&fnic->tport_work, fnic_tport_event_handler);
	INIT_WORK(&fnic->flush_work, fnic_flush_tx);

	INIT_LIST_HEAD(&fnic->frame_queue);
	INIT_LIST_HEAD(&fnic->tx_queue);
	INIT_LIST_HEAD(&fnic->tport_event_list);

	INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry,
			  fdls_schedule_oxid_free_retry_work);

	/* Initialize the oxid reclaim list and work struct */
	INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list);
	INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++) {
		if (!ioread32(&fnic->rq[i].ctrl->enable))
			vnic_rq_enable(&fnic->rq[i]);
	}
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->hw_copy_wq[i]);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		dev_err(&fnic->pdev->dev, "Unable to request irq.\n");
		goto err_out_fnic_request_intr;
	}

	fnic_notify_timer_start(fnic);

	fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE));

	err = fnic_scsi_drv_init(fnic);
	if (err)
		goto err_out_scsi_drv_init;

	err = fnic_stats_debugfs_init(fnic);
	if (err) {
		dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n");
		goto err_out_free_stats_debugfs;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	return 0;

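/* Error unwinding: undo the setup steps above in reverse order. */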
err_out_free_stats_debugfs:
	fnic_stats_debugfs_remove(fnic);
	fnic_free_ioreq_tables_mq(fnic);
	scsi_remove_host(fnic->host);
err_out_scsi_drv_init:
	fnic_free_intr(fnic);
err_out_fnic_request_intr:
err_out_alloc_rq_buf:
	for (i = 0; i < fnic->rq_count; i++) {
		if (ioread32(&fnic->rq[i].ctrl->enable))
			vnic_rq_disable(&fnic->rq[i]);
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	}
	vnic_dev_notify_unset(fnic->vdev);
err_out_fnic_notify_set:
	mempool_destroy(fnic->frame_elem_pool);
err_out_fdls_frame_elem_pool:
	mempool_destroy(fnic->frame_pool);
err_out_fdls_frame_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_fnic_alloc_vnic_res:
	fnic_clear_intr_mode(fnic);
err_out_fnic_set_intr_mode:
	scsi_host_put(fnic->host);
err_out_fnic_role:
err_out_scsi_host_alloc:
err_out_fnic_get_config:
err_out_dev_mac_addr:
err_out_dev_init:
	vnic_dev_close(fnic->vdev);
err_out_dev_open:
err_out_dev_cmd_init:
	vnic_dev_unregister(fnic->vdev);
err_out_dev_register:
	fnic_iounmap(fnic);
err_out_fnic_map_bar:
err_out_map_bar:
err_out_set_dma_mask:
	pci_release_regions(pdev);
err_out_pci_request_regions:
	pci_disable_device(pdev);
err_out_pci_enable_device:
	ida_free(&fnic_ida, fnic->fnic_num);
err_out_ida_alloc:
	kfree(fnic);
err_out_fnic_alloc:
	return err;
}

static void fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	unsigned long flags;

	/*
	 * Sometimes when probe() fails but does not return an error code,
	 * remove() gets called with 'drvdata' not set. Avoid a crash by
	 * adding a defensive check.
	 */
	if (!fnic)
		return;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/*
	 * Flush the fnic event queue. After this call, there should
	 * be no event queued for this fnic device in the workqueue
	 */
	flush_workqueue(fnic_event_queue);

	fnic_scsi_unload(fnic);

	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		del_timer_sync(&fnic->retry_fip_timer);
		del_timer_sync(&fnic->fcs_ka_timer);
		del_timer_sync(&fnic->enode_ka_timer);
		del_timer_sync(&fnic->vn_ka_timer);

		fnic_free_txq(&fnic->fip_frame_queue);
		fnic_fcoe_reset_vlans(fnic);
	}

	if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0))
		del_timer_sync(&fnic->iport.fabric.fdmi_timer);

	fnic_stats_debugfs_remove(fnic);

	/*
	 * This stops the fnic device, masks all interrupts. Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	fnic_cleanup(fnic);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	fnic_free_txq(&fnic->frame_queue);
	fnic_free_txq(&fnic->tx_queue);

	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_intr(fnic);
	fnic_free_vnic_resources(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	ida_free(&fnic_ida, fnic->fnic_num);
	fnic_scsi_unload_cleanup(fnic);
	scsi_host_put(fnic->host);
	kfree(fnic);
}

static struct pci_driver fnic_driver = {
	.name = DRV_NAME,
	.id_table = fnic_id_table,
	.probe = fnic_probe,
	.remove = fnic_remove,
};

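/*
 * Module init: set up debugfs and the trace buffers (failures here are
 * non-fatal and only disable tracing), create the slab caches and
 * workqueues, attach the FC transport template, then register the PCI
 * driver.
 */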
static int __init fnic_init_module(void)
{
	size_t len;
	int err = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* Create debugfs entries for fnic */
	err = fnic_debugfs_init();
	if (err < 0) {
		printk(KERN_ERR PFX "Failed to create fnic directory for tracing and stats logging\n");
		fnic_debugfs_terminate();
	}

	/* Allocate memory for trace buffer */
	err = fnic_trace_buf_init();
	if (err < 0) {
		printk(KERN_ERR PFX "Trace buffer initialization failed. Fnic tracing utility is disabled\n");
		fnic_trace_free();
	}

	/* Allocate memory for fc trace buffer */
	err = fnic_fc_trace_init();
	if (err < 0) {
		printk(KERN_ERR PFX "FC trace buffer initialization failed. FC frame tracing utility is disabled\n");
		fnic_fc_trace_free();
	}

	/* Create a cache for allocation of default size sgls */
	len = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* Create a cache for allocation of max size sgls */
	len = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		  SLAB_HWCACHE_ALIGN,
		  NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* Create a cache of io_req structs for use via mempool */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		err = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fdls_frame_cache = kmem_cache_create("fdls_frames",
					FNIC_FCOE_FRAME_MAXSZ,
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fdls_frame_cache) {
		pr_err("fnic fdls frame cache create failed\n");
		err = -ENOMEM;
		goto err_create_fdls_frame_cache;
	}

	fdls_frame_elem_cache = kmem_cache_create("fdls_frame_elem",
					sizeof(struct fnic_frame_list),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fdls_frame_elem_cache) {
		pr_err("fnic fdls frame elem cache create failed\n");
		err = -ENOMEM;
		goto err_create_fdls_frame_cache_elem;
	}

	fnic_event_queue =
		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fnic_workq;
	}

	fnic_fip_queue =
		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_fip_q");
	if (!fnic_fip_queue) {
		printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fip_workq;
	}

	if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) {
		reset_fnic_work_queue =
			create_singlethread_workqueue("reset_fnic_work_queue");
		if (!reset_fnic_work_queue) {
			pr_err("reset fnic work queue create failed\n");
			err = -ENOMEM;
			goto err_create_reset_fnic_workq;
		}
		spin_lock_init(&reset_fnic_list_lock);
		INIT_LIST_HEAD(&reset_fnic_list);
		INIT_WORK(&reset_fnic_work, fnic_reset_work_handler);
	}

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		err = -ENOMEM;
		goto err_fc_transport;
	}

	/* register the driver with PCI system */
	err = pci_register_driver(&fnic_driver);
	if (err < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return err;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
	if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
		destroy_workqueue(reset_fnic_work_queue);
err_create_reset_fnic_workq:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fdls_frame_elem_cache);
err_create_fdls_frame_cache_elem:
	kmem_cache_destroy(fdls_frame_cache);
err_create_fdls_frame_cache:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	fnic_trace_free();
	fnic_fc_trace_free();
	fnic_debugfs_terminate();
	return err;
}

static void __exit fnic_cleanup_module(void)
{
	pci_unregister_driver(&fnic_driver);
	destroy_workqueue(fnic_event_queue);

	if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
		destroy_workqueue(reset_fnic_work_queue);

	if (fnic_fip_queue) {
		flush_workqueue(fnic_fip_queue);
		destroy_workqueue(fnic_fip_queue);
	}
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	kmem_cache_destroy(fnic_io_req_cache);
	kmem_cache_destroy(fdls_frame_cache);
	kmem_cache_destroy(fdls_frame_elem_cache);
	fc_release_transport(fnic_fc_transport);
	fnic_trace_free();
	fnic_fc_trace_free();
	fnic_debugfs_terminate();
	ida_destroy(&fnic_ida);
}

module_init(fnic_init_module);
module_exit(fnic_cleanup_module);