1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright 2014 Cisco Systems, Inc.  All rights reserved.
3 
4 #include <linux/module.h>
5 #include <linux/mempool.h>
6 #include <linux/string.h>
7 #include <linux/slab.h>
8 #include <linux/errno.h>
9 #include <linux/init.h>
10 #include <linux/pci.h>
11 #include <linux/skbuff.h>
12 #include <linux/interrupt.h>
13 #include <linux/spinlock.h>
14 #include <linux/workqueue.h>
15 #include <scsi/scsi_host.h>
16 #include <scsi/scsi_tcq.h>
17 
18 #include "snic.h"
19 #include "snic_fwint.h"
20 
21 #define PCI_DEVICE_ID_CISCO_SNIC	0x0046
22 
23 /* Devices supported by the snic module */
24 static struct pci_device_id snic_id_table[] = {
25 	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
26 	{ 0, }	/* end of table */
27 };
28 
29 unsigned int snic_log_level = 0x0;
30 module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
31 MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");
32 
33 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
34 unsigned int snic_trace_max_pages = 16;
35 module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
36 MODULE_PARM_DESC(snic_trace_max_pages,
37 		"Total allocated memory pages for snic trace buffer");
38 
39 #endif
40 unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
41 module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
42 MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
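/*
 * Note: these module parameters can be set at load time (e.g.
 * "modprobe snic snic_max_qdepth=32") or, since they are declared
 * S_IRUGO|S_IWUSR, changed by root at runtime through
 * /sys/module/snic/parameters/<name>.
 */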
43 
44 /*
45  * snic_slave_alloc : callback function to SCSI Mid Layer, called on
46  * scsi device initialization.
47  */
48 static int
49 snic_slave_alloc(struct scsi_device *sdev)
50 {
51 	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
52 
53 	if (!tgt || snic_tgt_chkready(tgt))
54 		return -ENXIO;
55 
56 	return 0;
57 }
58 
59 /*
60  * snic_slave_configure : callback function to SCSI Mid Layer, called after
61  * scsi device creation to set up queue depth and command timeout.
62  */
63 static int
64 snic_slave_configure(struct scsi_device *sdev)
65 {
66 	struct snic *snic = shost_priv(sdev->host);
67 	u32 qdepth = 0, max_ios = 0;
68 	int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;
69 
70 	/* Set Queue Depth */
71 	max_ios = snic_max_qdepth;
72 	qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
73 	scsi_change_queue_depth(sdev, qdepth);
74 
75 	if (snic->fwinfo.io_tmo > 1)
76 		tmo = snic->fwinfo.io_tmo * HZ;
77 
78 	/* FW requires extended timeouts */
79 	blk_queue_rq_timeout(sdev->request_queue, tmo);
80 
81 	return 0;
82 }
83 
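/*
 * snic_change_queue_depth : callback to the SCSI Mid Layer to adjust the
 * per-LUN queue depth. The requested depth is capped at SNIC_MAX_QUEUE_DEPTH,
 * and queue-depth ramp-up/ramp-down statistics are recorded.
 */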
84 static int
85 snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
86 {
87 	struct snic *snic = shost_priv(sdev->host);
88 	int qsz = 0;
89 
90 	qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
91 	if (qsz < sdev->queue_depth)
92 		atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
93 	else if (qsz > sdev->queue_depth)
94 		atomic64_inc(&snic->s_stats.misc.qsz_rampup);
95 
96 	atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
97 
98 	scsi_change_queue_depth(sdev, qsz);
99 
100 	return sdev->queue_depth;
101 }
102 
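/*
 * SCSI host template: wires the snic entry points (queuecommand, error
 * handlers, slave callbacks, queue-depth control) into the SCSI Mid Layer.
 */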
103 static const struct scsi_host_template snic_host_template = {
104 	.module = THIS_MODULE,
105 	.name = SNIC_DRV_NAME,
106 	.queuecommand = snic_queuecommand,
107 	.eh_abort_handler = snic_abort_cmd,
108 	.eh_device_reset_handler = snic_device_reset,
109 	.eh_host_reset_handler = snic_host_reset,
110 	.slave_alloc = snic_slave_alloc,
111 	.slave_configure = snic_slave_configure,
112 	.change_queue_depth = snic_change_queue_depth,
113 	.this_id = -1,
114 	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
115 	.can_queue = SNIC_MAX_IO_REQ,
116 	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
117 	.max_sectors = 0x800,
118 	.shost_groups = snic_host_groups,
119 	.track_queue_depth = 1,
120 	.cmd_size = sizeof(struct snic_internal_io_state),
121 	.proc_name = "snic_scsi",
122 };
123 
124 /*
125  * snic_handle_link_event : Handles link events such as link up/down/error
126  */
127 void
128 snic_handle_link_event(struct snic *snic)
129 {
130 	unsigned long flags;
131 
132 	spin_lock_irqsave(&snic->snic_lock, flags);
133 	if (snic->stop_link_events) {
134 		spin_unlock_irqrestore(&snic->snic_lock, flags);
135 
136 		return;
137 	}
138 	spin_unlock_irqrestore(&snic->snic_lock, flags);
139 
140 	queue_work(snic_glob->event_q, &snic->link_work);
141 } /* end of snic_handle_link_event */
142 
143 /*
144  * snic_notify_set : sets up the notification area
145  * The notification area is used to receive events from the firmware.
146  * Note: snic supports only MSI-X interrupts, in which case
147  * svnic_dev_notify_set can be called directly.
148  */
149 static int
150 snic_notify_set(struct snic *snic)
151 {
152 	int ret = 0;
153 	enum vnic_dev_intr_mode intr_mode;
154 
155 	intr_mode = svnic_dev_get_intr_mode(snic->vdev);
156 
157 	if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
158 		ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
159 	} else {
160 		SNIC_HOST_ERR(snic->shost,
161 			      "Interrupt mode should be setup before devcmd notify set %d\n",
162 			      intr_mode);
163 		ret = -1;
164 	}
165 
166 	return ret;
167 } /* end of snic_notify_set */
168 
169 /*
170  * snic_dev_wait : polls vnic open status.
171  */
172 static int
173 snic_dev_wait(struct vnic_dev *vdev,
174 		int (*start)(struct vnic_dev *, int),
175 		int (*finished)(struct vnic_dev *, int *),
176 		int arg)
177 {
178 	unsigned long time;
179 	int ret, done;
180 	int retry_cnt = 0;
181 
182 	ret = start(vdev, arg);
183 	if (ret)
184 		return ret;
185 
186 	/*
187 	 * Wait for func to complete...2 seconds max.
188 	 *
189 	 * Sometimes schedule_timeout_uninterruptible takes a long time
190 	 * to wake up, which can cause the retry to be skipped. The
191 	 * retry counter ensures at least two retries.
192 	 */
193 	time = jiffies + (HZ * 2);
194 	do {
195 		ret = finished(vdev, &done);
196 		if (ret)
197 			return ret;
198 
199 		if (done)
200 			return 0;
201 		schedule_timeout_uninterruptible(HZ/10);
202 		++retry_cnt;
203 	} while (time_after(time, jiffies) || (retry_cnt < 3));
204 
205 	return -ETIMEDOUT;
206 } /* end of snic_dev_wait */
207 
208 /*
209  * snic_cleanup: called by snic_remove
210  * Stops the snic device and masks all interrupts. Completed CQ entries are
211  * drained, and posted WQ/RQ/Copy-WQ entries are cleaned up.
212  */
213 static int
214 snic_cleanup(struct snic *snic)
215 {
216 	unsigned int i;
217 	int ret;
218 
219 	svnic_dev_disable(snic->vdev);
220 	for (i = 0; i < snic->intr_count; i++)
221 		svnic_intr_mask(&snic->intr[i]);
222 
223 	for (i = 0; i < snic->wq_count; i++) {
224 		ret = svnic_wq_disable(&snic->wq[i]);
225 		if (ret)
226 			return ret;
227 	}
228 
229 	/* Clean up completed IOs */
230 	snic_fwcq_cmpl_handler(snic, -1);
231 
232 	snic_wq_cmpl_handler(snic, -1);
233 
234 	/* Clean up the IOs that have not completed */
235 	for (i = 0; i < snic->wq_count; i++)
236 		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);
237 
238 	for (i = 0; i < snic->cq_count; i++)
239 		svnic_cq_clean(&snic->cq[i]);
240 
241 	for (i = 0; i < snic->intr_count; i++)
242 		svnic_intr_clean(&snic->intr[i]);
243 
244 	/* Cleanup snic specific requests */
245 	snic_free_all_untagged_reqs(snic);
246 
247 	/* Cleanup Pending SCSI commands */
248 	snic_shutdown_scsi_cleanup(snic);
249 
250 	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
251 		mempool_destroy(snic->req_pool[i]);
252 
253 	return 0;
254 } /* end of snic_cleanup */
255 
256 
257 static void
258 snic_iounmap(struct snic *snic)
259 {
260 	if (snic->bar0.vaddr)
261 		iounmap(snic->bar0.vaddr);
262 }
263 
264 /*
265  * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
266  */
267 static int
268 snic_vdev_open_done(struct vnic_dev *vdev, int *done)
269 {
270 	struct snic *snic = svnic_dev_priv(vdev);
271 	int ret;
272 	int nretries = 5;
273 
274 	do {
275 		ret = svnic_dev_open_done(vdev, done);
276 		if (ret == 0)
277 			break;
278 
279 		SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
280 	} while (nretries--);
281 
282 	return ret;
283 } /* end of snic_vdev_open_done */
284 
285 /*
286  * snic_add_host : registers scsi host with ML
287  */
288 static int
289 snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
290 {
291 	int ret = 0;
292 
293 	ret = scsi_add_host(shost, &pdev->dev);
294 	if (ret) {
295 		SNIC_HOST_ERR(shost,
296 			      "snic: scsi_add_host failed. %d\n",
297 			      ret);
298 
299 		return ret;
300 	}
301 
302 	SNIC_BUG_ON(shost->work_q != NULL);
303 	shost->work_q = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM,
304 						shost->host_no);
305 	if (!shost->work_q) {
306 		SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
307 
308 		ret = -ENOMEM;
309 	}
310 
311 	return ret;
312 } /* end of snic_add_host */
313 
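/*
 * snic_del_host : tears down the scsi host work queue and removes the host
 * from the SCSI Mid Layer.
 */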
314 static void
315 snic_del_host(struct Scsi_Host *shost)
316 {
317 	if (!shost->work_q)
318 		return;
319 
320 	destroy_workqueue(shost->work_q);
321 	shost->work_q = NULL;
322 	scsi_remove_host(shost);
323 }
324 
325 int
326 snic_get_state(struct snic *snic)
327 {
328 	return atomic_read(&snic->state);
329 }
330 
331 void
332 snic_set_state(struct snic *snic, enum snic_state state)
333 {
334 	SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
335 		       snic_state_to_str(snic_get_state(snic)),
336 		       snic_state_to_str(state));
337 
338 	atomic_set(&snic->state, state);
339 }
340 
341 /*
342  * snic_probe : Initialize the snic interface.
343  */
344 static int
345 snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
346 {
347 	struct Scsi_Host *shost;
348 	struct snic *snic;
349 	mempool_t *pool;
350 	unsigned long flags;
351 	u32 max_ios = 0;
352 	int ret, i;
353 
354 	/* Device Information */
355 	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
356 		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
357 		  pdev->subsystem_device);
358 
359 	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
360 		  pdev->bus->number, PCI_SLOT(pdev->devfn),
361 		  PCI_FUNC(pdev->devfn));
362 
363 	/*
364 	 * Allocate SCSI Host and setup association between host, and snic
365 	 */
366 	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
367 	if (!shost) {
368 		SNIC_ERR("Unable to alloc scsi_host\n");
369 		ret = -ENOMEM;
370 
371 		goto prob_end;
372 	}
373 	snic = shost_priv(shost);
374 	snic->shost = shost;
375 
376 	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
377 		 shost->host_no);
378 
379 	SNIC_HOST_INFO(shost,
380 		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
381 		       shost->host_no, snic, shost, pdev->bus->number,
382 		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
383 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
384 	/* Per snic debugfs init */
385 	snic_stats_debugfs_init(snic);
386 #endif
387 
388 	/* Setup PCI Resources */
389 	pci_set_drvdata(pdev, snic);
390 	snic->pdev = pdev;
391 
392 	ret = pci_enable_device(pdev);
393 	if (ret) {
394 		SNIC_HOST_ERR(shost,
395 			      "Cannot enable PCI Resources, aborting : %d\n",
396 			      ret);
397 
398 		goto err_free_snic;
399 	}
400 
401 	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
402 	if (ret) {
403 		SNIC_HOST_ERR(shost,
404 			      "Cannot obtain PCI Resources, aborting : %d\n",
405 			      ret);
406 
407 		goto err_pci_disable;
408 	}
409 
410 	pci_set_master(pdev);
411 
412 	/*
413 	 * Query the PCI controller on the system for the DMA addressing
414 	 * limitation of the device. Try 43-bit first, and fall back
415 	 * to 32-bit.
416 	 */
417 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
418 	if (ret) {
419 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
420 		if (ret) {
421 			SNIC_HOST_ERR(shost,
422 				      "No Usable DMA Configuration, aborting %d\n",
423 				      ret);
424 			goto err_rel_regions;
425 		}
426 	}
427 
428 	/* Map vNIC resources from BAR0 */
429 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
430 		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
431 
432 		ret = -ENODEV;
433 		goto err_rel_regions;
434 	}
435 
436 	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
437 	if (!snic->bar0.vaddr) {
438 		SNIC_HOST_ERR(shost,
439 			      "Cannot memory map BAR0 res hdr aborting.\n");
440 
441 		ret = -ENODEV;
442 		goto err_rel_regions;
443 	}
444 
445 	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
446 	snic->bar0.len = pci_resource_len(pdev, 0);
447 	SNIC_BUG_ON(snic->bar0.bus_addr == 0);
448 
449 	/* Devcmd2 Resource Allocation and Initialization */
450 	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
451 	if (!snic->vdev) {
452 		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");
453 
454 		ret = -ENODEV;
455 		goto err_iounmap;
456 	}
457 
458 	ret = svnic_dev_cmd_init(snic->vdev, 0);
459 	if (ret) {
460 		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);
461 
462 		goto err_vnic_unreg;
463 	}
464 
465 	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
466 	if (ret) {
467 		SNIC_HOST_ERR(shost,
468 			      "vNIC dev open failed, aborting. %d\n",
469 			      ret);
470 
471 		goto err_vnic_unreg;
472 	}
473 
474 	ret = svnic_dev_init(snic->vdev, 0);
475 	if (ret) {
476 		SNIC_HOST_ERR(shost,
477 			      "vNIC dev init failed. aborting. %d\n",
478 			      ret);
479 
480 		goto err_dev_close;
481 	}
482 
483 	/* Get vNIC information */
484 	ret = snic_get_vnic_config(snic);
485 	if (ret) {
486 		SNIC_HOST_ERR(shost,
487 			      "Get vNIC configuration failed, aborting. %d\n",
488 			      ret);
489 
490 		goto err_dev_close;
491 	}
492 
493 	/* Configure Maximum Outstanding IO reqs */
494 	max_ios = snic->config.io_throttle_count;
495 	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
496 		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
497 					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));
498 
499 	snic->max_tag_id = shost->can_queue;
500 
501 	shost->max_lun = snic->config.luns_per_tgt;
502 	shost->max_id = SNIC_MAX_TARGET;
503 
504 	shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/
505 
506 	snic_get_res_counts(snic);
507 
508 	/*
509 	 * Assumption: Only MSIx is supported
510 	 */
511 	ret = snic_set_intr_mode(snic);
512 	if (ret) {
513 		SNIC_HOST_ERR(shost,
514 			      "Failed to set intr mode aborting. %d\n",
515 			      ret);
516 
517 		goto err_dev_close;
518 	}
519 
520 	ret = snic_alloc_vnic_res(snic);
521 	if (ret) {
522 		SNIC_HOST_ERR(shost,
523 			      "Failed to alloc vNIC resources aborting. %d\n",
524 			      ret);
525 
526 		goto err_clear_intr;
527 	}
528 
529 	/* Initialize specific lists */
530 	INIT_LIST_HEAD(&snic->list);
531 
532 	/*
533 	 * spl_cmd_list for maintaining snic specific cmds
534 	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
535 	 */
536 	INIT_LIST_HEAD(&snic->spl_cmd_list);
537 	spin_lock_init(&snic->spl_cmd_lock);
538 
539 	/* initialize all snic locks */
540 	spin_lock_init(&snic->snic_lock);
541 
542 	for (i = 0; i < SNIC_WQ_MAX; i++)
543 		spin_lock_init(&snic->wq_lock[i]);
544 
545 	for (i = 0; i < SNIC_IO_LOCKS; i++)
546 		spin_lock_init(&snic->io_req_lock[i]);
547 
548 	pool = mempool_create_slab_pool(2,
549 				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
550 	if (!pool) {
551 		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
552 
553 		ret = -ENOMEM;
554 		goto err_free_res;
555 	}
556 
557 	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;
558 
559 	pool = mempool_create_slab_pool(2,
560 				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
561 	if (!pool) {
562 		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
563 
564 		ret = -ENOMEM;
565 		goto err_free_dflt_sgl_pool;
566 	}
567 
568 	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;
569 
570 	pool = mempool_create_slab_pool(2,
571 				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
572 	if (!pool) {
573 		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
574 
575 		ret = -ENOMEM;
576 		goto err_free_max_sgl_pool;
577 	}
578 
579 	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;
580 
581 	/* Initialize snic state */
582 	atomic_set(&snic->state, SNIC_INIT);
583 
584 	atomic_set(&snic->ios_inflight, 0);
585 
586 	/* Setup notification buffer area */
587 	ret = snic_notify_set(snic);
588 	if (ret) {
589 		SNIC_HOST_ERR(shost,
590 			      "Failed to alloc notify buffer aborting. %d\n",
591 			      ret);
592 
593 		goto err_free_tmreq_pool;
594 	}
595 
596 	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
597 	list_add_tail(&snic->list, &snic_glob->snic_list);
598 	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
599 
600 	snic_disc_init(&snic->disc);
601 	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
602 	INIT_WORK(&snic->disc_work, snic_handle_disc);
603 	INIT_WORK(&snic->link_work, snic_handle_link);
604 
605 	/* Enable all queues */
606 	for (i = 0; i < snic->wq_count; i++)
607 		svnic_wq_enable(&snic->wq[i]);
608 
609 	ret = svnic_dev_enable_wait(snic->vdev);
610 	if (ret) {
611 		SNIC_HOST_ERR(shost,
612 			      "vNIC dev enable failed w/ error %d\n",
613 			      ret);
614 
615 		goto err_vdev_enable;
616 	}
617 
618 	ret = snic_request_intr(snic);
619 	if (ret) {
620 		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);
621 
622 		goto err_req_intr;
623 	}
624 
625 	for (i = 0; i < snic->intr_count; i++)
626 		svnic_intr_unmask(&snic->intr[i]);
627 
628 	/* Get snic params */
629 	ret = snic_get_conf(snic);
630 	if (ret) {
631 		SNIC_HOST_ERR(shost,
632 			      "Failed to get snic io config from FW w err %d\n",
633 			      ret);
634 
635 		goto err_get_conf;
636 	}
637 
638 	/*
639 	 * Initialization done with PCI system, hardware, firmware.
640 	 * Add shost to SCSI
641 	 */
642 	ret = snic_add_host(shost, pdev);
643 	if (ret) {
644 		SNIC_HOST_ERR(shost,
645 			      "Adding scsi host Failed ... exiting. %d\n",
646 			      ret);
647 
648 		goto err_get_conf;
649 	}
650 
651 	snic_set_state(snic, SNIC_ONLINE);
652 
653 	ret = snic_disc_start(snic);
654 	if (ret) {
655 		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
656 			      ret);
657 
658 		goto err_get_conf;
659 	}
660 
661 	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");
662 
663 	return 0;
664 
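	/*
	 * Error unwind: the labels below release resources in the reverse
	 * order of their acquisition above.
	 */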
665 err_get_conf:
666 	snic_free_all_untagged_reqs(snic);
667 
668 	for (i = 0; i < snic->intr_count; i++)
669 		svnic_intr_mask(&snic->intr[i]);
670 
671 	snic_free_intr(snic);
672 
673 err_req_intr:
674 	svnic_dev_disable(snic->vdev);
675 
676 err_vdev_enable:
677 	svnic_dev_notify_unset(snic->vdev);
678 
679 	for (i = 0; i < snic->wq_count; i++) {
680 		int rc = 0;
681 
682 		rc = svnic_wq_disable(&snic->wq[i]);
683 		if (rc) {
684 			SNIC_HOST_ERR(shost,
685 				      "WQ Disable Failed w/ err = %d\n", rc);
686 
687 			break;
688 		}
689 	}
690 	snic_del_host(snic->shost);
691 
692 err_free_tmreq_pool:
693 	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);
694 
695 err_free_max_sgl_pool:
696 	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);
697 
698 err_free_dflt_sgl_pool:
699 	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);
700 
701 err_free_res:
702 	snic_free_vnic_res(snic);
703 
704 err_clear_intr:
705 	snic_clear_intr_mode(snic);
706 
707 err_dev_close:
708 	svnic_dev_close(snic->vdev);
709 
710 err_vnic_unreg:
711 	svnic_dev_unregister(snic->vdev);
712 
713 err_iounmap:
714 	snic_iounmap(snic);
715 
716 err_rel_regions:
717 	pci_release_regions(pdev);
718 
719 err_pci_disable:
720 	pci_disable_device(pdev);
721 
722 err_free_snic:
723 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
724 	snic_stats_debugfs_remove(snic);
725 #endif
726 	scsi_host_put(shost);
727 	pci_set_drvdata(pdev, NULL);
728 
729 prob_end:
730 	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
731 		  pdev->bus->number, PCI_SLOT(pdev->devfn),
732 		  PCI_FUNC(pdev->devfn));
733 
734 	return ret;
735 } /* end of snic_probe */
736 
737 
738 /*
739  * snic_remove : invoked on unbinding the interface, to clean up the
740  * resources allocated in snic_probe during initialization.
741  */
742 static void
743 snic_remove(struct pci_dev *pdev)
744 {
745 	struct snic *snic = pci_get_drvdata(pdev);
746 	unsigned long flags;
747 
748 	if (!snic) {
749 		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
750 			  pdev->bus->number, PCI_SLOT(pdev->devfn),
751 			  PCI_FUNC(pdev->devfn));
752 
753 		return;
754 	}
755 
756 	/*
757 	 * Mark state so that the workqueue thread stops forwarding
758 	 * received frames and link events. ISR and other threads
759 	 * that can queue work items will also stop creating work
760 	 * items on the snic workqueue
761 	 */
762 	snic_set_state(snic, SNIC_OFFLINE);
763 	spin_lock_irqsave(&snic->snic_lock, flags);
764 	snic->stop_link_events = 1;
765 	spin_unlock_irqrestore(&snic->snic_lock, flags);
766 
767 	flush_workqueue(snic_glob->event_q);
768 	snic_disc_term(snic);
769 
770 	spin_lock_irqsave(&snic->snic_lock, flags);
771 	snic->in_remove = 1;
772 	spin_unlock_irqrestore(&snic->snic_lock, flags);
773 
774 	/*
775 	 * This stops the snic device and masks all interrupts. Completed
776 	 * CQ entries are drained, and posted WQ/RQ/Copy-WQ entries are
777 	 * cleaned up.
778 	 */
779 	snic_cleanup(snic);
780 
781 	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
782 	list_del(&snic->list);
783 	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
784 
785 	snic_tgt_del_all(snic);
786 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
787 	snic_stats_debugfs_remove(snic);
788 #endif
789 	snic_del_host(snic->shost);
790 
791 	svnic_dev_notify_unset(snic->vdev);
792 	snic_free_intr(snic);
793 	snic_free_vnic_res(snic);
794 	snic_clear_intr_mode(snic);
795 	svnic_dev_close(snic->vdev);
796 	svnic_dev_unregister(snic->vdev);
797 	snic_iounmap(snic);
798 	pci_release_regions(pdev);
799 	pci_disable_device(pdev);
800 	pci_set_drvdata(pdev, NULL);
801 
802 	/* this frees Scsi_Host and snic memory (continuous chunk) */
803 	scsi_host_put(snic->shost);
804 } /* end of snic_remove */
805 
806 
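/*
 * snic_glob : module-wide context shared by all snic instances. It holds
 * the request slab caches, the event workqueue, and the list of snic
 * devices with its lock.
 */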
807 struct snic_global *snic_glob;
808 
809 /*
810  * snic_global_data_init: Initialize SNIC Global Data
811  * Note: all global lists and variables should be part of the global data;
812  * this helps in debugging.
813  */
814 static int
815 snic_global_data_init(void)
816 {
817 	int ret = 0;
818 	struct kmem_cache *cachep;
819 	ssize_t len = 0;
820 
821 	snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
822 
823 	if (!snic_glob) {
824 		SNIC_ERR("Failed to allocate Global Context.\n");
825 
826 		ret = -ENOMEM;
827 		goto gdi_end;
828 	}
829 
830 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
831 	/* Debugfs related Initialization */
832 	/* Create debugfs entries for snic */
833 	snic_debugfs_init();
834 
835 	/* Trace related Initialization */
836 	/* Allocate memory for trace buffer */
837 	ret = snic_trc_init();
838 	if (ret < 0) {
839 		SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
840 		snic_trc_free();
841 		/* continue even if it fails */
842 	}
843 
844 #endif
845 	INIT_LIST_HEAD(&snic_glob->snic_list);
846 	spin_lock_init(&snic_glob->snic_list_lock);
847 
848 	/* Create a cache for allocation of snic_host_req+default size ESGLs */
849 	len = sizeof(struct snic_req_info);
850 	len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
851 	cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
852 				   SLAB_HWCACHE_ALIGN, NULL);
853 	if (!cachep) {
854 		SNIC_ERR("Failed to create snic default sgl slab\n");
855 		ret = -ENOMEM;
856 
857 		goto err_dflt_req_slab;
858 	}
859 	snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
860 
861 	/* Create a cache for allocation of max size Extended SGLs */
862 	len = sizeof(struct snic_req_info);
863 	len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
864 	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
865 				   SLAB_HWCACHE_ALIGN, NULL);
866 	if (!cachep) {
867 		SNIC_ERR("Failed to create snic max sgl slab\n");
868 		ret = -ENOMEM;
869 
870 		goto err_max_req_slab;
871 	}
872 	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
873 
874 	len = sizeof(struct snic_host_req);
875 	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
876 				   SLAB_HWCACHE_ALIGN, NULL);
877 	if (!cachep) {
878 		SNIC_ERR("Failed to create snic tm req slab\n");
879 		ret = -ENOMEM;
880 
881 		goto err_tmreq_slab;
882 	}
883 	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
884 
885 	/* snic_event queue */
886 	snic_glob->event_q =
887 		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "snic_event_wq");
888 	if (!snic_glob->event_q) {
889 		SNIC_ERR("snic event queue create failed\n");
890 		ret = -ENOMEM;
891 
892 		goto err_eventq;
893 	}
894 
895 	return ret;
896 
897 err_eventq:
898 	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
899 
900 err_tmreq_slab:
901 	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
902 
903 err_max_req_slab:
904 	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
905 
906 err_dflt_req_slab:
907 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
908 	snic_trc_free();
909 	snic_debugfs_term();
910 #endif
911 	kfree(snic_glob);
912 	snic_glob = NULL;
913 
914 gdi_end:
915 	return ret;
916 } /* end of snic_glob_init */
917 
918 /*
919  * snic_global_data_cleanup : Frees SNIC Global Data
920  */
921 static void
922 snic_global_data_cleanup(void)
923 {
924 	SNIC_BUG_ON(snic_glob == NULL);
925 
926 	destroy_workqueue(snic_glob->event_q);
927 	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
928 	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
929 	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
930 
931 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
932 	/* Freeing Trace Resources */
933 	snic_trc_free();
934 
935 	/* Freeing Debugfs Resources */
936 	snic_debugfs_term();
937 #endif
938 	kfree(snic_glob);
939 	snic_glob = NULL;
940 } /* end of snic_glob_cleanup */
941 
942 static struct pci_driver snic_driver = {
943 	.name = SNIC_DRV_NAME,
944 	.id_table = snic_id_table,
945 	.probe = snic_probe,
946 	.remove = snic_remove,
947 };
948 
949 static int __init
950 snic_init_module(void)
951 {
952 	int ret = 0;
953 
954 #ifndef __x86_64__
955 	SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
956 	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
957 #endif
958 
959 	SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);
960 
961 	ret = snic_global_data_init();
962 	if (ret) {
963 		SNIC_ERR("Failed to Initialize Global Data.\n");
964 
965 		return ret;
966 	}
967 
968 	ret = pci_register_driver(&snic_driver);
969 	if (ret < 0) {
970 		SNIC_ERR("PCI driver register error\n");
971 
972 		goto err_pci_reg;
973 	}
974 
975 	return ret;
976 
977 err_pci_reg:
978 	snic_global_data_cleanup();
979 
980 	return ret;
981 }
982 
983 static void __exit
984 snic_cleanup_module(void)
985 {
986 	pci_unregister_driver(&snic_driver);
987 	snic_global_data_cleanup();
988 }
989 
990 module_init(snic_init_module);
991 module_exit(snic_cleanup_module);
992 
993 MODULE_LICENSE("GPL v2");
994 MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
995 MODULE_VERSION(SNIC_DRV_VERSION);
996 MODULE_DEVICE_TABLE(pci, snic_id_table);
997 MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
998 	      "Sesidhar Baddela <sebaddel@cisco.com>");
999