xref: /linux/drivers/scsi/elx/efct/efct_driver.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
4  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
5  */
6 
7 #include "efct_driver.h"
8 
9 #include "efct_hw.h"
10 #include "efct_unsol.h"
11 #include "efct_scsi.h"
12 
13 LIST_HEAD(efct_devices);
14 
15 static int logmask;
16 module_param(logmask, int, 0444);
17 MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");
18 
/*
 * Callback table handed to libefc: mailbox/ELS/BLS traffic is serviced by
 * the efct HW layer, nport/node lifecycle events by the SCSI target
 * back-end.
 */
static struct libefc_function_template efct_libefc_templ = {
	.issue_mbox_rqst = efct_issue_mbox_rqst,
	.send_els = efct_els_hw_srrs_send,
	.send_bls = efct_efc_bls_send,

	.new_nport = efct_scsi_tgt_new_nport,
	.del_nport = efct_scsi_tgt_del_nport,
	.scsi_new_node = efct_scsi_new_initiator,
	.scsi_del_node = efct_scsi_del_initiator,
	.hw_seq_free = efct_efc_hw_sequence_free,
};
30 
/*
 * Driver-wide one-time initialization: bring up the target-server layer,
 * then register with the FC transport.  Returns 0 or a negative errno.
 */
static int
efct_device_init(void)
{
	int rc = efct_scsi_tgt_driver_init();

	if (rc) {
		pr_err("efct_scsi_tgt_init failed rc=%d\n", rc);
		return rc;
	}

	rc = efct_scsi_reg_fc_transport();
	if (rc) {
		/* Unwind the target-server init before bailing out. */
		efct_scsi_tgt_driver_exit();
		pr_err("failed to register to FC host\n");
		return rc;
	}

	return 0;
}
52 
/*
 * Driver-wide teardown, mirror of efct_device_init(): unregister from the
 * FC transport first, then shut down the target-server layer.
 */
static void
efct_device_shutdown(void)
{
	efct_scsi_release_fc_transport();

	efct_scsi_tgt_driver_exit();
}
60 
61 static void *
efct_device_alloc(u32 nid)62 efct_device_alloc(u32 nid)
63 {
64 	struct efct *efct = NULL;
65 
66 	efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
67 	if (!efct)
68 		return efct;
69 
70 	INIT_LIST_HEAD(&efct->list_entry);
71 	list_add_tail(&efct->list_entry, &efct_devices);
72 
73 	return efct;
74 }
75 
76 static void
efct_teardown_msix(struct efct * efct)77 efct_teardown_msix(struct efct *efct)
78 {
79 	u32 i;
80 
81 	for (i = 0; i < efct->n_msix_vec; i++) {
82 		free_irq(pci_irq_vector(efct->pci, i),
83 			 &efct->intr_context[i]);
84 	}
85 
86 	pci_free_irq_vectors(efct->pci);
87 }
88 
89 static int
efct_efclib_config(struct efct * efct,struct libefc_function_template * tt)90 efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
91 {
92 	struct efc *efc;
93 	struct sli4 *sli;
94 	int rc = 0;
95 
96 	efc = kzalloc_obj(*efc);
97 	if (!efc)
98 		return -ENOMEM;
99 
100 	efct->efcport = efc;
101 
102 	memcpy(&efc->tt, tt, sizeof(*tt));
103 	efc->base = efct;
104 	efc->pci = efct->pci;
105 
106 	efc->def_wwnn = efct_get_wwnn(&efct->hw);
107 	efc->def_wwpn = efct_get_wwpn(&efct->hw);
108 	efc->enable_tgt = 1;
109 	efc->log_level = EFC_LOG_LIB;
110 
111 	sli = &efct->hw.sli;
112 	efc->max_xfer_size = sli->sge_supported_length *
113 			     sli_get_max_sgl(&efct->hw.sli);
114 	efc->sli = sli;
115 	efc->fcfi = efct->hw.fcf_indicator;
116 
117 	rc = efcport_init(efc);
118 	if (rc)
119 		efc_log_err(efc, "efcport_init failed\n");
120 
121 	return rc;
122 }
123 
124 static int efct_request_firmware_update(struct efct *efct);
125 
126 static const char*
efct_pci_model(u16 device)127 efct_pci_model(u16 device)
128 {
129 	switch (device) {
130 	case EFCT_DEVICE_LANCER_G6:	return "LPE31004";
131 	case EFCT_DEVICE_LANCER_G7:	return "LPE36000";
132 	default:			return "unknown";
133 	}
134 }
135 
136 static int
efct_device_attach(struct efct * efct)137 efct_device_attach(struct efct *efct)
138 {
139 	u32 rc = 0, i = 0;
140 
141 	if (efct->attached) {
142 		efc_log_err(efct, "Device is already attached\n");
143 		return -EIO;
144 	}
145 
146 	snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
147 		 efct->instance_index);
148 
149 	efct->logmask = logmask;
150 	efct->filter_def = EFCT_DEFAULT_FILTER;
151 	efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;
152 
153 	efct->model = efct_pci_model(efct->pci->device);
154 
155 	efct->efct_req_fw_upgrade = true;
156 
157 	/* Allocate transport object and bring online */
158 	efct->xport = efct_xport_alloc(efct);
159 	if (!efct->xport) {
160 		efc_log_err(efct, "failed to allocate transport object\n");
161 		rc = -ENOMEM;
162 		goto out;
163 	}
164 
165 	rc = efct_xport_attach(efct->xport);
166 	if (rc) {
167 		efc_log_err(efct, "failed to attach transport object\n");
168 		goto xport_out;
169 	}
170 
171 	rc = efct_xport_initialize(efct->xport);
172 	if (rc) {
173 		efc_log_err(efct, "failed to initialize transport object\n");
174 		goto xport_out;
175 	}
176 
177 	rc = efct_efclib_config(efct, &efct_libefc_templ);
178 	if (rc) {
179 		efc_log_err(efct, "failed to init efclib\n");
180 		goto efclib_out;
181 	}
182 
183 	for (i = 0; i < efct->n_msix_vec; i++) {
184 		efc_log_debug(efct, "irq %d enabled\n", i);
185 		enable_irq(pci_irq_vector(efct->pci, i));
186 	}
187 
188 	efct->attached = true;
189 
190 	if (efct->efct_req_fw_upgrade)
191 		efct_request_firmware_update(efct);
192 
193 	return rc;
194 
195 efclib_out:
196 	efct_xport_detach(efct->xport);
197 xport_out:
198 	efct_xport_free(efct->xport);
199 	efct->xport = NULL;
200 out:
201 	return rc;
202 }
203 
204 static int
efct_device_detach(struct efct * efct)205 efct_device_detach(struct efct *efct)
206 {
207 	int i;
208 
209 	if (!efct || !efct->attached) {
210 		pr_err("Device is not attached\n");
211 		return -EIO;
212 	}
213 
214 	if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
215 		efc_log_err(efct, "Transport Shutdown timed out\n");
216 
217 	for (i = 0; i < efct->n_msix_vec; i++)
218 		disable_irq(pci_irq_vector(efct->pci, i));
219 
220 	efct_xport_detach(efct->xport);
221 
222 	efct_xport_free(efct->xport);
223 	efct->xport = NULL;
224 
225 	efcport_destroy(efct->efcport);
226 	kfree(efct->efcport);
227 
228 	efct->attached = false;
229 
230 	return 0;
231 }
232 
233 static void
efct_fw_write_cb(int status,u32 actual_write_length,u32 change_status,void * arg)234 efct_fw_write_cb(int status, u32 actual_write_length,
235 		 u32 change_status, void *arg)
236 {
237 	struct efct_fw_write_result *result = arg;
238 
239 	result->status = status;
240 	result->actual_xfer = actual_write_length;
241 	result->change_status = change_status;
242 
243 	complete(&result->done);
244 }
245 
246 static int
efct_firmware_write(struct efct * efct,const u8 * buf,size_t buf_len,u8 * change_status)247 efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
248 		    u8 *change_status)
249 {
250 	int rc = 0;
251 	u32 bytes_left;
252 	u32 xfer_size;
253 	u32 offset;
254 	struct efc_dma dma;
255 	int last = 0;
256 	struct efct_fw_write_result result;
257 
258 	init_completion(&result.done);
259 
260 	bytes_left = buf_len;
261 	offset = 0;
262 
263 	dma.size = FW_WRITE_BUFSIZE;
264 	dma.virt = dma_alloc_coherent(&efct->pci->dev,
265 				      dma.size, &dma.phys, GFP_KERNEL);
266 	if (!dma.virt)
267 		return -ENOMEM;
268 
269 	while (bytes_left > 0) {
270 		if (bytes_left > FW_WRITE_BUFSIZE)
271 			xfer_size = FW_WRITE_BUFSIZE;
272 		else
273 			xfer_size = bytes_left;
274 
275 		memcpy(dma.virt, buf + offset, xfer_size);
276 
277 		if (bytes_left == xfer_size)
278 			last = 1;
279 
280 		efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
281 				       last, efct_fw_write_cb, &result);
282 
283 		if (wait_for_completion_interruptible(&result.done) != 0) {
284 			rc = -ENXIO;
285 			break;
286 		}
287 
288 		if (result.actual_xfer == 0 || result.status != 0) {
289 			rc = -EFAULT;
290 			break;
291 		}
292 
293 		if (last)
294 			*change_status = result.change_status;
295 
296 		bytes_left -= result.actual_xfer;
297 		offset += result.actual_xfer;
298 	}
299 
300 	dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
301 	return rc;
302 }
303 
304 static int
efct_fw_reset(struct efct * efct)305 efct_fw_reset(struct efct *efct)
306 {
307 	/*
308 	 * Firmware reset to activate the new firmware.
309 	 * Function 0 will update and load the new firmware
310 	 * during attach.
311 	 */
312 	if (timer_pending(&efct->xport->stats_timer))
313 		timer_delete(&efct->xport->stats_timer);
314 
315 	if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
316 		efc_log_info(efct, "failed to reset firmware\n");
317 		return -EIO;
318 	}
319 
320 	efc_log_info(efct, "successfully reset firmware.Now resetting port\n");
321 
322 	efct_device_detach(efct);
323 	return efct_device_attach(efct);
324 }
325 
326 static int
efct_request_firmware_update(struct efct * efct)327 efct_request_firmware_update(struct efct *efct)
328 {
329 	int rc = 0;
330 	u8 file_name[256], fw_change_status = 0;
331 	const struct firmware *fw;
332 	struct efct_hw_grp_hdr *fw_image;
333 
334 	snprintf(file_name, 256, "%s.grp", efct->model);
335 
336 	rc = request_firmware(&fw, file_name, &efct->pci->dev);
337 	if (rc) {
338 		efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
339 		return rc;
340 	}
341 
342 	fw_image = (struct efct_hw_grp_hdr *)fw->data;
343 
344 	if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
345 		     strnlen(fw_image->revision, 16))) {
346 		efc_log_debug(efct,
347 			      "Skip update. Firmware is already up to date.\n");
348 		goto exit;
349 	}
350 
351 	efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
352 		     efct->hw.sli.fw_name[0], fw_image->revision);
353 
354 	rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
355 	if (rc) {
356 		efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
357 		goto exit;
358 	}
359 
360 	efc_log_info(efct, "Firmware updated successfully\n");
361 	switch (fw_change_status) {
362 	case 0x00:
363 		efc_log_info(efct, "New firmware is active.\n");
364 		break;
365 	case 0x01:
366 		efc_log_info(efct,
367 			"System reboot needed to activate the new firmware\n");
368 		break;
369 	case 0x02:
370 	case 0x03:
371 		efc_log_info(efct,
372 			     "firmware reset to activate the new firmware\n");
373 		efct_fw_reset(efct);
374 		break;
375 	default:
376 		efc_log_info(efct, "Unexpected value change_status:%d\n",
377 			     fw_change_status);
378 		break;
379 	}
380 
381 exit:
382 	release_firmware(fw);
383 
384 	return rc;
385 }
386 
387 static void
efct_device_free(struct efct * efct)388 efct_device_free(struct efct *efct)
389 {
390 	if (efct) {
391 		list_del(&efct->list_entry);
392 		kfree(efct);
393 	}
394 }
395 
396 static int
efct_device_interrupts_required(struct efct * efct)397 efct_device_interrupts_required(struct efct *efct)
398 {
399 	int rc;
400 
401 	rc = efct_hw_setup(&efct->hw, efct, efct->pci);
402 	if (rc < 0)
403 		return rc;
404 
405 	return efct->hw.config.n_eq;
406 }
407 
408 static irqreturn_t
efct_intr_thread(int irq,void * handle)409 efct_intr_thread(int irq, void *handle)
410 {
411 	struct efct_intr_context *intr_ctx = handle;
412 	struct efct *efct = intr_ctx->efct;
413 
414 	efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
415 	return IRQ_HANDLED;
416 }
417 
418 static int
efct_setup_msix(struct efct * efct,u32 num_intrs)419 efct_setup_msix(struct efct *efct, u32 num_intrs)
420 {
421 	int rc = 0, i;
422 
423 	if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
424 		dev_err(&efct->pci->dev,
425 			"%s : MSI-X not available\n", __func__);
426 		return -EIO;
427 	}
428 
429 	efct->n_msix_vec = num_intrs;
430 
431 	rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
432 				   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
433 
434 	if (rc < 0) {
435 		dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
436 		return rc;
437 	}
438 
439 	for (i = 0; i < num_intrs; i++) {
440 		struct efct_intr_context *intr_ctx = NULL;
441 
442 		intr_ctx = &efct->intr_context[i];
443 		intr_ctx->efct = efct;
444 		intr_ctx->index = i;
445 
446 		rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
447 					  NULL, efct_intr_thread, IRQF_ONESHOT,
448 					  EFCT_DRIVER_NAME, intr_ctx);
449 		if (rc) {
450 			dev_err(&efct->pci->dev,
451 				"Failed to register %d vector: %d\n", i, rc);
452 			goto out;
453 		}
454 	}
455 
456 	return rc;
457 
458 out:
459 	while (--i >= 0)
460 		free_irq(pci_irq_vector(efct->pci, i),
461 			 &efct->intr_context[i]);
462 
463 	pci_free_irq_vectors(efct->pci);
464 	return rc;
465 }
466 
/* PCI IDs this driver binds to: Emulex Lancer G6 and G7 adapters. */
static const struct pci_device_id efct_pci_table[] = {
	{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
	{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
	{}	/* terminate list */
};
472 
473 static int
efct_pci_probe(struct pci_dev * pdev,const struct pci_device_id * ent)474 efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
475 {
476 	struct efct *efct = NULL;
477 	int rc;
478 	u32 i, r;
479 	int num_interrupts = 0;
480 	int nid;
481 
482 	dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);
483 
484 	rc = pci_enable_device_mem(pdev);
485 	if (rc)
486 		return rc;
487 
488 	pci_set_master(pdev);
489 
490 	rc = pci_set_mwi(pdev);
491 	if (rc) {
492 		dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
493 		goto mwi_out;
494 	}
495 
496 	rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
497 	if (rc) {
498 		dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
499 		goto req_regions_out;
500 	}
501 
502 	/* Fetch the Numa node id for this device */
503 	nid = dev_to_node(&pdev->dev);
504 	if (nid < 0) {
505 		dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid);
506 		nid = 0;
507 	}
508 
509 	/* Allocate efct */
510 	efct = efct_device_alloc(nid);
511 	if (!efct) {
512 		dev_err(&pdev->dev, "Failed to allocate efct\n");
513 		rc = -ENOMEM;
514 		goto alloc_out;
515 	}
516 
517 	efct->pci = pdev;
518 	efct->numa_node = nid;
519 
520 	/* Map all memory BARs */
521 	for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
522 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
523 			efct->reg[r] = ioremap(pci_resource_start(pdev, i),
524 					       pci_resource_len(pdev, i));
525 			r++;
526 		}
527 
528 		/*
529 		 * If the 64-bit attribute is set, both this BAR and the
530 		 * next form the complete address. Skip processing the
531 		 * next BAR.
532 		 */
533 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
534 			i++;
535 	}
536 
537 	pci_set_drvdata(pdev, efct);
538 
539 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
540 	if (rc) {
541 		dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
542 		goto dma_mask_out;
543 	}
544 
545 	num_interrupts = efct_device_interrupts_required(efct);
546 	if (num_interrupts < 0) {
547 		efc_log_err(efct, "efct_device_interrupts_required failed\n");
548 		rc = -1;
549 		goto dma_mask_out;
550 	}
551 
552 	/*
553 	 * Initialize MSIX interrupts, note,
554 	 * efct_setup_msix() enables the interrupt
555 	 */
556 	rc = efct_setup_msix(efct, num_interrupts);
557 	if (rc) {
558 		dev_err(&pdev->dev, "Can't setup msix\n");
559 		goto dma_mask_out;
560 	}
561 	/* Disable interrupt for now */
562 	for (i = 0; i < efct->n_msix_vec; i++) {
563 		efc_log_debug(efct, "irq %d disabled\n", i);
564 		disable_irq(pci_irq_vector(efct->pci, i));
565 	}
566 
567 	rc = efct_device_attach(efct);
568 	if (rc)
569 		goto attach_out;
570 
571 	return 0;
572 
573 attach_out:
574 	efct_teardown_msix(efct);
575 dma_mask_out:
576 	pci_set_drvdata(pdev, NULL);
577 
578 	for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
579 		if (efct->reg[i])
580 			iounmap(efct->reg[i]);
581 	}
582 	efct_device_free(efct);
583 alloc_out:
584 	pci_release_regions(pdev);
585 req_regions_out:
586 	pci_clear_mwi(pdev);
587 mwi_out:
588 	pci_disable_device(pdev);
589 	return rc;
590 }
591 
592 static void
efct_pci_remove(struct pci_dev * pdev)593 efct_pci_remove(struct pci_dev *pdev)
594 {
595 	struct efct *efct = pci_get_drvdata(pdev);
596 	u32 i;
597 
598 	if (!efct)
599 		return;
600 
601 	efct_device_detach(efct);
602 
603 	efct_teardown_msix(efct);
604 
605 	for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
606 		if (efct->reg[i])
607 			iounmap(efct->reg[i]);
608 	}
609 
610 	pci_set_drvdata(pdev, NULL);
611 
612 	efct_device_free(efct);
613 
614 	pci_release_regions(pdev);
615 
616 	pci_disable_device(pdev);
617 }
618 
/*
 * PCI error-recovery helper: quiesce the device (detach + tear down
 * MSI-X) ahead of a slot reset, then disable the PCI device itself.
 */
static void
efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
{
	if (efct) {
		efc_log_debug(efct,
			      "PCI channel disable preparing for reset\n");
		efct_device_detach(efct);
		/* Disable interrupt and pci device */
		efct_teardown_msix(efct);
	}
	pci_disable_device(pdev);
}
631 
632 static void
efct_device_prep_for_recover(struct efct * efct)633 efct_device_prep_for_recover(struct efct *efct)
634 {
635 	if (efct) {
636 		efc_log_debug(efct, "PCI channel preparing for recovery\n");
637 		efct_hw_io_abort_all(&efct->hw);
638 	}
639 }
640 
641 /**
642  * efct_pci_io_error_detected - method for handling PCI I/O error
643  * @pdev: pointer to PCI device.
644  * @state: the current PCI connection state.
645  *
646  * This routine is registered to the PCI subsystem for error handling. This
647  * function is called by the PCI subsystem after a PCI bus error affecting
648  * this device has been detected. When this routine is invoked, it dispatches
649  * device error detected handling routine, which will perform the proper
650  * error detected operation.
651  *
652  * Return codes
653  * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
654  * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
655  */
656 static pci_ers_result_t
efct_pci_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)657 efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
658 {
659 	struct efct *efct = pci_get_drvdata(pdev);
660 	pci_ers_result_t rc;
661 
662 	switch (state) {
663 	case pci_channel_io_normal:
664 		efct_device_prep_for_recover(efct);
665 		rc = PCI_ERS_RESULT_CAN_RECOVER;
666 		break;
667 	case pci_channel_io_frozen:
668 		efct_device_prep_for_reset(efct, pdev);
669 		rc = PCI_ERS_RESULT_NEED_RESET;
670 		break;
671 	case pci_channel_io_perm_failure:
672 		efct_device_detach(efct);
673 		rc = PCI_ERS_RESULT_DISCONNECT;
674 		break;
675 	default:
676 		efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
677 		efct_device_prep_for_reset(efct, pdev);
678 		rc = PCI_ERS_RESULT_NEED_RESET;
679 		break;
680 	}
681 
682 	return rc;
683 }
684 
685 static pci_ers_result_t
efct_pci_io_slot_reset(struct pci_dev * pdev)686 efct_pci_io_slot_reset(struct pci_dev *pdev)
687 {
688 	int rc;
689 	struct efct *efct = pci_get_drvdata(pdev);
690 
691 	rc = pci_enable_device_mem(pdev);
692 	if (rc) {
693 		efc_log_err(efct, "failed to enable PCI device after reset\n");
694 		return PCI_ERS_RESULT_DISCONNECT;
695 	}
696 
697 	/*
698 	 * As the new kernel behavior of pci_restore_state() API call clears
699 	 * device saved_state flag, need to save the restored state again.
700 	 */
701 
702 	pci_save_state(pdev);
703 
704 	pci_set_master(pdev);
705 
706 	rc = efct_setup_msix(efct, efct->n_msix_vec);
707 	if (rc)
708 		efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
709 			    rc);
710 
711 	/* Perform device reset */
712 	efct_device_detach(efct);
713 	/* Bring device to online*/
714 	efct_device_attach(efct);
715 
716 	return PCI_ERS_RESULT_RECOVERED;
717 }
718 
/*
 * PCI error-recovery resume handler: cycle the device (detach + attach)
 * once traffic can flow again.
 */
static void
efct_pci_io_resume(struct pci_dev *pdev)
{
	struct efct *efct = pci_get_drvdata(pdev);

	/* Perform device reset */
	efct_device_detach(efct);
	/* Bring device to online*/
	efct_device_attach(efct);
}
729 
730 MODULE_DEVICE_TABLE(pci, efct_pci_table);
731 
/* PCI AER callbacks: detect -> (optional) slot reset -> resume. */
static const struct pci_error_handlers efct_pci_err_handler = {
	.error_detected = efct_pci_io_error_detected,
	.slot_reset = efct_pci_io_slot_reset,
	.resume = efct_pci_io_resume,
};
737 
/* PCI driver registration glue for the efct target driver. */
static struct pci_driver efct_pci_driver = {
	.name		= EFCT_DRIVER_NAME,
	.id_table	= efct_pci_table,
	.probe		= efct_pci_probe,
	.remove		= efct_pci_remove,
	.err_handler	= &efct_pci_err_handler,
};
745 
746 static
efct_init(void)747 int __init efct_init(void)
748 {
749 	int rc;
750 
751 	rc = efct_device_init();
752 	if (rc) {
753 		pr_err("efct_device_init failed rc=%d\n", rc);
754 		return rc;
755 	}
756 
757 	rc = pci_register_driver(&efct_pci_driver);
758 	if (rc) {
759 		pr_err("pci_register_driver failed rc=%d\n", rc);
760 		efct_device_shutdown();
761 	}
762 
763 	return rc;
764 }
765 
efct_exit(void)766 static void __exit efct_exit(void)
767 {
768 	pci_unregister_driver(&efct_pci_driver);
769 	efct_device_shutdown();
770 }
771 
772 module_init(efct_init);
773 module_exit(efct_exit);
774 MODULE_VERSION(EFCT_DRIVER_VERSION);
775 MODULE_DESCRIPTION("Emulex Fibre Channel Target driver");
776 MODULE_LICENSE("GPL");
777 MODULE_AUTHOR("Broadcom");
778