xref: /linux/drivers/scsi/esas2r/esas2r_init.c (revision 8ca4fc323d2e4ab9dabbdd57633af40b0c7e6af9)
1 /*
2  *  linux/drivers/scsi/esas2r/esas2r_init.c
3  *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4  *
5  *  Copyright (c) 2001-2013 ATTO Technology, Inc.
6  *  (mailto:linuxdrivers@attotech.com)
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * as published by the Free Software Foundation; either version 2
11  * of the License, or (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * NO WARRANTY
19  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23  * solely responsible for determining the appropriateness of using and
24  * distributing the Program and assumes all risks associated with its
25  * exercise of rights under this Agreement, including but not limited to
26  * the risks and costs of program errors, damage to or loss of data,
27  * programs or equipment, and unavailability or interruption of operations.
28  *
29  * DISCLAIMER OF LIABILITY
30  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37  *
38  * You should have received a copy of the GNU General Public License
39  * along with this program; if not, write to the Free Software
40  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
41  * USA.
42  */
43 
44 #include "esas2r.h"
45 
46 static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
47 				 struct esas2r_mem_desc *mem_desc,
48 				 u32 align)
49 {
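	/*
	 * Over-allocate by "align" so the block can be aligned by hand
	 * below.  For example, size 0x1000 with align 256 allocates 0x1100
	 * bytes; PTR_ALIGN()/ALIGN() then round the returned virtual and
	 * physical addresses up to the next 256-byte boundary.  The raw
	 * pointer and padded size live on in esas2r_data/esas2r_param so
	 * the free path can recover the original allocation.
	 */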
50 	mem_desc->esas2r_param = mem_desc->size + align;
51 	mem_desc->virt_addr = NULL;
52 	mem_desc->phys_addr = 0;
53 	mem_desc->esas2r_data =
54 		dma_alloc_coherent(&a->pcid->dev,
55 				   (size_t)mem_desc->esas2r_param,
56 				   (dma_addr_t *)&mem_desc->phys_addr,
57 				   GFP_KERNEL);
59 
60 	if (mem_desc->esas2r_data == NULL) {
61 		esas2r_log(ESAS2R_LOG_CRIT,
62 			   "failed to allocate %lu bytes of consistent memory!",
63 			   (unsigned long)mem_desc->esas2r_param);
66 		return false;
67 	}
68 
69 	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
70 	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
71 	memset(mem_desc->virt_addr, 0, mem_desc->size);
72 	return true;
73 }
74 
75 static void esas2r_initmem_free(struct esas2r_adapter *a,
76 				struct esas2r_mem_desc *mem_desc)
77 {
78 	if (mem_desc->virt_addr == NULL)
79 		return;
80 
81 	/*
82 	 * Careful!  phys_addr and virt_addr may have been adjusted from the
83 	 * original allocation in order to return the desired alignment.  That
84 	 * means we have to use the original address (in esas2r_data) and size
85 	 * (esas2r_param) and calculate the original physical address based on
86 	 * the difference between the requested and actual allocation size.
87 	 */
88 	if (mem_desc->phys_addr) {
89 		int unalign = ((u8 *)mem_desc->virt_addr) -
90 			      ((u8 *)mem_desc->esas2r_data);
91 
92 		dma_free_coherent(&a->pcid->dev,
93 				  (size_t)mem_desc->esas2r_param,
94 				  mem_desc->esas2r_data,
95 				  (dma_addr_t)(mem_desc->phys_addr - unalign));
96 	} else {
97 		kfree(mem_desc->esas2r_data);
98 	}
99 
100 	mem_desc->virt_addr = NULL;
101 }
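
/*
 * A minimal usage sketch for the two helpers above (illustrative only;
 * the descriptor name and sizes are hypothetical):
 *
 *	struct esas2r_mem_desc md = { .size = 0x1000 };
 *
 *	if (esas2r_initmem_alloc(a, &md, 256)) {
 *		... use md.virt_addr (CPU) / md.phys_addr (device) ...
 *		esas2r_initmem_free(a, &md);
 *	}
 */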
102 
103 static bool alloc_vda_req(struct esas2r_adapter *a,
104 			  struct esas2r_request *rq)
105 {
106 	struct esas2r_mem_desc *memdesc = kzalloc(
107 		sizeof(struct esas2r_mem_desc), GFP_KERNEL);
108 
109 	if (memdesc == NULL) {
110 		esas2r_hdebug("could not alloc mem for vda request memdesc\n");
111 		return false;
112 	}
113 
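	/*
	 * Each request is one DMA allocation holding the VDA request frame
	 * followed by a driver data buffer, aligned to 256 bytes (matching
	 * the "VDA request and buffer align" padding in
	 * esas2r_get_uncached_size(); the precise hardware requirement is
	 * assumed here).
	 */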
114 	memdesc->size = sizeof(union atto_vda_req) +
115 			ESAS2R_DATA_BUF_LEN;
116 
117 	if (!esas2r_initmem_alloc(a, memdesc, 256)) {
118 		esas2r_hdebug("could not alloc mem for vda request\n");
119 		kfree(memdesc);
120 		return false;
121 	}
122 
123 	a->num_vrqs++;
124 	list_add(&memdesc->next_desc, &a->vrq_mds_head);
125 
126 	rq->vrq_md = memdesc;
127 	rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
128 	rq->vrq->scsi.handle = a->num_vrqs;
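	/* handles are 1-based (num_vrqs was just incremented); 0 is never used */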
129 
130 	return true;
131 }
132 
133 static void esas2r_unmap_regions(struct esas2r_adapter *a)
134 {
135 	if (a->regs)
136 		iounmap((void __iomem *)a->regs);
137 
138 	a->regs = NULL;
139 
140 	pci_release_region(a->pcid, 2);
141 
142 	if (a->data_window)
143 		iounmap((void __iomem *)a->data_window);
144 
145 	a->data_window = NULL;
146 
147 	pci_release_region(a->pcid, 0);
148 }
149 
150 static int esas2r_map_regions(struct esas2r_adapter *a)
151 {
152 	int error;
153 
154 	a->regs = NULL;
155 	a->data_window = NULL;
156 
157 	error = pci_request_region(a->pcid, 2, a->name);
158 	if (error != 0) {
159 		esas2r_log(ESAS2R_LOG_CRIT,
160 			   "pci_request_region(2) failed, error %d",
161 			   error);
162 
163 		return error;
164 	}
165 
166 	a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
167 					  pci_resource_len(a->pcid, 2));
168 	if (a->regs == NULL) {
169 		esas2r_log(ESAS2R_LOG_CRIT,
170 			   "ioremap failed for regs mem region\n");
171 		pci_release_region(a->pcid, 2);
172 		return -EFAULT;
173 	}
174 
175 	error = pci_request_region(a->pcid, 0, a->name);
176 	if (error != 0) {
177 		esas2r_log(ESAS2R_LOG_CRIT,
178 			   "pci_request_region(0) failed, error %d",
179 			   error);
180 		esas2r_unmap_regions(a);
181 		return error;
182 	}
183 
184 	a->data_window = (void __force *)
185 			 ioremap(pci_resource_start(a->pcid, 0),
186 				 pci_resource_len(a->pcid, 0));
187 	if (a->data_window == NULL) {
188 		esas2r_log(ESAS2R_LOG_CRIT,
189 			   "ioremap failed for data_window mem region\n");
190 		esas2r_unmap_regions(a);
191 		return -EFAULT;
192 	}
193 
194 	return 0;
195 }
196 
197 static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
198 {
199 	int i;
200 
201 	/* Set up interrupt mode based on the requested value */
202 	switch (intr_mode) {
203 	case INTR_MODE_LEGACY:
204 use_legacy_interrupts:
205 		a->intr_mode = INTR_MODE_LEGACY;
206 		break;
207 
208 	case INTR_MODE_MSI:
209 		i = pci_enable_msi(a->pcid);
210 		if (i != 0) {
211 			esas2r_log(ESAS2R_LOG_WARN,
212 				   "failed to enable MSI for adapter %d, "
213 				   "falling back to legacy interrupts "
214 				   "(err=%d)", a->index,
215 				   i);
216 			goto use_legacy_interrupts;
217 		}
218 		a->intr_mode = INTR_MODE_MSI;
219 		set_bit(AF2_MSI_ENABLED, &a->flags2);
220 		break;
221 
222 
223 	default:
224 		esas2r_log(ESAS2R_LOG_WARN,
225 			   "unknown interrupt_mode %d requested, "
226 			   "falling back to legacy interrupt",
227 			   intr_mode);
228 		goto use_legacy_interrupts;
229 	}
230 }
231 
232 static void esas2r_claim_interrupts(struct esas2r_adapter *a)
233 {
234 	unsigned long flags = 0;
235 
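	/*
	 * Legacy INTx lines may be shared with other devices; MSI vectors
	 * are exclusive to the device.
	 */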
236 	if (a->intr_mode == INTR_MODE_LEGACY)
237 		flags |= IRQF_SHARED;
238 
239 	esas2r_log(ESAS2R_LOG_INFO,
240 		   "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
241 		   a->pcid->irq, a, a->name, flags);
242 
243 	if (request_irq(a->pcid->irq,
244 			(a->intr_mode ==
245 			 INTR_MODE_LEGACY) ? esas2r_interrupt :
246 			esas2r_msi_interrupt,
247 			flags,
248 			a->name,
249 			a)) {
250 		esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
251 			   a->pcid->irq);
252 		return;
253 	}
254 
255 	set_bit(AF2_IRQ_CLAIMED, &a->flags2);
256 	esas2r_log(ESAS2R_LOG_INFO,
257 		   "claimed IRQ %d flags: 0x%lx",
258 		   a->pcid->irq, flags);
259 }
260 
261 int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
262 			int index)
263 {
264 	struct esas2r_adapter *a;
265 	u64 bus_addr = 0;
266 	int i;
267 	void *next_uncached;
268 	struct esas2r_request *first_request, *last_request;
269 	bool dma64 = false;
270 
271 	if (index >= MAX_ADAPTERS) {
272 		esas2r_log(ESAS2R_LOG_CRIT,
273 			   "tried to init invalid adapter index %u!",
274 			   index);
275 		return 0;
276 	}
277 
278 	if (esas2r_adapters[index]) {
279 		esas2r_log(ESAS2R_LOG_CRIT,
280 			   "tried to init existing adapter index %u!",
281 			   index);
282 		return 0;
283 	}
284 
285 	a = (struct esas2r_adapter *)host->hostdata;
286 	memset(a, 0, sizeof(struct esas2r_adapter));
287 	a->pcid = pcid;
288 	a->host = host;
289 
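	/*
	 * Prefer 64-bit DMA when dma_addr_t is wide enough and the platform
	 * actually requires addressing above 4GB; otherwise fall back to a
	 * 32-bit mask.
	 */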
290 	if (sizeof(dma_addr_t) > 4 &&
291 	    dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
292 	    !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
293 		dma64 = true;
294 
295 	if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
296 		esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
297 		esas2r_kill_adapter(index);
298 		return 0;
299 	}
300 
301 	esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
302 		       "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");
303 
304 	esas2r_adapters[index] = a;
305 	sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
306 	esas2r_debug("new adapter %p, name %s", a, a->name);
307 	spin_lock_init(&a->request_lock);
308 	spin_lock_init(&a->fw_event_lock);
309 	mutex_init(&a->fm_api_mutex);
310 	mutex_init(&a->fs_api_mutex);
311 	sema_init(&a->nvram_semaphore, 1);
312 
313 	esas2r_fw_event_off(a);
314 	snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
315 		 index);
316 	a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
317 
318 	init_waitqueue_head(&a->buffered_ioctl_waiter);
319 	init_waitqueue_head(&a->nvram_waiter);
320 	init_waitqueue_head(&a->fm_api_waiter);
321 	init_waitqueue_head(&a->fs_api_waiter);
322 	init_waitqueue_head(&a->vda_waiter);
323 
324 	INIT_LIST_HEAD(&a->general_req.req_list);
325 	INIT_LIST_HEAD(&a->active_list);
326 	INIT_LIST_HEAD(&a->defer_list);
327 	INIT_LIST_HEAD(&a->free_sg_list_head);
328 	INIT_LIST_HEAD(&a->avail_request);
329 	INIT_LIST_HEAD(&a->vrq_mds_head);
330 	INIT_LIST_HEAD(&a->fw_event_list);
331 
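	/*
	 * The request structures live in the Scsi_Host hostdata area,
	 * immediately after the adapter structure itself; the loop below
	 * attaches a DMA-able VDA request frame to each of them.
	 */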
332 	first_request = (struct esas2r_request *)((u8 *)(a + 1));
333 
334 	for (last_request = first_request, i = 1; i < num_requests;
335 	     last_request++, i++) {
336 		INIT_LIST_HEAD(&last_request->req_list);
337 		list_add_tail(&last_request->comp_list, &a->avail_request);
338 		if (!alloc_vda_req(a, last_request)) {
339 			esas2r_log(ESAS2R_LOG_CRIT,
340 				   "failed to allocate a VDA request!");
341 			esas2r_kill_adapter(index);
342 			return 0;
343 		}
344 	}
345 
346 	esas2r_debug("requests: %p to %p (%d, %d)", first_request,
347 		     last_request,
348 		     sizeof(*first_request),
349 		     num_requests);
350 
351 	if (esas2r_map_regions(a) != 0) {
352 		esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
353 		esas2r_kill_adapter(index);
354 		return 0;
355 	}
356 
357 	a->index = index;
358 
359 	/* interrupts will be disabled until we are done with init */
360 	atomic_inc(&a->dis_ints_cnt);
361 	atomic_inc(&a->disable_cnt);
362 	set_bit(AF_CHPRST_PENDING, &a->flags);
363 	set_bit(AF_DISC_PENDING, &a->flags);
364 	set_bit(AF_FIRST_INIT, &a->flags);
365 	set_bit(AF_LEGACY_SGE_MODE, &a->flags);
366 
367 	a->init_msg = ESAS2R_INIT_MSG_START;
368 	a->max_vdareq_size = 128;
369 	a->build_sgl = esas2r_build_sg_list_sge;
370 
371 	esas2r_setup_interrupts(a, interrupt_mode);
372 
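	/* one coherent allocation, carved up later by esas2r_init_adapter_struct() */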
373 	a->uncached_size = esas2r_get_uncached_size(a);
374 	a->uncached = dma_alloc_coherent(&pcid->dev,
375 					 (size_t)a->uncached_size,
376 					 (dma_addr_t *)&bus_addr,
377 					 GFP_KERNEL);
378 	if (a->uncached == NULL) {
379 		esas2r_log(ESAS2R_LOG_CRIT,
380 			   "failed to allocate %d bytes of consistent memory!",
381 			   a->uncached_size);
382 		esas2r_kill_adapter(index);
383 		return 0;
384 	}
385 
386 	a->uncached_phys = bus_addr;
387 
388 	esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
389 		     a->uncached_size,
390 		     a->uncached,
391 		     upper_32_bits(bus_addr),
392 		     lower_32_bits(bus_addr));
393 	memset(a->uncached, 0, a->uncached_size);
394 	next_uncached = a->uncached;
395 
396 	if (!esas2r_init_adapter_struct(a,
397 					&next_uncached)) {
398 		esas2r_log(ESAS2R_LOG_CRIT,
399 			   "failed to initialize adapter structure (2)!");
400 		esas2r_kill_adapter(index);
401 		return 0;
402 	}
403 
404 	tasklet_init(&a->tasklet,
405 		     esas2r_adapter_tasklet,
406 		     (unsigned long)a);
407 
408 	/*
409 	 * Disable chip interrupts to prevent spurious interrupts
410 	 * until we claim the IRQ.
411 	 */
412 	esas2r_disable_chip_interrupts(a);
413 	esas2r_check_adapter(a);
414 
415 	if (!esas2r_init_adapter_hw(a, true)) {
416 		esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
417 	} else {
418 		esas2r_debug("esas2r_init_adapter ok");
419 	}
420 
421 	esas2r_claim_interrupts(a);
422 
423 	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
424 		esas2r_enable_chip_interrupts(a);
425 
426 	set_bit(AF2_INIT_DONE, &a->flags2);
427 	if (!test_bit(AF_DEGRADED_MODE, &a->flags))
428 		esas2r_kickoff_timer(a);
429 	esas2r_debug("esas2r_init_adapter done for %p (%d)",
430 		     a, a->disable_cnt);
431 
432 	return 1;
433 }
434 
435 static void esas2r_adapter_power_down(struct esas2r_adapter *a,
436 				      int power_management)
437 {
438 	struct esas2r_mem_desc *memdesc, *next;
439 
440 	if (test_bit(AF2_INIT_DONE, &a->flags2) &&
441 	    !test_bit(AF_DEGRADED_MODE, &a->flags)) {
442 		if (!power_management) {
443 			del_timer_sync(&a->timer);
444 			tasklet_kill(&a->tasklet);
445 		}
446 		esas2r_power_down(a);
447 
448 		/*
449 		 * There are versions of firmware that do not handle the sync
450 		 * cache command correctly.  Stall here to ensure that the
451 		 * cache is lazily flushed.
452 		 */
453 		mdelay(500);
454 		esas2r_debug("chip halted");
455 	}
456 
457 	/* Remove sysfs binary files */
458 	if (a->sysfs_fw_created) {
459 		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
460 		a->sysfs_fw_created = 0;
461 	}
462 
463 	if (a->sysfs_fs_created) {
464 		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
465 		a->sysfs_fs_created = 0;
466 	}
467 
468 	if (a->sysfs_vda_created) {
469 		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
470 		a->sysfs_vda_created = 0;
471 	}
472 
473 	if (a->sysfs_hw_created) {
474 		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
475 		a->sysfs_hw_created = 0;
476 	}
477 
478 	if (a->sysfs_live_nvram_created) {
479 		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
480 				      &bin_attr_live_nvram);
481 		a->sysfs_live_nvram_created = 0;
482 	}
483 
484 	if (a->sysfs_default_nvram_created) {
485 		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
486 				      &bin_attr_default_nvram);
487 		a->sysfs_default_nvram_created = 0;
488 	}
489 
490 	/* Clean up interrupts */
491 	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
492 		esas2r_log_dev(ESAS2R_LOG_INFO,
493 			       &(a->pcid->dev),
494 			       "free_irq(%d) called", a->pcid->irq);
495 
496 		free_irq(a->pcid->irq, a);
497 		esas2r_debug("IRQ released");
498 		clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
499 	}
500 
501 	if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
502 		pci_disable_msi(a->pcid);
503 		clear_bit(AF2_MSI_ENABLED, &a->flags2);
504 		esas2r_debug("MSI disabled");
505 	}
506 
507 	if (a->inbound_list_md.virt_addr)
508 		esas2r_initmem_free(a, &a->inbound_list_md);
509 
510 	if (a->outbound_list_md.virt_addr)
511 		esas2r_initmem_free(a, &a->outbound_list_md);
512 
513 	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
514 				 next_desc) {
515 		esas2r_initmem_free(a, memdesc);
516 	}
517 
518 	/* Following frees everything allocated via alloc_vda_req */
519 	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
520 		esas2r_initmem_free(a, memdesc);
521 		list_del(&memdesc->next_desc);
522 		kfree(memdesc);
523 	}
524 
525 	kfree(a->first_ae_req);
526 	a->first_ae_req = NULL;
527 
528 	kfree(a->sg_list_mds);
529 	a->sg_list_mds = NULL;
530 
531 	kfree(a->req_table);
532 	a->req_table = NULL;
533 
534 	if (a->regs) {
535 		esas2r_unmap_regions(a);
536 		a->regs = NULL;
537 		a->data_window = NULL;
538 		esas2r_debug("regions unmapped");
539 	}
540 }
541 
542 /* Release/free allocated resources for the specified adapter. */
543 void esas2r_kill_adapter(int i)
544 {
545 	struct esas2r_adapter *a = esas2r_adapters[i];
546 
547 	if (a) {
548 		unsigned long flags;
549 		struct workqueue_struct *wq;
550 		esas2r_debug("killing adapter %p [%d] ", a, i);
551 		esas2r_fw_event_off(a);
552 		esas2r_adapter_power_down(a, 0);
553 		if (esas2r_buffered_ioctl &&
554 		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
555 			dma_free_coherent(&a->pcid->dev,
556 					  (size_t)esas2r_buffered_ioctl_size,
557 					  esas2r_buffered_ioctl,
558 					  esas2r_buffered_ioctl_addr);
559 			esas2r_buffered_ioctl = NULL;
560 		}
561 
562 		if (a->vda_buffer) {
563 			dma_free_coherent(&a->pcid->dev,
564 					  (size_t)VDA_MAX_BUFFER_SIZE,
565 					  a->vda_buffer,
566 					  (dma_addr_t)a->ppvda_buffer);
567 			a->vda_buffer = NULL;
568 		}
569 		if (a->fs_api_buffer) {
570 			dma_free_coherent(&a->pcid->dev,
571 					  (size_t)a->fs_api_buffer_size,
572 					  a->fs_api_buffer,
573 					  (dma_addr_t)a->ppfs_api_buffer);
574 			a->fs_api_buffer = NULL;
575 		}
576 
577 		kfree(a->local_atto_ioctl);
578 		a->local_atto_ioctl = NULL;
579 
580 		spin_lock_irqsave(&a->fw_event_lock, flags);
581 		wq = a->fw_event_q;
582 		a->fw_event_q = NULL;
583 		spin_unlock_irqrestore(&a->fw_event_lock, flags);
584 		if (wq)
585 			destroy_workqueue(wq);
586 
587 		if (a->uncached) {
588 			dma_free_coherent(&a->pcid->dev,
589 					  (size_t)a->uncached_size,
590 					  a->uncached,
591 					  (dma_addr_t)a->uncached_phys);
592 			a->uncached = NULL;
593 			esas2r_debug("uncached area freed");
594 		}
595 
596 		esas2r_log_dev(ESAS2R_LOG_INFO,
597 			       &(a->pcid->dev),
598 			       "pci_disable_device() called.  msix_enabled: %d "
599 			       "msi_enabled: %d irq: %d pin: %d",
600 			       a->pcid->msix_enabled,
601 			       a->pcid->msi_enabled,
602 			       a->pcid->irq,
603 			       a->pcid->pin);
604 
605 		esas2r_log_dev(ESAS2R_LOG_INFO,
606 			       &(a->pcid->dev),
607 			       "before pci_disable_device() enable_cnt: %d",
608 			       a->pcid->enable_cnt.counter);
609 
610 		pci_disable_device(a->pcid);
611 		esas2r_log_dev(ESAS2R_LOG_INFO,
612 			       &(a->pcid->dev),
613 			       "after pci_disable_device() enable_cnt: %d",
614 			       a->pcid->enable_cnt.counter);
615 
616 		esas2r_log_dev(ESAS2R_LOG_INFO,
617 			       &(a->pcid->dev),
618 			       "pci_set_drv_data(%p, NULL) called",
619 			       a->pcid);
620 
621 		pci_set_drvdata(a->pcid, NULL);
622 		esas2r_adapters[i] = NULL;
623 
624 		if (test_bit(AF2_INIT_DONE, &a->flags2)) {
625 			clear_bit(AF2_INIT_DONE, &a->flags2);
626 
627 			set_bit(AF_DEGRADED_MODE, &a->flags);
628 
629 			esas2r_log_dev(ESAS2R_LOG_INFO,
630 				       &(a->host->shost_gendev),
631 				       "scsi_remove_host() called");
632 
633 			scsi_remove_host(a->host);
634 
635 			esas2r_log_dev(ESAS2R_LOG_INFO,
636 				       &(a->host->shost_gendev),
637 				       "scsi_host_put() called");
638 
639 			scsi_host_put(a->host);
640 		}
641 	}
642 }
643 
644 static int __maybe_unused esas2r_suspend(struct device *dev)
645 {
646 	struct Scsi_Host *host = dev_get_drvdata(dev);
647 	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
648 
649 	esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()");
650 	if (!a)
651 		return -ENODEV;
652 
653 	esas2r_adapter_power_down(a, 1);
654 	esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0");
655 	return 0;
656 }
657 
658 static int __maybe_unused esas2r_resume(struct device *dev)
659 {
660 	struct Scsi_Host *host = dev_get_drvdata(dev);
661 	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
662 	int rez = 0;
663 
664 	esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()");
665 
666 	if (!a) {
667 		rez = -ENODEV;
668 		goto error_exit;
669 	}
670 
671 	if (esas2r_map_regions(a) != 0) {
672 		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
673 		rez = -ENOMEM;
674 		goto error_exit;
675 	}
676 
677 	/* Set up interrupt mode */
678 	esas2r_setup_interrupts(a, a->intr_mode);
679 
680 	/*
681 	 * Disable chip interrupts to prevent spurious interrupts until we
682 	 * claim the IRQ.
683 	 */
684 	esas2r_disable_chip_interrupts(a);
685 	if (!esas2r_power_up(a, true)) {
686 		esas2r_debug("yikes, esas2r_power_up failed");
687 		rez = -ENOMEM;
688 		goto error_exit;
689 	}
690 
691 	esas2r_claim_interrupts(a);
692 
693 	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
694 		/*
695 		 * Now that system interrupt(s) are claimed, we can enable
696 		 * chip interrupts.
697 		 */
698 		esas2r_enable_chip_interrupts(a);
699 		esas2r_kickoff_timer(a);
700 	} else {
701 		esas2r_debug("yikes, unable to claim IRQ");
702 		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
703 		rez = -ENOMEM;
704 		goto error_exit;
705 	}
706 
707 error_exit:
708 	esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d",
709 		       rez);
710 	return rez;
711 }
712 
713 SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume);
714 
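/*
 * Flag the adapter as degraded and log the reason.  Always returns false so
 * callers can fail with "return esas2r_set_degraded_mode(a, ...);".
 */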
715 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
716 {
717 	set_bit(AF_DEGRADED_MODE, &a->flags);
718 	esas2r_log(ESAS2R_LOG_CRIT,
719 		   "setting adapter to degraded mode: %s\n", error_str);
720 	return false;
721 }
722 
723 u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
724 {
725 	return sizeof(struct esas2r_sas_nvram)
726 	       + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
727 	       + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
728 	       + 8
729 	       + (num_sg_lists * (u16)sgl_page_size)
730 	       + ALIGN((num_requests + num_ae_requests + 1 +
731 			ESAS2R_LIST_EXTRA) *
732 		       sizeof(struct esas2r_inbound_list_source_entry),
733 		       8)
734 	       + ALIGN((num_requests + num_ae_requests + 1 +
735 			ESAS2R_LIST_EXTRA) *
736 		       sizeof(struct atto_vda_ob_rsp), 8)
737 	       + 256; /* VDA request and buffer align */
738 }
739 
740 static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
741 {
742 	if (pci_is_pcie(a->pcid)) {
743 		u16 devcontrol;
744 
745 		pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
746 
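		/*
		 * Clamp the maximum read request size to 512 bytes; larger
		 * values are assumed not to be handled by this hardware.
		 */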
747 		if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
748 		     PCI_EXP_DEVCTL_READRQ_512B) {
749 			esas2r_log(ESAS2R_LOG_INFO,
750 				   "max read request size > 512B");
751 
752 			devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
753 			devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
754 			pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
755 						   devcontrol);
756 		}
757 	}
758 }
759 
760 /*
761  * Determine the organization of the uncached data area and
762  * finish initializing the adapter structure
763  */
764 bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
765 				void **uncached_area)
766 {
767 	u32 i;
768 	u8 *high;
769 	struct esas2r_inbound_list_source_entry *element;
770 	struct esas2r_request *rq;
771 	struct esas2r_mem_desc *sgl;
772 
773 	spin_lock_init(&a->sg_list_lock);
774 	spin_lock_init(&a->mem_lock);
775 	spin_lock_init(&a->queue_lock);
776 
777 	a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
778 
779 	if (!alloc_vda_req(a, &a->general_req)) {
780 		esas2r_hdebug(
781 			"failed to allocate a VDA request for the general req!");
782 		return false;
783 	}
784 
785 	/* allocate requests for asynchronous events */
786 	a->first_ae_req =
787 		kcalloc(num_ae_requests, sizeof(struct esas2r_request),
788 			GFP_KERNEL);
789 
790 	if (a->first_ae_req == NULL) {
791 		esas2r_log(ESAS2R_LOG_CRIT,
792 			   "failed to allocate memory for asynchronous events");
793 		return false;
794 	}
795 
796 	/* allocate the S/G list memory descriptors */
797 	a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc),
798 				 GFP_KERNEL);
799 
800 	if (a->sg_list_mds == NULL) {
801 		esas2r_log(ESAS2R_LOG_CRIT,
802 			   "failed to allocate memory for s/g list descriptors");
803 		return false;
804 	}
805 
806 	/* allocate the request table */
807 	a->req_table =
808 		kcalloc(num_requests + num_ae_requests + 1,
809 			sizeof(struct esas2r_request *),
810 			GFP_KERNEL);
811 
812 	if (a->req_table == NULL) {
813 		esas2r_log(ESAS2R_LOG_CRIT,
814 			   "failed to allocate memory for the request table");
815 		return false;
816 	}
817 
818 	/* initialize PCI configuration space */
819 	esas2r_init_pci_cfg_space(a);
820 
821 	/*
822 	 * the thunder_stream boards all have a serial flash part that has a
823 	 * different base address on the AHB bus.
824 	 */
825 	if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
826 	    && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
827 		set_bit(AF2_THUNDERBOLT, &a->flags2);
828 
829 	if (test_bit(AF2_THUNDERBOLT, &a->flags2))
830 		set_bit(AF2_SERIAL_FLASH, &a->flags2);
831 
832 	if (a->pcid->subsystem_device == ATTO_TLSH_1068)
833 		set_bit(AF2_THUNDERLINK, &a->flags2);
834 
835 	/* Uncached Area */
836 	high = (u8 *)*uncached_area;
837 
838 	/* initialize the scatter/gather table pages */
839 
840 	for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
841 		sgl->size = sgl_page_size;
842 
843 		list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
844 
845 		if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
846 			/* Allow the driver to load if the minimum count met. */
847 			if (i < NUM_SGL_MIN)
848 				return false;
849 			break;
850 		}
851 	}
852 
853 	/* compute the size of the lists */
854 	a->list_size = num_requests + ESAS2R_LIST_EXTRA;
855 
856 	/* allocate the inbound list */
857 	a->inbound_list_md.size =
858 		a->list_size *
859 		sizeof(struct esas2r_inbound_list_source_entry);
860 
861 	if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
862 		esas2r_hdebug("failed to allocate IB list");
863 		return false;
864 	}
865 
866 	/* allocate the outbound list */
867 	a->outbound_list_md.size = a->list_size *
868 				   sizeof(struct atto_vda_ob_rsp);
869 
870 	if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
871 				  ESAS2R_LIST_ALIGN)) {
872 		esas2r_hdebug("failed to allocate OB list");
873 		return false;
874 	}
875 
876 	/* allocate the NVRAM structure */
877 	a->nvram = (struct esas2r_sas_nvram *)high;
878 	high += sizeof(struct esas2r_sas_nvram);
879 
880 	/* allocate the discovery buffer */
881 	a->disc_buffer = high;
882 	high += ESAS2R_DISC_BUF_LEN;
883 	high = PTR_ALIGN(high, 8);
884 
885 	/* allocate the outbound list copy pointer */
886 	a->outbound_copy = (u32 volatile *)high;
887 	high += sizeof(u32);
888 
889 	if (!test_bit(AF_NVR_VALID, &a->flags))
890 		esas2r_nvram_set_defaults(a);
891 
892 	/* update the caller's uncached memory area pointer */
893 	*uncached_area = (void *)high;
894 
895 	/* initialize the allocated memory */
896 	if (test_bit(AF_FIRST_INIT, &a->flags)) {
897 		esas2r_targ_db_initialize(a);
898 
899 		/* prime parts of the inbound list */
900 		element = (struct esas2r_inbound_list_source_entry *)
901 			  a->inbound_list_md.virt_addr;
904 
905 		for (i = 0; i < a->list_size; i++) {
906 			element->address = 0;
907 			element->reserved = 0;
908 			element->length =
909 				cpu_to_le32(HWILSE_INTERFACE_F0 |
910 					    (sizeof(union atto_vda_req) /
911 					     sizeof(u32)));
913 			element++;
914 		}
915 
916 		/* init the AE requests */
917 		for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
918 		     i++) {
919 			INIT_LIST_HEAD(&rq->req_list);
920 			if (!alloc_vda_req(a, rq)) {
921 				esas2r_hdebug(
922 					"failed to allocate a VDA request!");
923 				return false;
924 			}
925 
926 			esas2r_rq_init_request(rq, a);
927 
928 			/* override the completion function */
929 			rq->comp_cb = esas2r_ae_complete;
930 		}
931 	}
932 
933 	return true;
934 }
935 
936 /* This code will verify that the chip is operational. */
937 bool esas2r_check_adapter(struct esas2r_adapter *a)
938 {
939 	u32 starttime;
940 	u32 doorbell;
941 	u64 ppaddr;
942 	u32 dw;
943 
944 	/*
945 	 * if the chip reset detected flag is set, we can bypass a bunch of
946 	 * stuff.
947 	 */
948 	if (test_bit(AF_CHPRST_DETECTED, &a->flags))
949 		goto skip_chip_reset;
950 
951 	/*
952 	 * BEFORE WE DO ANYTHING, disable the chip interrupts!  the boot driver
953 	 * may have left them enabled or we may be recovering from a fault.
954 	 */
955 	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
956 	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
957 
958 	/*
959 	 * wait for the firmware to become ready by forcing an interrupt and
960 	 * waiting for a response.
961 	 */
962 	starttime = jiffies_to_msecs(jiffies);
963 
964 	while (true) {
965 		esas2r_force_interrupt(a);
966 		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
967 		if (doorbell == 0xFFFFFFFF) {
968 			/*
969 			 * Give the firmware up to two seconds to enable
970 			 * register access after a reset.
971 			 */
972 			if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
973 				return esas2r_set_degraded_mode(a,
974 								"unable to access registers");
975 		} else if (doorbell & DRBL_FORCE_INT) {
976 			u32 ver = (doorbell & DRBL_FW_VER_MSK);
977 
978 			/*
979 			 * This driver supports version 0 and version 1 of
980 			 * the API
981 			 */
982 			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
983 						    doorbell);
984 
985 			if (ver == DRBL_FW_VER_0) {
986 				set_bit(AF_LEGACY_SGE_MODE, &a->flags);
987 
988 				a->max_vdareq_size = 128;
989 				a->build_sgl = esas2r_build_sg_list_sge;
990 			} else if (ver == DRBL_FW_VER_1) {
991 				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
992 
993 				a->max_vdareq_size = 1024;
994 				a->build_sgl = esas2r_build_sg_list_prd;
995 			} else {
996 				return esas2r_set_degraded_mode(a,
997 								"unknown firmware version");
998 			}
999 			break;
1000 		}
1001 
1002 		schedule_timeout_interruptible(msecs_to_jiffies(100));
1003 
1004 		if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
1005 			esas2r_hdebug("FW ready TMO");
1006 			esas2r_bugon();
1007 
1008 			return esas2r_set_degraded_mode(a,
1009 							"firmware start has timed out");
1010 		}
1011 	}
1012 
1013 	/* purge any asynchronous events since we will repost them later */
1014 	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
1015 	starttime = jiffies_to_msecs(jiffies);
1016 
1017 	while (true) {
1018 		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1019 		if (doorbell & DRBL_MSG_IFC_DOWN) {
1020 			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1021 						    doorbell);
1022 			break;
1023 		}
1024 
1025 		schedule_timeout_interruptible(msecs_to_jiffies(50));
1026 
1027 		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1028 			esas2r_hdebug("timeout waiting for interface down");
1029 			break;
1030 		}
1031 	}
1032 skip_chip_reset:
1033 	/*
1034 	 * first things first, before we go changing any of these registers
1035 	 * disable the communication lists.
1036 	 */
1037 	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
1038 	dw &= ~MU_ILC_ENABLE;
1039 	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
1040 	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
1041 	dw &= ~MU_OLC_ENABLE;
1042 	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
1043 
1044 	/* configure the communication list addresses */
1045 	ppaddr = a->inbound_list_md.phys_addr;
1046 	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
1047 				    lower_32_bits(ppaddr));
1048 	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
1049 				    upper_32_bits(ppaddr));
1050 	ppaddr = a->outbound_list_md.phys_addr;
1051 	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
1052 				    lower_32_bits(ppaddr));
1053 	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
1054 				    upper_32_bits(ppaddr));
1055 	ppaddr = a->uncached_phys +
1056 		 ((u8 *)a->outbound_copy - a->uncached);
1057 	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
1058 				    lower_32_bits(ppaddr));
1059 	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
1060 				    upper_32_bits(ppaddr));
1061 
1062 	/* reset the read and write pointers */
1063 	*a->outbound_copy =
1064 		a->last_write =
1065 			a->last_read = a->list_size - 1;
1066 	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
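	/*
	 * Starting all pointers at list_size - 1 with the toggle bit set
	 * means the first post wraps to entry zero (an assumption based on
	 * how last_write is advanced elsewhere in this driver).
	 */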
1067 	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
1068 				    a->last_write);
1069 	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
1070 				    a->last_write);
1071 	esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
1072 				    a->last_write);
1073 	esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
1074 				    MU_OLW_TOGGLE | a->last_write);
1075 
1076 	/* configure the interface select fields */
1077 	dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
1078 	dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
1079 	esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
1080 				    (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
1081 	dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
1082 	dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
1083 	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
1084 				    (dw | MU_OLIC_LIST_F0 |
1085 				     MU_OLIC_SOURCE_DDR));
1086 
1087 	/* finish configuring the communication lists */
1088 	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
1089 	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
1090 	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
1091 	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
1092 	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
1093 	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
1094 	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
1095 	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
1096 	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
1097 
1098 	/*
1099 	 * notify the firmware that we're done setting up the communication
1100 	 * list registers.  wait here until the firmware is done configuring
1101 	 * its lists.  it will signal that it is done by enabling the lists.
1102 	 */
1103 	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
1104 	starttime = jiffies_to_msecs(jiffies);
1105 
1106 	while (true) {
1107 		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1108 		if (doorbell & DRBL_MSG_IFC_INIT) {
1109 			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1110 						    doorbell);
1111 			break;
1112 		}
1113 
1114 		schedule_timeout_interruptible(msecs_to_jiffies(100));
1115 
1116 		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1117 			esas2r_hdebug(
1118 				"timeout waiting for communication list init");
1119 			esas2r_bugon();
1120 			return esas2r_set_degraded_mode(a,
1121 							"timeout waiting for communication list init");
1122 		}
1123 	}
1124 
1125 	/*
1126 	 * flag whether the firmware supports the power down doorbell.  we
1127 	 * determine this by reading the inbound doorbell enable mask.
1128 	 */
1129 	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
1130 	if (doorbell & DRBL_POWER_DOWN)
1131 		set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
1132 	else
1133 		clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
1134 
1135 	/*
1136 	 * enable assertion of outbound queue and doorbell interrupts in the
1137 	 * main interrupt cause register.
1138 	 */
1139 	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
1140 	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
1141 	return true;
1142 }
1143 
1144 /* Process the initialization message just completed and format the next one. */
1145 static bool esas2r_format_init_msg(struct esas2r_adapter *a,
1146 				   struct esas2r_request *rq)
1147 {
1148 	u32 msg = a->init_msg;
1149 	struct atto_vda_cfg_init *ci;
1150 
1151 	a->init_msg = 0;
1152 
1153 	switch (msg) {
1154 	case ESAS2R_INIT_MSG_START:
1155 	case ESAS2R_INIT_MSG_REINIT:
1156 	{
1157 		esas2r_hdebug("CFG init");
1158 		esas2r_build_cfg_req(a,
1159 				     rq,
1160 				     VDA_CFG_INIT,
1161 				     0,
1162 				     NULL);
1163 		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
1164 		ci->sgl_page_size = cpu_to_le32(sgl_page_size);
1165 		/* firmware interface overflows in y2106 */
1166 		ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
1167 		rq->flags |= RF_FAILURE_OK;
1168 		a->init_msg = ESAS2R_INIT_MSG_INIT;
1169 		break;
1170 	}
1171 
1172 	case ESAS2R_INIT_MSG_INIT:
1173 		if (rq->req_stat == RS_SUCCESS) {
1174 			u32 major;
1175 			u32 minor;
1176 			u16 fw_release;
1177 
1178 			a->fw_version = le16_to_cpu(
1179 				rq->func_rsp.cfg_rsp.vda_version);
1180 			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
1181 			fw_release = le16_to_cpu(
1182 				rq->func_rsp.cfg_rsp.fw_release);
1183 			major = LOBYTE(fw_release);
1184 			minor = HIBYTE(fw_release);
1185 			a->fw_version += (major << 16) + (minor << 24);
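			/*
			 * fw_version now packs the release into the upper
			 * word (major in bits 16-23, minor in bits 24-31)
			 * with the VDA interface version in the low word.
			 */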
1186 		} else {
1187 			esas2r_hdebug("FAILED");
1188 		}
1189 
1190 		/*
1191 		 * the 2.71 and earlier releases of R6xx firmware did not error
1192 		 * unsupported config requests correctly.
1193 		 */
1194 
1195 		if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
1196 		    || (be32_to_cpu(a->fw_version) > 0x00524702)) {
1197 			esas2r_hdebug("CFG get init");
1198 			esas2r_build_cfg_req(a,
1199 					     rq,
1200 					     VDA_CFG_GET_INIT2,
1201 					     sizeof(struct atto_vda_cfg_init),
1202 					     NULL);
1203 
1204 			rq->vrq->cfg.sg_list_offset = offsetof(
1205 				struct atto_vda_cfg_req,
1206 				data.sge);
1207 			rq->vrq->cfg.data.prde.ctl_len =
1208 				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
1209 			rq->vrq->cfg.data.prde.address = cpu_to_le64(
1210 				rq->vrq_md->phys_addr +
1211 				sizeof(union atto_vda_req));
1212 			rq->flags |= RF_FAILURE_OK;
1213 			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
1214 			break;
1215 		}
1216 		fallthrough;
1217 
1218 	case ESAS2R_INIT_MSG_GET_INIT:
1219 		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
1220 			ci = (struct atto_vda_cfg_init *)rq->data_buf;
1221 			if (rq->req_stat == RS_SUCCESS) {
1222 				a->num_targets_backend =
1223 					le32_to_cpu(ci->num_targets_backend);
1224 				a->ioctl_tunnel =
1225 					le32_to_cpu(ci->ioctl_tunnel);
1226 			} else {
1227 				esas2r_hdebug("FAILED");
1228 			}
1229 		}
1230 		fallthrough;
1231 
1232 	default:
1233 		rq->req_stat = RS_SUCCESS;
1234 		return false;
1235 	}
1236 	return true;
1237 }
1238 
1239 /*
1240  * Perform initialization messages via the request queue.  Messages are
1241  * performed with interrupts disabled.
1242  */
1243 bool esas2r_init_msgs(struct esas2r_adapter *a)
1244 {
1245 	bool success = true;
1246 	struct esas2r_request *rq = &a->general_req;
1247 
1248 	esas2r_rq_init_request(rq, a);
1249 	rq->comp_cb = esas2r_dummy_complete;
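	/*
	 * A dummy completion callback is sufficient: each message is started
	 * and then polled synchronously via esas2r_wait_request() below.
	 */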
1250 
1251 	if (a->init_msg == 0)
1252 		a->init_msg = ESAS2R_INIT_MSG_REINIT;
1253 
1254 	while (a->init_msg) {
1255 		if (esas2r_format_init_msg(a, rq)) {
1256 			unsigned long flags;
1257 			while (true) {
1258 				spin_lock_irqsave(&a->queue_lock, flags);
1259 				esas2r_start_vda_request(a, rq);
1260 				spin_unlock_irqrestore(&a->queue_lock, flags);
1261 				esas2r_wait_request(a, rq);
1262 				if (rq->req_stat != RS_PENDING)
1263 					break;
1264 			}
1265 		}
1266 
1267 		if (rq->req_stat == RS_SUCCESS
1268 		    || ((rq->flags & RF_FAILURE_OK)
1269 			&& rq->req_stat != RS_TIMEOUT))
1270 			continue;
1271 
1272 		esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
1273 			   a->init_msg, rq->req_stat, rq->flags);
1274 		a->init_msg = ESAS2R_INIT_MSG_START;
1275 		success = false;
1276 		break;
1277 	}
1278 
1279 	esas2r_rq_destroy_request(rq, a);
1280 	return success;
1281 }
1282 
1283 /* Initialize the adapter chip */
1284 bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
1285 {
1286 	bool rslt = false;
1287 	struct esas2r_request *rq;
1288 	u32 i;
1289 
1290 	if (test_bit(AF_DEGRADED_MODE, &a->flags))
1291 		goto exit;
1292 
1293 	if (!test_bit(AF_NVR_VALID, &a->flags)) {
1294 		if (!esas2r_nvram_read_direct(a))
1295 			esas2r_log(ESAS2R_LOG_WARN,
1296 				   "invalid/missing NVRAM parameters");
1297 	}
1298 
1299 	if (!esas2r_init_msgs(a)) {
1300 		esas2r_set_degraded_mode(a, "init messages failed");
1301 		goto exit;
1302 	}
1303 
1304 	/* The firmware is ready. */
1305 	clear_bit(AF_DEGRADED_MODE, &a->flags);
1306 	clear_bit(AF_CHPRST_PENDING, &a->flags);
1307 
1308 	/* Post all the async event requests */
1309 	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
1310 		esas2r_start_ae_request(a, rq);
1311 
1312 	if (!a->flash_rev[0])
1313 		esas2r_read_flash_rev(a);
1314 
1315 	if (!a->image_type[0])
1316 		esas2r_read_image_type(a);
1317 
1318 	if (a->fw_version == 0)
1319 		a->fw_rev[0] = 0;
1320 	else
1321 		sprintf(a->fw_rev, "%1d.%02d",
1322 			(int)LOBYTE(HIWORD(a->fw_version)),
1323 			(int)HIBYTE(HIWORD(a->fw_version)));
1324 
1325 	esas2r_hdebug("firmware revision: %s", a->fw_rev);
1326 
1327 	if (test_bit(AF_CHPRST_DETECTED, &a->flags)
1328 	    && (test_bit(AF_FIRST_INIT, &a->flags))) {
1329 		esas2r_enable_chip_interrupts(a);
1330 		return true;
1331 	}
1332 
1333 	/* initialize discovery */
1334 	esas2r_disc_initialize(a);
1335 
1336 	/*
1337 	 * wait for the device wait time to expire here if requested.  this is
1338 	 * usually requested during initial driver load and possibly when
1339 	 * resuming from a low power state.  deferred device waiting will use
1340 	 * interrupts.  chip reset recovery always defers device waiting to
1341 	 * avoid being in a TASKLET too long.
1342 	 */
1343 	if (init_poll) {
1344 		u32 currtime = a->disc_start_time;
1345 		u32 nexttick = 100;
1346 		u32 deltatime;
1347 
1348 		/*
1349 		 * Block Tasklets from getting scheduled and indicate this is
1350 		 * polled discovery.
1351 		 */
1352 		set_bit(AF_TASKLET_SCHEDULED, &a->flags);
1353 		set_bit(AF_DISC_POLLED, &a->flags);
1354 
1355 		/*
1356 		 * Temporarily bring the disable count to zero to enable
1357 		 * deferred processing.  Note that the count is already zero
1358 		 * after the first initialization.
1359 		 */
1360 		if (test_bit(AF_FIRST_INIT, &a->flags))
1361 			atomic_dec(&a->disable_cnt);
1362 
1363 		while (test_bit(AF_DISC_PENDING, &a->flags)) {
1364 			schedule_timeout_interruptible(msecs_to_jiffies(100));
1365 
1366 			/*
1367 			 * Determine the need for a timer tick based on the
1368 			 * delta time between this and the last iteration of
1369 			 * this loop.  We don't use the absolute time because
1370 			 * then we would have to worry about when nexttick
1371 			 * wraps and currtime hasn't yet.
1372 			 */
1373 			deltatime = jiffies_to_msecs(jiffies) - currtime;
1374 			currtime += deltatime;
1375 
1376 			/*
1377 			 * Process any waiting discovery as long as the chip is
1378 			 * up.  If a chip reset happens during initial polling,
1379 			 * we have to make sure the timer tick processes the
1380 			 * doorbell indicating the firmware is ready.
1381 			 */
1382 			if (!test_bit(AF_CHPRST_PENDING, &a->flags))
1383 				esas2r_disc_check_for_work(a);
1384 
1385 			/* Simulate a timer tick. */
1386 			if (nexttick <= deltatime) {
1387 
1388 				/* Time for a timer tick */
1389 				nexttick += 100;
1390 				esas2r_timer_tick(a);
1391 			}
1392 
1393 			if (nexttick > deltatime)
1394 				nexttick -= deltatime;
1395 
1396 			/* Do any deferred processing */
1397 			if (esas2r_is_tasklet_pending(a))
1398 				esas2r_do_tasklet_tasks(a);
1399 
1400 		}
1401 
1402 		if (test_bit(AF_FIRST_INIT, &a->flags))
1403 			atomic_inc(&a->disable_cnt);
1404 
1405 		clear_bit(AF_DISC_POLLED, &a->flags);
1406 		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
1407 	}
1408 
1409 
1410 	esas2r_targ_db_report_changes(a);
1411 
1412 	/*
1413 	 * For cases where (a) the initialization messages processing may
1414 	 * handle an interrupt for a port event and a discovery is waiting, but
1415 	 * we are not waiting for devices, or (b) the device wait time has been
1416 	 * exhausted but there is still discovery pending, start any leftover
1417 	 * discovery in interrupt driven mode.
1418 	 */
1419 	esas2r_disc_start_waiting(a);
1420 
1421 	/* Enable chip interrupts */
1422 	a->int_mask = ESAS2R_INT_STS_MASK;
1423 	esas2r_enable_chip_interrupts(a);
1424 	esas2r_enable_heartbeat(a);
1425 	rslt = true;
1426 
1427 exit:
1428 	/*
1429 	 * Regardless of whether initialization was successful, certain things
1430 	 * need to get done before we exit.
1431 	 */
1432 
1433 	if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
1434 	    test_bit(AF_FIRST_INIT, &a->flags)) {
1435 		/*
1436 		 * Reinitialization was performed during the first
1437 		 * initialization.  Only clear the chip reset flag so the
1438 		 * original device polling is not cancelled.
1439 		 */
1440 		if (!rslt)
1441 			clear_bit(AF_CHPRST_PENDING, &a->flags);
1442 	} else {
1443 		/* First initialization or a subsequent re-init is complete. */
1444 		if (!rslt) {
1445 			clear_bit(AF_CHPRST_PENDING, &a->flags);
1446 			clear_bit(AF_DISC_PENDING, &a->flags);
1447 		}
1448 
1449 
1450 		/* Enable deferred processing after the first initialization. */
1451 		if (test_bit(AF_FIRST_INIT, &a->flags)) {
1452 			clear_bit(AF_FIRST_INIT, &a->flags);
1453 
1454 			if (atomic_dec_return(&a->disable_cnt) == 0)
1455 				esas2r_do_deferred_processes(a);
1456 		}
1457 	}
1458 
1459 	return rslt;
1460 }
1461 
1462 void esas2r_reset_adapter(struct esas2r_adapter *a)
1463 {
1464 	set_bit(AF_OS_RESET, &a->flags);
1465 	esas2r_local_reset_adapter(a);
1466 	esas2r_schedule_tasklet(a);
1467 }
1468 
1469 void esas2r_reset_chip(struct esas2r_adapter *a)
1470 {
1471 	if (!esas2r_is_adapter_present(a))
1472 		return;
1473 
1474 	/*
1475 	 * Before we reset the chip, save off the VDA core dump.  The VDA core
1476 	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure
1477 	 * to not overwrite a previous crash that was saved.
1478 	 */
1479 	if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
1480 	    !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
1481 		esas2r_read_mem_block(a,
1482 				      a->fw_coredump_buff,
1483 				      MW_DATA_ADDR_SRAM + 0x80000,
1484 				      ESAS2R_FWCOREDUMP_SZ);
1485 
1486 		set_bit(AF2_COREDUMP_SAVED, &a->flags2);
1487 	}
1488 
1489 	clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
1490 
1491 	/* Reset the chip */
1492 	if (a->pcid->revision == MVR_FREY_B2)
1493 		esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
1494 					    MU_CTL_IN_FULL_RST2);
1495 	else
1496 		esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
1497 					    MU_CTL_IN_FULL_RST);
1498 
1499 
1500 	/* Stall a little while to let the reset condition clear */
1501 	mdelay(10);
1502 }
1503 
1504 static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
1505 {
1506 	u32 starttime;
1507 	u32 doorbell;
1508 
1509 	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
1510 	starttime = jiffies_to_msecs(jiffies);
1511 
1512 	while (true) {
1513 		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1514 		if (doorbell & DRBL_POWER_DOWN) {
1515 			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1516 						    doorbell);
1517 			break;
1518 		}
1519 
1520 		schedule_timeout_interruptible(msecs_to_jiffies(100));
1521 
1522 		if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
1523 			esas2r_hdebug("Timeout waiting for power down");
1524 			break;
1525 		}
1526 	}
1527 }
1528 
1529 /*
1530  * Power-down processing: quiesce device state, adapter state, interrupts,
1531  * and I/O ahead of a suspend or shutdown.
1532  */
1533 void esas2r_power_down(struct esas2r_adapter *a)
1534 {
1535 	set_bit(AF_POWER_MGT, &a->flags);
1536 	set_bit(AF_POWER_DOWN, &a->flags);
1537 
1538 	if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
1539 		u32 starttime;
1540 		u32 doorbell;
1541 
1542 		/*
1543 		 * We are currently running OK and will be reinitializing later.
1544 		 * Increment the disable count to coordinate with
1545 		 * esas2r_init_adapter.  We don't have to do this in degraded
1546 		 * mode since we never enabled interrupts in the first place.
1547 		 */
1548 		esas2r_disable_chip_interrupts(a);
1549 		esas2r_disable_heartbeat(a);
1550 
1551 		/* wait for any VDA activity to clear before continuing */
1552 		esas2r_write_register_dword(a, MU_DOORBELL_IN,
1553 					    DRBL_MSG_IFC_DOWN);
1554 		starttime = jiffies_to_msecs(jiffies);
1555 
1556 		while (true) {
1557 			doorbell =
1558 				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1559 			if (doorbell & DRBL_MSG_IFC_DOWN) {
1560 				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1561 							    doorbell);
1562 				break;
1563 			}
1564 
1565 			schedule_timeout_interruptible(msecs_to_jiffies(100));
1566 
1567 			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1568 				esas2r_hdebug(
1569 					"timeout waiting for interface down");
1570 				break;
1571 			}
1572 		}
1573 
1574 		/*
1575 		 * For versions of firmware that support it tell them the driver
1576 		 * is powering down.
1577 		 */
1578 		if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
1579 			esas2r_power_down_notify_firmware(a);
1580 	}
1581 
1582 	/* Suspend I/O processing. */
1583 	set_bit(AF_OS_RESET, &a->flags);
1584 	set_bit(AF_DISC_PENDING, &a->flags);
1585 	set_bit(AF_CHPRST_PENDING, &a->flags);
1586 
1587 	esas2r_process_adapter_reset(a);
1588 
1589 	/* Remove devices now that I/O is cleaned up. */
1590 	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
1591 	esas2r_targ_db_remove_all(a, false);
1592 }
1593 
1594 /*
1595  * Power-up processing: reinitialize the chip and restore device state,
1596  * interrupts, and I/O after a resume.
1597  */
1598 bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
1599 {
1600 	bool ret;
1601 
1602 	clear_bit(AF_POWER_DOWN, &a->flags);
1603 	esas2r_init_pci_cfg_space(a);
1604 	set_bit(AF_FIRST_INIT, &a->flags);
1605 	atomic_inc(&a->disable_cnt);
1606 
1607 	/* reinitialize the adapter */
1608 	ret = esas2r_check_adapter(a);
1609 	if (!esas2r_init_adapter_hw(a, init_poll))
1610 		ret = false;
1611 
1612 	/* send the reset asynchronous event */
1613 	esas2r_send_reset_ae(a, true);
1614 
1615 	/* clear this flag after initialization. */
1616 	clear_bit(AF_POWER_MGT, &a->flags);
1617 	return ret;
1618 }
1619 
1620 bool esas2r_is_adapter_present(struct esas2r_adapter *a)
1621 {
1622 	if (test_bit(AF_NOT_PRESENT, &a->flags))
1623 		return false;
1624 
1625 	if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
1626 		set_bit(AF_NOT_PRESENT, &a->flags);
1627 
1628 		return false;
1629 	}
1630 	return true;
1631 }
1632 
1633 const char *esas2r_get_model_name(struct esas2r_adapter *a)
1634 {
1635 	switch (a->pcid->subsystem_device) {
1636 	case ATTO_ESAS_R680:
1637 		return "ATTO ExpressSAS R680";
1638 
1639 	case ATTO_ESAS_R608:
1640 		return "ATTO ExpressSAS R608";
1641 
1642 	case ATTO_ESAS_R60F:
1643 		return "ATTO ExpressSAS R60F";
1644 
1645 	case ATTO_ESAS_R6F0:
1646 		return "ATTO ExpressSAS R6F0";
1647 
1648 	case ATTO_ESAS_R644:
1649 		return "ATTO ExpressSAS R644";
1650 
1651 	case ATTO_ESAS_R648:
1652 		return "ATTO ExpressSAS R648";
1653 
1654 	case ATTO_TSSC_3808:
1655 		return "ATTO ThunderStream SC 3808D";
1656 
1657 	case ATTO_TSSC_3808E:
1658 		return "ATTO ThunderStream SC 3808E";
1659 
1660 	case ATTO_TLSH_1068:
1661 		return "ATTO ThunderLink SH 1068";
1662 	}
1663 
1664 	return "ATTO SAS Controller";
1665 }
1666 
1667 const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
1668 {
1669 	switch (a->pcid->subsystem_device) {
1670 	case ATTO_ESAS_R680:
1671 		return "R680";
1672 
1673 	case ATTO_ESAS_R608:
1674 		return "R608";
1675 
1676 	case ATTO_ESAS_R60F:
1677 		return "R60F";
1678 
1679 	case ATTO_ESAS_R6F0:
1680 		return "R6F0";
1681 
1682 	case ATTO_ESAS_R644:
1683 		return "R644";
1684 
1685 	case ATTO_ESAS_R648:
1686 		return "R648";
1687 
1688 	case ATTO_TSSC_3808:
1689 		return "SC 3808D";
1690 
1691 	case ATTO_TSSC_3808E:
1692 		return "SC 3808E";
1693 
1694 	case ATTO_TLSH_1068:
1695 		return "SH 1068";
1696 	}
1697 
1698 	return "unknown";
1699 }
1700