1 /*
2 * linux/drivers/scsi/esas2r/esas2r_init.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44 #include "esas2r.h"
45
/*
 * Allocate a coherent DMA buffer for @mem_desc, over-allocating by @align
 * bytes so the published addresses can be rounded up to the requested
 * alignment.  The raw pointer and raw size are stashed in esas2r_data /
 * esas2r_param so esas2r_initmem_free() can undo the adjustment.
 * Returns true on success.
 */
static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
				 struct esas2r_mem_desc *mem_desc,
				 u32 align)
{
	/* over-allocate so the aligned region still fits mem_desc->size */
	mem_desc->esas2r_param = mem_desc->size + align;
	mem_desc->virt_addr = NULL;
	mem_desc->phys_addr = 0;
	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
						   (size_t)mem_desc->
						   esas2r_param,
						   (dma_addr_t *)&mem_desc->
						   phys_addr,
						   GFP_KERNEL);

	if (mem_desc->esas2r_data == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %lu bytes of consistent memory!",
			   (long
			    unsigned
			    int)mem_desc->esas2r_param);
		return false;
	}

	/* publish the rounded-up addresses; callers only ever use these */
	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
	memset(mem_desc->virt_addr, 0, mem_desc->size);
	return true;
}
74
/*
 * Free a buffer obtained via esas2r_initmem_alloc().  A zero phys_addr is
 * treated as a non-DMA allocation and released with kfree() instead.
 * Safe to call on a descriptor that was never allocated (virt_addr NULL).
 */
static void esas2r_initmem_free(struct esas2r_adapter *a,
				struct esas2r_mem_desc *mem_desc)
{
	if (mem_desc->virt_addr == NULL)
		return;

	/*
	 * Careful! phys_addr and virt_addr may have been adjusted from the
	 * original allocation in order to return the desired alignment. That
	 * means we have to use the original address (in esas2r_data) and size
	 * (esas2r_param) and calculate the original physical address based on
	 * the difference between the requested and actual allocation size.
	 */
	if (mem_desc->phys_addr) {
		/* bytes the virtual pointer was advanced by PTR_ALIGN() */
		int unalign = ((u8 *)mem_desc->virt_addr) -
			      ((u8 *)mem_desc->esas2r_data);

		dma_free_coherent(&a->pcid->dev,
				  (size_t)mem_desc->esas2r_param,
				  mem_desc->esas2r_data,
				  (dma_addr_t)(mem_desc->phys_addr - unalign));
	} else {
		kfree(mem_desc->esas2r_data);
	}

	mem_desc->virt_addr = NULL;
}
102
alloc_vda_req(struct esas2r_adapter * a,struct esas2r_request * rq)103 static bool alloc_vda_req(struct esas2r_adapter *a,
104 struct esas2r_request *rq)
105 {
106 struct esas2r_mem_desc *memdesc = kzalloc_obj(struct esas2r_mem_desc);
107
108 if (memdesc == NULL) {
109 esas2r_hdebug("could not alloc mem for vda request memdesc\n");
110 return false;
111 }
112
113 memdesc->size = sizeof(union atto_vda_req) +
114 ESAS2R_DATA_BUF_LEN;
115
116 if (!esas2r_initmem_alloc(a, memdesc, 256)) {
117 esas2r_hdebug("could not alloc mem for vda request\n");
118 kfree(memdesc);
119 return false;
120 }
121
122 a->num_vrqs++;
123 list_add(&memdesc->next_desc, &a->vrq_mds_head);
124
125 rq->vrq_md = memdesc;
126 rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
127 rq->vrq->scsi.handle = a->num_vrqs;
128
129 return true;
130 }
131
esas2r_unmap_regions(struct esas2r_adapter * a)132 static void esas2r_unmap_regions(struct esas2r_adapter *a)
133 {
134 if (a->regs)
135 iounmap((void __iomem *)a->regs);
136
137 a->regs = NULL;
138
139 pci_release_region(a->pcid, 2);
140
141 if (a->data_window)
142 iounmap((void __iomem *)a->data_window);
143
144 a->data_window = NULL;
145
146 pci_release_region(a->pcid, 0);
147 }
148
esas2r_map_regions(struct esas2r_adapter * a)149 static int esas2r_map_regions(struct esas2r_adapter *a)
150 {
151 int error;
152
153 a->regs = NULL;
154 a->data_window = NULL;
155
156 error = pci_request_region(a->pcid, 2, a->name);
157 if (error != 0) {
158 esas2r_log(ESAS2R_LOG_CRIT,
159 "pci_request_region(2) failed, error %d",
160 error);
161
162 return error;
163 }
164
165 a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
166 pci_resource_len(a->pcid, 2));
167 if (a->regs == NULL) {
168 esas2r_log(ESAS2R_LOG_CRIT,
169 "ioremap failed for regs mem region\n");
170 pci_release_region(a->pcid, 2);
171 return -EFAULT;
172 }
173
174 error = pci_request_region(a->pcid, 0, a->name);
175 if (error != 0) {
176 esas2r_log(ESAS2R_LOG_CRIT,
177 "pci_request_region(2) failed, error %d",
178 error);
179 esas2r_unmap_regions(a);
180 return error;
181 }
182
183 a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
184 0),
185 pci_resource_len(a->pcid, 0));
186 if (a->data_window == NULL) {
187 esas2r_log(ESAS2R_LOG_CRIT,
188 "ioremap failed for data_window mem region\n");
189 esas2r_unmap_regions(a);
190 return -EFAULT;
191 }
192
193 return 0;
194 }
195
esas2r_setup_interrupts(struct esas2r_adapter * a,int intr_mode)196 static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
197 {
198 int i;
199
200 /* Set up interrupt mode based on the requested value */
201 switch (intr_mode) {
202 case INTR_MODE_LEGACY:
203 use_legacy_interrupts:
204 a->intr_mode = INTR_MODE_LEGACY;
205 break;
206
207 case INTR_MODE_MSI:
208 i = pci_enable_msi(a->pcid);
209 if (i != 0) {
210 esas2r_log(ESAS2R_LOG_WARN,
211 "failed to enable MSI for adapter %d, "
212 "falling back to legacy interrupts "
213 "(err=%d)", a->index,
214 i);
215 goto use_legacy_interrupts;
216 }
217 a->intr_mode = INTR_MODE_MSI;
218 set_bit(AF2_MSI_ENABLED, &a->flags2);
219 break;
220
221
222 default:
223 esas2r_log(ESAS2R_LOG_WARN,
224 "unknown interrupt_mode %d requested, "
225 "falling back to legacy interrupt",
226 interrupt_mode);
227 goto use_legacy_interrupts;
228 }
229 }
230
esas2r_claim_interrupts(struct esas2r_adapter * a)231 static void esas2r_claim_interrupts(struct esas2r_adapter *a)
232 {
233 unsigned long flags = 0;
234
235 if (a->intr_mode == INTR_MODE_LEGACY)
236 flags |= IRQF_SHARED;
237
238 esas2r_log(ESAS2R_LOG_INFO,
239 "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
240 a->pcid->irq, a, a->name, flags);
241
242 if (request_irq(a->pcid->irq,
243 (a->intr_mode ==
244 INTR_MODE_LEGACY) ? esas2r_interrupt :
245 esas2r_msi_interrupt,
246 flags,
247 a->name,
248 a)) {
249 esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
250 a->pcid->irq);
251 return;
252 }
253
254 set_bit(AF2_IRQ_CLAIMED, &a->flags2);
255 esas2r_log(ESAS2R_LOG_INFO,
256 "claimed IRQ %d flags: 0x%lx",
257 a->pcid->irq, flags);
258 }
259
/*
 * One-time initialization of adapter slot @index: configures DMA
 * addressing, locks/queues/lists, per-request VDA buffers, PCI BAR
 * mappings, interrupts and the uncached DMA area, then brings the
 * hardware up.  Returns 1 on success, 0 on failure; after the slot is
 * claimed, failures are unwound via esas2r_kill_adapter().
 */
int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
			int index)
{
	struct esas2r_adapter *a;
	u64 bus_addr = 0;
	int i;
	void *next_uncached;
	struct esas2r_request *first_request, *last_request;
	bool dma64 = false;

	if (index >= MAX_ADAPTERS) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "tried to init invalid adapter index %u!",
			   index);
		return 0;
	}

	if (esas2r_adapters[index]) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "tried to init existing adapter index %u!",
			   index);
		return 0;
	}

	/* adapter state lives in the SCSI host's hostdata area */
	a = (struct esas2r_adapter *)host->hostdata;
	memset(a, 0, sizeof(struct esas2r_adapter));
	a->pcid = pcid;
	a->host = host;

	/* prefer 64-bit DMA when the platform needs and supports it */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
	    !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
		dma64 = true;

	if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
		esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
		esas2r_kill_adapter(index);
		return 0;
	}

	esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
		       "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");

	esas2r_adapters[index] = a;
	sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
	esas2r_debug("new adapter %p, name %s", a, a->name);
	spin_lock_init(&a->request_lock);
	spin_lock_init(&a->fw_event_lock);
	mutex_init(&a->fm_api_mutex);
	mutex_init(&a->fs_api_mutex);
	sema_init(&a->nvram_semaphore, 1);

	esas2r_fw_event_off(a);
	/*
	 * NOTE(review): a->index is not assigned until later in this
	 * function, so the workqueue name always uses the zeroed value
	 * here -- confirm whether "index" was intended instead.
	 */
	a->fw_event_q =
		alloc_ordered_workqueue("esas2r/%d", WQ_MEM_RECLAIM, a->index);

	init_waitqueue_head(&a->buffered_ioctl_waiter);
	init_waitqueue_head(&a->nvram_waiter);
	init_waitqueue_head(&a->fm_api_waiter);
	init_waitqueue_head(&a->fs_api_waiter);
	init_waitqueue_head(&a->vda_waiter);

	INIT_LIST_HEAD(&a->general_req.req_list);
	INIT_LIST_HEAD(&a->active_list);
	INIT_LIST_HEAD(&a->defer_list);
	INIT_LIST_HEAD(&a->free_sg_list_head);
	INIT_LIST_HEAD(&a->avail_request);
	INIT_LIST_HEAD(&a->vrq_mds_head);
	INIT_LIST_HEAD(&a->fw_event_list);

	/* request structures sit immediately after the adapter struct */
	first_request = (struct esas2r_request *)((u8 *)(a + 1));

	/*
	 * NOTE(review): i starts at 1, so num_requests - 1 requests are
	 * initialized here -- confirm the off-by-one is intentional.
	 */
	for (last_request = first_request, i = 1; i < num_requests;
	     last_request++, i++) {
		INIT_LIST_HEAD(&last_request->req_list);
		list_add_tail(&last_request->comp_list, &a->avail_request);
		if (!alloc_vda_req(a, last_request)) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "failed to allocate a VDA request!");
			esas2r_kill_adapter(index);
			return 0;
		}
	}

	esas2r_debug("requests: %p to %p (%d, %d)", first_request,
		     last_request,
		     sizeof(*first_request),
		     num_requests);

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
		esas2r_kill_adapter(index);
		return 0;
	}

	a->index = index;

	/* interrupts will be disabled until we are done with init */
	atomic_inc(&a->dis_ints_cnt);
	atomic_inc(&a->disable_cnt);
	set_bit(AF_CHPRST_PENDING, &a->flags);
	set_bit(AF_DISC_PENDING, &a->flags);
	set_bit(AF_FIRST_INIT, &a->flags);
	set_bit(AF_LEGACY_SGE_MODE, &a->flags);

	a->init_msg = ESAS2R_INIT_MSG_START;
	a->max_vdareq_size = 128;
	a->build_sgl = esas2r_build_sg_list_sge;

	esas2r_setup_interrupts(a, interrupt_mode);

	/* single coherent area carved up by esas2r_init_adapter_struct() */
	a->uncached_size = esas2r_get_uncached_size(a);
	a->uncached = dma_alloc_coherent(&pcid->dev,
					 (size_t)a->uncached_size,
					 (dma_addr_t *)&bus_addr,
					 GFP_KERNEL);
	if (a->uncached == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %d bytes of consistent memory!",
			   a->uncached_size);
		esas2r_kill_adapter(index);
		return 0;
	}

	a->uncached_phys = bus_addr;

	esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
		     a->uncached_size,
		     a->uncached,
		     upper_32_bits(bus_addr),
		     lower_32_bits(bus_addr));
	memset(a->uncached, 0, a->uncached_size);
	next_uncached = a->uncached;

	if (!esas2r_init_adapter_struct(a,
					&next_uncached)) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to initialize adapter structure (2)!");
		esas2r_kill_adapter(index);
		return 0;
	}

	tasklet_init(&a->tasklet,
		     esas2r_adapter_tasklet,
		     (unsigned long)a);

	/*
	 * Disable chip interrupts to prevent spurious interrupts
	 * until we claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	esas2r_check_adapter(a);

	/* hardware init failure is logged but not fatal here */
	if (!esas2r_init_adapter_hw(a, true)) {
		esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
	} else {
		esas2r_debug("esas2r_init_adapter ok");
	}

	esas2r_claim_interrupts(a);

	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
		esas2r_enable_chip_interrupts(a);

	set_bit(AF2_INIT_DONE, &a->flags2);
	if (!test_bit(AF_DEGRADED_MODE, &a->flags))
		esas2r_kickoff_timer(a);
	esas2r_debug("esas2r_init_adapter done for %p (%d)",
		     a, a->disable_cnt);

	return 1;
}
432
/*
 * Quiesce the adapter and release its per-adapter resources: halt the
 * chip, remove sysfs binaries, free the IRQ and MSI vector, and free
 * all init-time memory.  @power_management is nonzero on the PM
 * suspend path, in which case the timer and tasklet are left running.
 */
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
				      int power_management)
{
	struct esas2r_mem_desc *memdesc, *next;

	if ((test_bit(AF2_INIT_DONE, &a->flags2))
	    &&  (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
		if (!power_management) {
			timer_delete_sync(&a->timer);
			tasklet_kill(&a->tasklet);
		}
		esas2r_power_down(a);

		/*
		 * There are versions of firmware that do not handle the sync
		 * cache command correctly.  Stall here to ensure that the
		 * cache is lazily flushed.
		 */
		mdelay(500);
		esas2r_debug("chip halted");
	}

	/* Remove sysfs binary files */
	if (a->sysfs_fw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
		a->sysfs_fw_created = 0;
	}

	if (a->sysfs_fs_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
		a->sysfs_fs_created = 0;
	}

	if (a->sysfs_vda_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
		a->sysfs_vda_created = 0;
	}

	if (a->sysfs_hw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
		a->sysfs_hw_created = 0;
	}

	if (a->sysfs_live_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_live_nvram);
		a->sysfs_live_nvram_created = 0;
	}

	if (a->sysfs_default_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_default_nvram);
		a->sysfs_default_nvram_created = 0;
	}

	/* Clean up interrupts */
	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "free_irq(%d) called", a->pcid->irq);

		free_irq(a->pcid->irq, a);
		esas2r_debug("IRQ released");
		clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
	}

	if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
		pci_disable_msi(a->pcid);
		clear_bit(AF2_MSI_ENABLED, &a->flags2);
		esas2r_debug("MSI disabled");
	}

	/* free the communication lists and S/G pages */
	if (a->inbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->inbound_list_md);

	if (a->outbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->outbound_list_md);

	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
				 next_desc) {
		esas2r_initmem_free(a, memdesc);
	}

	/* Following frees everything allocated via alloc_vda_req */
	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
		esas2r_initmem_free(a, memdesc);
		list_del(&memdesc->next_desc);
		kfree(memdesc);
	}

	kfree(a->first_ae_req);
	a->first_ae_req = NULL;

	kfree(a->sg_list_mds);
	a->sg_list_mds = NULL;

	kfree(a->req_table);
	a->req_table = NULL;

	if (a->regs) {
		esas2r_unmap_regions(a);
		a->regs = NULL;
		a->data_window = NULL;
		esas2r_debug("regions unmapped");
	}
}
539
540 /* Release/free allocated resources for specified adapters. */
/*
 * Fully tear down adapter slot @i: power it down, free all DMA buffers
 * and the firmware event workqueue, disable the PCI device, and (if
 * init had completed) unregister and release the SCSI host.  A no-op
 * when the slot is empty.
 */
void esas2r_kill_adapter(int i)
{
	struct esas2r_adapter *a = esas2r_adapters[i];

	if (a) {
		unsigned long flags;
		struct workqueue_struct *wq;
		esas2r_debug("killing adapter %p [%d] ", a, i);
		esas2r_fw_event_off(a);
		esas2r_adapter_power_down(a, 0);
		/* the buffered ioctl area is shared; only its owner frees it */
		if (esas2r_buffered_ioctl &&
		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);
			esas2r_buffered_ioctl = NULL;
		}

		if (a->vda_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)VDA_MAX_BUFFER_SIZE,
					  a->vda_buffer,
					  (dma_addr_t)a->ppvda_buffer);
			a->vda_buffer = NULL;
		}
		if (a->fs_api_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->fs_api_buffer_size,
					  a->fs_api_buffer,
					  (dma_addr_t)a->ppfs_api_buffer);
			a->fs_api_buffer = NULL;
		}

		kfree(a->local_atto_ioctl);
		a->local_atto_ioctl = NULL;

		/* detach the workqueue under the lock, destroy it outside */
		spin_lock_irqsave(&a->fw_event_lock, flags);
		wq = a->fw_event_q;
		a->fw_event_q = NULL;
		spin_unlock_irqrestore(&a->fw_event_lock, flags);
		if (wq)
			destroy_workqueue(wq);

		if (a->uncached) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->uncached_size,
					  a->uncached,
					  (dma_addr_t)a->uncached_phys);
			a->uncached = NULL;
			esas2r_debug("uncached area freed");
		}

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_disable_device() called.  msix_enabled: %d "
			       "msi_enabled: %d irq: %d pin: %d",
			       a->pcid->msix_enabled,
			       a->pcid->msi_enabled,
			       a->pcid->irq,
			       a->pcid->pin);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "before pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		pci_disable_device(a->pcid);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "after pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_set_drv_data(%p, NULL) called",
			       a->pcid);

		pci_set_drvdata(a->pcid, NULL);
		esas2r_adapters[i] = NULL;

		if (test_bit(AF2_INIT_DONE, &a->flags2)) {
			clear_bit(AF2_INIT_DONE, &a->flags2);

			set_bit(AF_DEGRADED_MODE, &a->flags);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_remove_host() called");

			scsi_remove_host(a->host);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_host_put() called");

			scsi_host_put(a->host);
		}
	}
}
641
/*
 * PM suspend callback: power the adapter down with power_management=1
 * so the timer/tasklet survive for resume.  Returns 0 on success or
 * -ENODEV if no adapter is attached.
 */
static int __maybe_unused esas2r_suspend(struct device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(dev);
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;

	esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()");
	if (!a)
		return -ENODEV;

	esas2r_adapter_power_down(a, 1);
	esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0");
	return 0;
}
655
/*
 * PM resume callback: re-map PCI regions, re-select the previously
 * used interrupt mode, power the chip back up, and re-claim the IRQ.
 * Returns 0 on success or a negative error.
 */
static int __maybe_unused esas2r_resume(struct device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(dev);
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
	int rez = 0;

	esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()");

	if (!a) {
		rez = -ENODEV;
		goto error_exit;
	}

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
		rez = -ENOMEM;
		goto error_exit;
	}

	/* Set up interrupt mode */
	esas2r_setup_interrupts(a, a->intr_mode);

	/*
	 * Disable chip interrupts to prevent spurious interrupts until we
	 * claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	if (!esas2r_power_up(a, true)) {
		esas2r_debug("yikes, esas2r_power_up failed");
		rez = -ENOMEM;
		goto error_exit;
	}

	esas2r_claim_interrupts(a);

	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
		/*
		 * Now that system interrupt(s) are claimed, we can enable
		 * chip interrupts.
		 */
		esas2r_enable_chip_interrupts(a);
		esas2r_kickoff_timer(a);
	} else {
		esas2r_debug("yikes, unable to claim IRQ");
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
		rez = -ENOMEM;
		goto error_exit;
	}

error_exit:
	esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d",
		       rez);
	return rez;
}
710
711 SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume);
712
/*
 * Flag the adapter as degraded and log @error_str.  Always returns
 * false so boolean init paths can "return esas2r_set_degraded_mode(...)"
 * directly.
 */
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
	set_bit(AF_DEGRADED_MODE, &a->flags);
	esas2r_log(ESAS2R_LOG_CRIT,
		   "setting adapter to degraded mode: %s\n", error_str);
	return false;
}
720
esas2r_get_uncached_size(struct esas2r_adapter * a)721 u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
722 {
723 return sizeof(struct esas2r_sas_nvram)
724 + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
725 + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
726 + 8
727 + (num_sg_lists * (u16)sgl_page_size)
728 + ALIGN((num_requests + num_ae_requests + 1 +
729 ESAS2R_LIST_EXTRA) *
730 sizeof(struct esas2r_inbound_list_source_entry),
731 8)
732 + ALIGN((num_requests + num_ae_requests + 1 +
733 ESAS2R_LIST_EXTRA) *
734 sizeof(struct atto_vda_ob_rsp), 8)
735 + 256; /* VDA request and buffer align */
736 }
737
esas2r_init_pci_cfg_space(struct esas2r_adapter * a)738 static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
739 {
740 if (pci_is_pcie(a->pcid)) {
741 u16 devcontrol;
742
743 pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
744
745 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
746 PCI_EXP_DEVCTL_READRQ_512B) {
747 esas2r_log(ESAS2R_LOG_INFO,
748 "max read request size > 512B");
749
750 devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
751 devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
752 pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
753 devcontrol);
754 }
755 }
756 }
757
758 /*
759 * Determine the organization of the uncached data area and
760 * finish initializing the adapter structure
761 */
esas2r_init_adapter_struct(struct esas2r_adapter * a,void ** uncached_area)762 bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
763 void **uncached_area)
764 {
765 u32 i;
766 u8 *high;
767 struct esas2r_inbound_list_source_entry *element;
768 struct esas2r_request *rq;
769 struct esas2r_mem_desc *sgl;
770
771 spin_lock_init(&a->sg_list_lock);
772 spin_lock_init(&a->mem_lock);
773 spin_lock_init(&a->queue_lock);
774
775 a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
776
777 if (!alloc_vda_req(a, &a->general_req)) {
778 esas2r_hdebug(
779 "failed to allocate a VDA request for the general req!");
780 return false;
781 }
782
783 /* allocate requests for asynchronous events */
784 a->first_ae_req =
785 kzalloc_objs(struct esas2r_request, num_ae_requests);
786
787 if (a->first_ae_req == NULL) {
788 esas2r_log(ESAS2R_LOG_CRIT,
789 "failed to allocate memory for asynchronous events");
790 return false;
791 }
792
793 /* allocate the S/G list memory descriptors */
794 a->sg_list_mds = kzalloc_objs(struct esas2r_mem_desc, num_sg_lists);
795
796 if (a->sg_list_mds == NULL) {
797 esas2r_log(ESAS2R_LOG_CRIT,
798 "failed to allocate memory for s/g list descriptors");
799 return false;
800 }
801
802 /* allocate the request table */
803 a->req_table =
804 kzalloc_objs(struct esas2r_request *,
805 num_requests + num_ae_requests + 1);
806
807 if (a->req_table == NULL) {
808 esas2r_log(ESAS2R_LOG_CRIT,
809 "failed to allocate memory for the request table");
810 return false;
811 }
812
813 /* initialize PCI configuration space */
814 esas2r_init_pci_cfg_space(a);
815
816 /*
817 * the thunder_stream boards all have a serial flash part that has a
818 * different base address on the AHB bus.
819 */
820 if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
821 && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
822 a->flags2 |= AF2_THUNDERBOLT;
823
824 if (test_bit(AF2_THUNDERBOLT, &a->flags2))
825 a->flags2 |= AF2_SERIAL_FLASH;
826
827 if (a->pcid->subsystem_device == ATTO_TLSH_1068)
828 a->flags2 |= AF2_THUNDERLINK;
829
830 /* Uncached Area */
831 high = (u8 *)*uncached_area;
832
833 /* initialize the scatter/gather table pages */
834
835 for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
836 sgl->size = sgl_page_size;
837
838 list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
839
840 if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
841 /* Allow the driver to load if the minimum count met. */
842 if (i < NUM_SGL_MIN)
843 return false;
844 break;
845 }
846 }
847
848 /* compute the size of the lists */
849 a->list_size = num_requests + ESAS2R_LIST_EXTRA;
850
851 /* allocate the inbound list */
852 a->inbound_list_md.size = a->list_size *
853 sizeof(struct
854 esas2r_inbound_list_source_entry);
855
856 if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
857 esas2r_hdebug("failed to allocate IB list");
858 return false;
859 }
860
861 /* allocate the outbound list */
862 a->outbound_list_md.size = a->list_size *
863 sizeof(struct atto_vda_ob_rsp);
864
865 if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
866 ESAS2R_LIST_ALIGN)) {
867 esas2r_hdebug("failed to allocate IB list");
868 return false;
869 }
870
871 /* allocate the NVRAM structure */
872 a->nvram = (struct esas2r_sas_nvram *)high;
873 high += sizeof(struct esas2r_sas_nvram);
874
875 /* allocate the discovery buffer */
876 a->disc_buffer = high;
877 high += ESAS2R_DISC_BUF_LEN;
878 high = PTR_ALIGN(high, 8);
879
880 /* allocate the outbound list copy pointer */
881 a->outbound_copy = (u32 volatile *)high;
882 high += sizeof(u32);
883
884 if (!test_bit(AF_NVR_VALID, &a->flags))
885 esas2r_nvram_set_defaults(a);
886
887 /* update the caller's uncached memory area pointer */
888 *uncached_area = (void *)high;
889
890 /* initialize the allocated memory */
891 if (test_bit(AF_FIRST_INIT, &a->flags)) {
892 esas2r_targ_db_initialize(a);
893
894 /* prime parts of the inbound list */
895 element =
896 (struct esas2r_inbound_list_source_entry *)a->
897 inbound_list_md.
898 virt_addr;
899
900 for (i = 0; i < a->list_size; i++) {
901 element->address = 0;
902 element->reserved = 0;
903 element->length = cpu_to_le32(HWILSE_INTERFACE_F0
904 | (sizeof(union
905 atto_vda_req)
906 /
907 sizeof(u32)));
908 element++;
909 }
910
911 /* init the AE requests */
912 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
913 i++) {
914 INIT_LIST_HEAD(&rq->req_list);
915 if (!alloc_vda_req(a, rq)) {
916 esas2r_hdebug(
917 "failed to allocate a VDA request!");
918 return false;
919 }
920
921 esas2r_rq_init_request(rq, a);
922
923 /* override the completion function */
924 rq->comp_cb = esas2r_ae_complete;
925 }
926 }
927
928 return true;
929 }
930
931 /* This code will verify that the chip is operational. */
esas2r_check_adapter(struct esas2r_adapter * a)932 bool esas2r_check_adapter(struct esas2r_adapter *a)
933 {
934 u32 starttime;
935 u32 doorbell;
936 u64 ppaddr;
937 u32 dw;
938
939 /*
940 * if the chip reset detected flag is set, we can bypass a bunch of
941 * stuff.
942 */
943 if (test_bit(AF_CHPRST_DETECTED, &a->flags))
944 goto skip_chip_reset;
945
946 /*
947 * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
948 * may have left them enabled or we may be recovering from a fault.
949 */
950 esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
951 esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
952
953 /*
954 * wait for the firmware to become ready by forcing an interrupt and
955 * waiting for a response.
956 */
957 starttime = jiffies_to_msecs(jiffies);
958
959 while (true) {
960 esas2r_force_interrupt(a);
961 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
962 if (doorbell == 0xFFFFFFFF) {
963 /*
964 * Give the firmware up to two seconds to enable
965 * register access after a reset.
966 */
967 if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
968 return esas2r_set_degraded_mode(a,
969 "unable to access registers");
970 } else if (doorbell & DRBL_FORCE_INT) {
971 u32 ver = (doorbell & DRBL_FW_VER_MSK);
972
973 /*
974 * This driver supports version 0 and version 1 of
975 * the API
976 */
977 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
978 doorbell);
979
980 if (ver == DRBL_FW_VER_0) {
981 set_bit(AF_LEGACY_SGE_MODE, &a->flags);
982
983 a->max_vdareq_size = 128;
984 a->build_sgl = esas2r_build_sg_list_sge;
985 } else if (ver == DRBL_FW_VER_1) {
986 clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
987
988 a->max_vdareq_size = 1024;
989 a->build_sgl = esas2r_build_sg_list_prd;
990 } else {
991 return esas2r_set_degraded_mode(a,
992 "unknown firmware version");
993 }
994 break;
995 }
996
997 schedule_timeout_interruptible(msecs_to_jiffies(100));
998
999 if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
1000 esas2r_hdebug("FW ready TMO");
1001 esas2r_bugon();
1002
1003 return esas2r_set_degraded_mode(a,
1004 "firmware start has timed out");
1005 }
1006 }
1007
1008 /* purge any asynchronous events since we will repost them later */
1009 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
1010 starttime = jiffies_to_msecs(jiffies);
1011
1012 while (true) {
1013 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1014 if (doorbell & DRBL_MSG_IFC_DOWN) {
1015 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1016 doorbell);
1017 break;
1018 }
1019
1020 schedule_timeout_interruptible(msecs_to_jiffies(50));
1021
1022 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1023 esas2r_hdebug("timeout waiting for interface down");
1024 break;
1025 }
1026 }
1027 skip_chip_reset:
1028 /*
1029 * first things first, before we go changing any of these registers
1030 * disable the communication lists.
1031 */
1032 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
1033 dw &= ~MU_ILC_ENABLE;
1034 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
1035 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
1036 dw &= ~MU_OLC_ENABLE;
1037 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
1038
1039 /* configure the communication list addresses */
1040 ppaddr = a->inbound_list_md.phys_addr;
1041 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
1042 lower_32_bits(ppaddr));
1043 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
1044 upper_32_bits(ppaddr));
1045 ppaddr = a->outbound_list_md.phys_addr;
1046 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
1047 lower_32_bits(ppaddr));
1048 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
1049 upper_32_bits(ppaddr));
1050 ppaddr = a->uncached_phys +
1051 ((u8 *)a->outbound_copy - a->uncached);
1052 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
1053 lower_32_bits(ppaddr));
1054 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
1055 upper_32_bits(ppaddr));
1056
1057 /* reset the read and write pointers */
1058 *a->outbound_copy =
1059 a->last_write =
1060 a->last_read = a->list_size - 1;
1061 set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
1062 esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
1063 a->last_write);
1064 esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
1065 a->last_write);
1066 esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
1067 a->last_write);
1068 esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
1069 MU_OLW_TOGGLE | a->last_write);
1070
1071 /* configure the interface select fields */
1072 dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
1073 dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
1074 esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
1075 (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
1076 dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
1077 dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
1078 esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
1079 (dw | MU_OLIC_LIST_F0 |
1080 MU_OLIC_SOURCE_DDR));
1081
1082 /* finish configuring the communication lists */
1083 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
1084 dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
1085 dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
1086 | (a->list_size << MU_ILC_NUMBER_SHIFT);
1087 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
1088 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
1089 dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
1090 dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
1091 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
1092
1093 /*
1094 * notify the firmware that we're done setting up the communication
1095 * list registers. wait here until the firmware is done configuring
1096 * its lists. it will signal that it is done by enabling the lists.
1097 */
1098 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
1099 starttime = jiffies_to_msecs(jiffies);
1100
1101 while (true) {
1102 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1103 if (doorbell & DRBL_MSG_IFC_INIT) {
1104 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1105 doorbell);
1106 break;
1107 }
1108
1109 schedule_timeout_interruptible(msecs_to_jiffies(100));
1110
1111 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1112 esas2r_hdebug(
1113 "timeout waiting for communication list init");
1114 esas2r_bugon();
1115 return esas2r_set_degraded_mode(a,
1116 "timeout waiting for communication list init");
1117 }
1118 }
1119
1120 /*
1121 * flag whether the firmware supports the power down doorbell. we
1122 * determine this by reading the inbound doorbell enable mask.
1123 */
1124 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
1125 if (doorbell & DRBL_POWER_DOWN)
1126 set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
1127 else
1128 clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
1129
1130 /*
1131 * enable assertion of outbound queue and doorbell interrupts in the
1132 * main interrupt cause register.
1133 */
1134 esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
1135 esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
1136 return true;
1137 }
1138
1139 /* Process the initialization message just completed and format the next one. */
esas2r_format_init_msg(struct esas2r_adapter * a,struct esas2r_request * rq)1140 static bool esas2r_format_init_msg(struct esas2r_adapter *a,
1141 struct esas2r_request *rq)
1142 {
1143 u32 msg = a->init_msg;
1144 struct atto_vda_cfg_init *ci;
1145
1146 a->init_msg = 0;
1147
1148 switch (msg) {
1149 case ESAS2R_INIT_MSG_START:
1150 case ESAS2R_INIT_MSG_REINIT:
1151 {
1152 esas2r_hdebug("CFG init");
1153 esas2r_build_cfg_req(a,
1154 rq,
1155 VDA_CFG_INIT,
1156 0,
1157 NULL);
1158 ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
1159 ci->sgl_page_size = cpu_to_le32(sgl_page_size);
1160 /* firmware interface overflows in y2106 */
1161 ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
1162 rq->flags |= RF_FAILURE_OK;
1163 a->init_msg = ESAS2R_INIT_MSG_INIT;
1164 break;
1165 }
1166
1167 case ESAS2R_INIT_MSG_INIT:
1168 if (rq->req_stat == RS_SUCCESS) {
1169 u32 major;
1170 u32 minor;
1171 u16 fw_release;
1172
1173 a->fw_version = le16_to_cpu(
1174 rq->func_rsp.cfg_rsp.vda_version);
1175 a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
1176 fw_release = le16_to_cpu(
1177 rq->func_rsp.cfg_rsp.fw_release);
1178 major = LOBYTE(fw_release);
1179 minor = HIBYTE(fw_release);
1180 a->fw_version += (major << 16) + (minor << 24);
1181 } else {
1182 esas2r_hdebug("FAILED");
1183 }
1184
1185 /*
1186 * the 2.71 and earlier releases of R6xx firmware did not error
1187 * unsupported config requests correctly.
1188 */
1189
1190 if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
1191 || (be32_to_cpu(a->fw_version) > 0x00524702)) {
1192 esas2r_hdebug("CFG get init");
1193 esas2r_build_cfg_req(a,
1194 rq,
1195 VDA_CFG_GET_INIT2,
1196 sizeof(struct atto_vda_cfg_init),
1197 NULL);
1198
1199 rq->vrq->cfg.sg_list_offset = offsetof(
1200 struct atto_vda_cfg_req,
1201 data.sge);
1202 rq->vrq->cfg.data.prde.ctl_len =
1203 cpu_to_le32(sizeof(struct atto_vda_cfg_init));
1204 rq->vrq->cfg.data.prde.address = cpu_to_le64(
1205 rq->vrq_md->phys_addr +
1206 sizeof(union atto_vda_req));
1207 rq->flags |= RF_FAILURE_OK;
1208 a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
1209 break;
1210 }
1211 fallthrough;
1212
1213 case ESAS2R_INIT_MSG_GET_INIT:
1214 if (msg == ESAS2R_INIT_MSG_GET_INIT) {
1215 ci = (struct atto_vda_cfg_init *)rq->data_buf;
1216 if (rq->req_stat == RS_SUCCESS) {
1217 a->num_targets_backend =
1218 le32_to_cpu(ci->num_targets_backend);
1219 a->ioctl_tunnel =
1220 le32_to_cpu(ci->ioctl_tunnel);
1221 } else {
1222 esas2r_hdebug("FAILED");
1223 }
1224 }
1225 fallthrough;
1226
1227 default:
1228 rq->req_stat = RS_SUCCESS;
1229 return false;
1230 }
1231 return true;
1232 }
1233
1234 /*
1235 * Perform initialization messages via the request queue. Messages are
1236 * performed with interrupts disabled.
1237 */
esas2r_init_msgs(struct esas2r_adapter * a)1238 bool esas2r_init_msgs(struct esas2r_adapter *a)
1239 {
1240 bool success = true;
1241 struct esas2r_request *rq = &a->general_req;
1242
1243 esas2r_rq_init_request(rq, a);
1244 rq->comp_cb = esas2r_dummy_complete;
1245
1246 if (a->init_msg == 0)
1247 a->init_msg = ESAS2R_INIT_MSG_REINIT;
1248
1249 while (a->init_msg) {
1250 if (esas2r_format_init_msg(a, rq)) {
1251 unsigned long flags;
1252 while (true) {
1253 spin_lock_irqsave(&a->queue_lock, flags);
1254 esas2r_start_vda_request(a, rq);
1255 spin_unlock_irqrestore(&a->queue_lock, flags);
1256 esas2r_wait_request(a, rq);
1257 if (rq->req_stat != RS_PENDING)
1258 break;
1259 }
1260 }
1261
1262 if (rq->req_stat == RS_SUCCESS
1263 || ((rq->flags & RF_FAILURE_OK)
1264 && rq->req_stat != RS_TIMEOUT))
1265 continue;
1266
1267 esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
1268 a->init_msg, rq->req_stat, rq->flags);
1269 a->init_msg = ESAS2R_INIT_MSG_START;
1270 success = false;
1271 break;
1272 }
1273
1274 esas2r_rq_destroy_request(rq, a);
1275 return success;
1276 }
1277
/*
 * Initialize the adapter chip.
 *
 * Runs the firmware init message exchange, posts async event requests,
 * caches firmware/flash revision strings, and performs (optionally polled)
 * device discovery.  @init_poll selects polled discovery, used at initial
 * driver load and when resuming from low power; chip-reset recovery instead
 * defers discovery to interrupt context.
 *
 * Returns true on success.  On failure the adapter may be left in degraded
 * mode.  Certain flag cleanup happens regardless of the outcome (see the
 * exit path).
 */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
	bool rslt = false;
	struct esas2r_request *rq;
	u32 i;

	/* nothing to bring up if we already gave up on the hardware */
	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		goto exit;

	if (!test_bit(AF_NVR_VALID, &a->flags)) {
		if (!esas2r_nvram_read_direct(a))
			esas2r_log(ESAS2R_LOG_WARN,
				   "invalid/missing NVRAM parameters");
	}

	if (!esas2r_init_msgs(a)) {
		esas2r_set_degraded_mode(a, "init messages failed");
		goto exit;
	}

	/* The firmware is ready. */
	clear_bit(AF_DEGRADED_MODE, &a->flags);
	clear_bit(AF_CHPRST_PENDING, &a->flags);

	/* Post all the async event requests */
	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
		esas2r_start_ae_request(a, rq);

	/* only read flash/image info once; empty string means not yet read */
	if (!a->flash_rev[0])
		esas2r_read_flash_rev(a);

	if (!a->image_type[0])
		esas2r_read_image_type(a);

	/* format "major.minor" from the packed fw_version word */
	if (a->fw_version == 0)
		a->fw_rev[0] = 0;
	else
		sprintf(a->fw_rev, "%1d.%02d",
			(int)LOBYTE(HIWORD(a->fw_version)),
			(int)HIBYTE(HIWORD(a->fw_version)));

	esas2r_hdebug("firmware revision: %s", a->fw_rev);

	/*
	 * A chip reset during the first init: skip discovery here so the
	 * original polling sequence is not disturbed.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags)
	    && (test_bit(AF_FIRST_INIT, &a->flags))) {
		esas2r_enable_chip_interrupts(a);
		return true;
	}

	/* initialize discovery */
	esas2r_disc_initialize(a);

	/*
	 * wait for the device wait time to expire here if requested. this is
	 * usually requested during initial driver load and possibly when
	 * resuming from a low power state. deferred device waiting will use
	 * interrupts. chip reset recovery always defers device waiting to
	 * avoid being in a TASKLET too long.
	 */
	if (init_poll) {
		u32 currtime = a->disc_start_time;
		u32 nexttick = 100;
		u32 deltatime;

		/*
		 * Block Tasklets from getting scheduled and indicate this is
		 * polled discovery.
		 */
		set_bit(AF_TASKLET_SCHEDULED, &a->flags);
		set_bit(AF_DISC_POLLED, &a->flags);

		/*
		 * Temporarily bring the disable count to zero to enable
		 * deferred processing.  Note that the count is already zero
		 * after the first initialization.
		 */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_dec(&a->disable_cnt);

		while (test_bit(AF_DISC_PENDING, &a->flags)) {
			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/*
			 * Determine the need for a timer tick based on the
			 * delta time between this and the last iteration of
			 * this loop.  We don't use the absolute time because
			 * then we would have to worry about when nexttick
			 * wraps and currtime hasn't yet.
			 */
			deltatime = jiffies_to_msecs(jiffies) - currtime;
			currtime += deltatime;

			/*
			 * Process any waiting discovery as long as the chip is
			 * up.  If a chip reset happens during initial polling,
			 * we have to make sure the timer tick processes the
			 * doorbell indicating the firmware is ready.
			 */
			if (!test_bit(AF_CHPRST_PENDING, &a->flags))
				esas2r_disc_check_for_work(a);

			/* Simulate a timer tick. */
			if (nexttick <= deltatime) {

				/* Time for a timer tick */
				nexttick += 100;
				esas2r_timer_tick(a);
			}

			if (nexttick > deltatime)
				nexttick -= deltatime;

			/* Do any deferred processing */
			if (esas2r_is_tasklet_pending(a))
				esas2r_do_tasklet_tasks(a);

		}

		/* restore the disable count adjusted above */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_inc(&a->disable_cnt);

		clear_bit(AF_DISC_POLLED, &a->flags);
		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
	}


	esas2r_targ_db_report_changes(a);

	/*
	 * For cases where (a) the initialization messages processing may
	 * handle an interrupt for a port event and a discovery is waiting, but
	 * we are not waiting for devices, or (b) the device wait time has been
	 * exhausted but there is still discovery pending, start any leftover
	 * discovery in interrupt driven mode.
	 */
	esas2r_disc_start_waiting(a);

	/* Enable chip interrupts */
	a->int_mask = ESAS2R_INT_STS_MASK;
	esas2r_enable_chip_interrupts(a);
	esas2r_enable_heartbeat(a);
	rslt = true;

exit:
	/*
	 * Regardless of whether initialization was successful, certain things
	 * need to get done before we exit.
	 */

	if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
	    test_bit(AF_FIRST_INIT, &a->flags)) {
		/*
		 * Reinitialization was performed during the first
		 * initialization.  Only clear the chip reset flag so the
		 * original device polling is not cancelled.
		 */
		if (!rslt)
			clear_bit(AF_CHPRST_PENDING, &a->flags);
	} else {
		/* First initialization or a subsequent re-init is complete. */
		if (!rslt) {
			clear_bit(AF_CHPRST_PENDING, &a->flags);
			clear_bit(AF_DISC_PENDING, &a->flags);
		}


		/* Enable deferred processing after the first initialization. */
		if (test_bit(AF_FIRST_INIT, &a->flags)) {
			clear_bit(AF_FIRST_INIT, &a->flags);

			if (atomic_dec_return(&a->disable_cnt) == 0)
				esas2r_do_deferred_processes(a);
		}
	}

	return rslt;
}
1456
/*
 * Request a full adapter reset.  Marks the reset as driver/OS initiated,
 * performs the local reset, and schedules the tasklet to finish recovery.
 */
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
	/* record that this reset was requested by the driver, not the chip */
	set_bit(AF_OS_RESET, &a->flags);
	esas2r_local_reset_adapter(a);
	/* defer the remaining reset processing to tasklet context */
	esas2r_schedule_tasklet(a);
}
1463
esas2r_reset_chip(struct esas2r_adapter * a)1464 void esas2r_reset_chip(struct esas2r_adapter *a)
1465 {
1466 if (!esas2r_is_adapter_present(a))
1467 return;
1468
1469 /*
1470 * Before we reset the chip, save off the VDA core dump. The VDA core
1471 * dump is located in the upper 512KB of the onchip SRAM. Make sure
1472 * to not overwrite a previous crash that was saved.
1473 */
1474 if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
1475 !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
1476 esas2r_read_mem_block(a,
1477 a->fw_coredump_buff,
1478 MW_DATA_ADDR_SRAM + 0x80000,
1479 ESAS2R_FWCOREDUMP_SZ);
1480
1481 set_bit(AF2_COREDUMP_SAVED, &a->flags2);
1482 }
1483
1484 clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
1485
1486 /* Reset the chip */
1487 if (a->pcid->revision == MVR_FREY_B2)
1488 esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
1489 MU_CTL_IN_FULL_RST2);
1490 else
1491 esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
1492 MU_CTL_IN_FULL_RST);
1493
1494
1495 /* Stall a little while to let the reset condition clear */
1496 mdelay(10);
1497 }
1498
esas2r_power_down_notify_firmware(struct esas2r_adapter * a)1499 static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
1500 {
1501 u32 starttime;
1502 u32 doorbell;
1503
1504 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
1505 starttime = jiffies_to_msecs(jiffies);
1506
1507 while (true) {
1508 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1509 if (doorbell & DRBL_POWER_DOWN) {
1510 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1511 doorbell);
1512 break;
1513 }
1514
1515 schedule_timeout_interruptible(msecs_to_jiffies(100));
1516
1517 if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
1518 esas2r_hdebug("Timeout waiting for power down");
1519 break;
1520 }
1521 }
1522 }
1523
/*
 * Perform power management processing including managing device states, adapter
 * states, interrupts, and I/O.
 *
 * Quiesces the adapter for power down: disables interrupts and the heartbeat,
 * waits for VDA activity to drain, optionally notifies the firmware, then
 * suspends I/O processing and removes all target devices.
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
	set_bit(AF_POWER_MGT, &a->flags);
	set_bit(AF_POWER_DOWN, &a->flags);

	if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
		u32 starttime;
		u32 doorbell;

		/*
		 * We are currently running OK and will be reinitializing later.
		 * increment the disable count to coordinate with
		 * esas2r_init_adapter.  We don't have to do this in degraded
		 * mode since we never enabled interrupts in the first place.
		 */
		esas2r_disable_chip_interrupts(a);
		esas2r_disable_heartbeat(a);

		/* wait for any VDA activity to clear before continuing */
		esas2r_write_register_dword(a, MU_DOORBELL_IN,
					    DRBL_MSG_IFC_DOWN);
		starttime = jiffies_to_msecs(jiffies);

		while (true) {
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			if (doorbell & DRBL_MSG_IFC_DOWN) {
				/* firmware acknowledged; ack the doorbell */
				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
							    doorbell);
				break;
			}

			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/* give up after 3 seconds and power down anyway */
			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
				esas2r_hdebug(
					"timeout waiting for interface down");
				break;
			}
		}

		/*
		 * For versions of firmware that support it tell them the driver
		 * is powering down.
		 */
		if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
			esas2r_power_down_notify_firmware(a);
	}

	/* Suspend I/O processing. */
	set_bit(AF_OS_RESET, &a->flags);
	set_bit(AF_DISC_PENDING, &a->flags);
	set_bit(AF_CHPRST_PENDING, &a->flags);

	esas2r_process_adapter_reset(a);

	/* Remove devices now that I/O is cleaned up. */
	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
	esas2r_targ_db_remove_all(a, false);
}
1588
1589 /*
1590 * Perform power management processing including managing device states, adapter
1591 * states, interrupts, and I/O.
1592 */
esas2r_power_up(struct esas2r_adapter * a,bool init_poll)1593 bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
1594 {
1595 bool ret;
1596
1597 clear_bit(AF_POWER_DOWN, &a->flags);
1598 esas2r_init_pci_cfg_space(a);
1599 set_bit(AF_FIRST_INIT, &a->flags);
1600 atomic_inc(&a->disable_cnt);
1601
1602 /* reinitialize the adapter */
1603 ret = esas2r_check_adapter(a);
1604 if (!esas2r_init_adapter_hw(a, init_poll))
1605 ret = false;
1606
1607 /* send the reset asynchronous event */
1608 esas2r_send_reset_ae(a, true);
1609
1610 /* clear this flag after initialization. */
1611 clear_bit(AF_POWER_MGT, &a->flags);
1612 return ret;
1613 }
1614
esas2r_is_adapter_present(struct esas2r_adapter * a)1615 bool esas2r_is_adapter_present(struct esas2r_adapter *a)
1616 {
1617 if (test_bit(AF_NOT_PRESENT, &a->flags))
1618 return false;
1619
1620 if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
1621 set_bit(AF_NOT_PRESENT, &a->flags);
1622
1623 return false;
1624 }
1625 return true;
1626 }
1627
esas2r_get_model_name(struct esas2r_adapter * a)1628 const char *esas2r_get_model_name(struct esas2r_adapter *a)
1629 {
1630 switch (a->pcid->subsystem_device) {
1631 case ATTO_ESAS_R680:
1632 return "ATTO ExpressSAS R680";
1633
1634 case ATTO_ESAS_R608:
1635 return "ATTO ExpressSAS R608";
1636
1637 case ATTO_ESAS_R60F:
1638 return "ATTO ExpressSAS R60F";
1639
1640 case ATTO_ESAS_R6F0:
1641 return "ATTO ExpressSAS R6F0";
1642
1643 case ATTO_ESAS_R644:
1644 return "ATTO ExpressSAS R644";
1645
1646 case ATTO_ESAS_R648:
1647 return "ATTO ExpressSAS R648";
1648
1649 case ATTO_TSSC_3808:
1650 return "ATTO ThunderStream SC 3808D";
1651
1652 case ATTO_TSSC_3808E:
1653 return "ATTO ThunderStream SC 3808E";
1654
1655 case ATTO_TLSH_1068:
1656 return "ATTO ThunderLink SH 1068";
1657 }
1658
1659 return "ATTO SAS Controller";
1660 }
1661
esas2r_get_model_name_short(struct esas2r_adapter * a)1662 const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
1663 {
1664 switch (a->pcid->subsystem_device) {
1665 case ATTO_ESAS_R680:
1666 return "R680";
1667
1668 case ATTO_ESAS_R608:
1669 return "R608";
1670
1671 case ATTO_ESAS_R60F:
1672 return "R60F";
1673
1674 case ATTO_ESAS_R6F0:
1675 return "R6F0";
1676
1677 case ATTO_ESAS_R644:
1678 return "R644";
1679
1680 case ATTO_ESAS_R648:
1681 return "R648";
1682
1683 case ATTO_TSSC_3808:
1684 return "SC 3808D";
1685
1686 case ATTO_TSSC_3808E:
1687 return "SC 3808E";
1688
1689 case ATTO_TLSH_1068:
1690 return "SH 1068";
1691 }
1692
1693 return "unknown";
1694 }
1695