xref: /linux/drivers/scsi/aic94xx/aic94xx_hwi.c (revision 8934827db5403eae57d4537114a9ff88b0a8460f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Aic94xx SAS/SATA driver hardware interface.
4  *
5  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
6  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7  */
8 
9 #include <linux/pci.h>
10 #include <linux/slab.h>
11 #include <linux/delay.h>
12 #include <linux/module.h>
13 #include <linux/firmware.h>
14 
15 #include "aic94xx.h"
16 #include "aic94xx_reg.h"
17 #include "aic94xx_hwi.h"
18 #include "aic94xx_seq.h"
19 #include "aic94xx_dump.h"
20 
/* Usable size in bytes of MBAR0 sliding window B; computed once in
 * asd_init_sw() as the mapped region length minus 0x80. */
u32 MBAR0_SWB_SIZE;
22 
23 /* ---------- Initialization ---------- */
24 
asd_get_user_sas_addr(struct asd_ha_struct * asd_ha)25 static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
26 {
27 	/* adapter came with a sas address */
28 	if (asd_ha->hw_prof.sas_addr[0])
29 		return 0;
30 
31 	return sas_request_addr(asd_ha->sas_ha.shost,
32 				asd_ha->hw_prof.sas_addr);
33 }
34 
/* asd_propagate_sas_addr -- push the adapter SAS address to the phys.
 *
 * For every phy descriptor whose stored address has a non-zero first
 * byte, overwrite it with the adapter-wide SAS address from the
 * hardware profile.  Phys whose address starts with a zero byte are
 * skipped (presumably these are unconfigured/absent phys -- TODO
 * confirm against the flash/OCM parsing code).
 */
static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
{
	int i;

	for (i = 0; i < ASD_MAX_PHYS; i++) {
		if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0)
			continue;
		/* NOTE(review): despite the historical comment here
		 * ("set a phy's address only if it has none"), this
		 * replaces the address of every phy that already has
		 * one; phys with a zeroed address are left untouched.
		 */
		ASD_DPRINTK("setting phy%d addr to %llx\n", i,
			    SAS_ADDR(asd_ha->hw_prof.sas_addr));
		memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr,
		       asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
	}
}
50 
51 /* ---------- PHY initialization ---------- */
52 
/* asd_init_phy_identify -- build the IDENTIFY address frame for a phy.
 *
 * Fills the DMA-coherent identify frame (allocated by asd_init_phy())
 * with this phy's device type, initiator/target protocol bits, SAS
 * address and phy id; every other field is zeroed first.
 */
static void asd_init_phy_identify(struct asd_phy *phy)
{
	phy->identify_frame = phy->id_frm_tok->vaddr;

	memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));

	phy->identify_frame->dev_type = SAS_END_DEVICE;
	/* advertise only the roles this phy actually has */
	if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
		phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
	if (phy->sas_phy.role & PHY_ROLE_TARGET)
		phy->identify_frame->target_bits = phy->sas_phy.tproto;
	memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr,
	       SAS_ADDR_SIZE);
	phy->identify_frame->phy_id = phy->sas_phy.id;
}
68 
asd_init_phy(struct asd_phy * phy)69 static int asd_init_phy(struct asd_phy *phy)
70 {
71 	struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
72 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
73 
74 	sas_phy->enabled = 1;
75 	sas_phy->iproto = SAS_PROTOCOL_ALL;
76 	sas_phy->tproto = 0;
77 	sas_phy->role = PHY_ROLE_INITIATOR;
78 	sas_phy->oob_mode = OOB_NOT_CONNECTED;
79 	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
80 
81 	phy->id_frm_tok = asd_alloc_coherent(asd_ha,
82 					     sizeof(*phy->identify_frame),
83 					     GFP_KERNEL);
84 	if (!phy->id_frm_tok) {
85 		asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id);
86 		return -ENOMEM;
87 	} else
88 		asd_init_phy_identify(phy);
89 
90 	memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd));
91 
92 	return 0;
93 }
94 
asd_init_ports(struct asd_ha_struct * asd_ha)95 static void asd_init_ports(struct asd_ha_struct *asd_ha)
96 {
97 	int i;
98 
99 	spin_lock_init(&asd_ha->asd_ports_lock);
100 	for (i = 0; i < ASD_MAX_PHYS; i++) {
101 		struct asd_port *asd_port = &asd_ha->asd_ports[i];
102 
103 		memset(asd_port->sas_addr, 0, SAS_ADDR_SIZE);
104 		memset(asd_port->attached_sas_addr, 0, SAS_ADDR_SIZE);
105 		asd_port->phy_mask = 0;
106 		asd_port->num_phys = 0;
107 	}
108 }
109 
/* asd_init_phys -- set up driver/libsas state for all phys.
 *
 * Links every phy slot to its hardware descriptor and to the libsas
 * phy structure, then runs asd_init_phy() only on the phys marked
 * enabled in the hardware profile.  Returns 0 or the first phy-init
 * error.
 */
static int asd_init_phys(struct asd_ha_struct *asd_ha)
{
	u8 i;
	u8 phy_mask = asd_ha->hw_prof.enabled_phys;

	for (i = 0; i < ASD_MAX_PHYS; i++) {
		struct asd_phy *phy = &asd_ha->phys[i];

		phy->phy_desc = &asd_ha->hw_prof.phy_desc[i];
		phy->asd_port = NULL;

		phy->sas_phy.enabled = 0;
		phy->sas_phy.id = i;
		phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0];
		phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0];
		phy->sas_phy.ha = &asd_ha->sas_ha;
		phy->sas_phy.lldd_phy = phy;
	}

	/* Now enable and initialize only the enabled phys. */
	for_each_phy(phy_mask, phy_mask, i) {
		int err = asd_init_phy(&asd_ha->phys[i]);
		if (err)
			return err;
	}

	return 0;
}
138 
139 /* ---------- Sliding windows ---------- */
140 
/* asd_init_sw -- program the PCI MBAR sliding windows.
 *
 * Unlocks the MBARs via PCI_CONF_MBAR_KEY (the key that is read back
 * is written back; a zero key is skipped -- presumably already
 * unlocked, TODO confirm), points sliding windows A/B/C of MBAR0 at
 * the internal register regions, and, for memory-mapped adapters,
 * points MBAR1 at on-chip memory.  Also computes MBAR0_SWB_SIZE and
 * initializes the register-access lock.  Returns 0 or the PCI
 * config-space access error.
 */
static int asd_init_sw(struct asd_ha_struct *asd_ha)
{
	struct pci_dev *pcidev = asd_ha->pcidev;
	int err;
	u32 v;

	/* Unlock MBARs */
	err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v);
	if (err) {
		asd_printk("couldn't access conf. space of %s\n",
			   pci_name(pcidev));
		goto Err;
	}
	if (v)
		err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v);
	if (err) {
		asd_printk("couldn't write to MBAR_KEY of %s\n",
			   pci_name(pcidev));
		goto Err;
	}

	/* Set sliding windows A, B and C to point to proper internal
	 * memory regions.
	 */
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR);
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB,
			       REG_BASE_ADDR_CSEQCIO);
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI);
	asd_ha->io_handle[0].swa_base = REG_BASE_ADDR;
	asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO;
	asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI;
	/* window B gets the BAR length minus 0x80 -- presumably the
	 * window A span; verify against the register map */
	MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80;
	if (!asd_ha->iospace) {
		/* MBAR1 will point to OCM (On Chip Memory) */
		pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR);
		asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR;
	}
	spin_lock_init(&asd_ha->iolock);
Err:
	return err;
}
182 
183 /* ---------- SCB initialization ---------- */
184 
185 /**
186  * asd_init_scbs - manually allocate the first SCB.
187  * @asd_ha: pointer to host adapter structure
188  *
189  * This allocates the very first SCB which would be sent to the
190  * sequencer for execution.  Its bus address is written to
191  * CSEQ_Q_NEW_POINTER, mode page 2, mode 8.  Since the bus address of
192  * the _next_ scb to be DMA-ed to the host adapter is read from the last
193  * SCB DMA-ed to the host adapter, we have to always stay one step
194  * ahead of the sequencer and keep one SCB already allocated.
195  */
asd_init_scbs(struct asd_ha_struct * asd_ha)196 static int asd_init_scbs(struct asd_ha_struct *asd_ha)
197 {
198 	struct asd_seq_data *seq = &asd_ha->seq;
199 	int bitmap_bytes;
200 
201 	/* allocate the index array and bitmap */
202 	asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
203 	asd_ha->seq.tc_index_array = kcalloc(asd_ha->seq.tc_index_bitmap_bits,
204 					     sizeof(void *),
205 					     GFP_KERNEL);
206 	if (!asd_ha->seq.tc_index_array)
207 		return -ENOMEM;
208 
209 	bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
210 	bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
211 	asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
212 	if (!asd_ha->seq.tc_index_bitmap) {
213 		kfree(asd_ha->seq.tc_index_array);
214 		asd_ha->seq.tc_index_array = NULL;
215 		return -ENOMEM;
216 	}
217 
218 	spin_lock_init(&seq->tc_index_lock);
219 
220 	seq->next_scb.size = sizeof(struct scb);
221 	seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
222 					     &seq->next_scb.dma_handle);
223 	if (!seq->next_scb.vaddr) {
224 		kfree(asd_ha->seq.tc_index_bitmap);
225 		kfree(asd_ha->seq.tc_index_array);
226 		asd_ha->seq.tc_index_bitmap = NULL;
227 		asd_ha->seq.tc_index_array = NULL;
228 		return -ENOMEM;
229 	}
230 
231 	seq->pending = 0;
232 	spin_lock_init(&seq->pend_q_lock);
233 	INIT_LIST_HEAD(&seq->pend_q);
234 
235 	return 0;
236 }
237 
asd_get_max_scb_ddb(struct asd_ha_struct * asd_ha)238 static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
239 {
240 	asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
241 	asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
242 	ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n",
243 		    asd_ha->hw_prof.max_scbs,
244 		    asd_ha->hw_prof.max_ddbs);
245 }
246 
247 /* ---------- Done List initialization ---------- */
248 
249 static void asd_dl_tasklet_handler(unsigned long);
250 
/* asd_init_dl -- allocate the done list and its processing tasklet.
 *
 * The done list is a ring of ASD_DL_SIZE entries written by the
 * sequencer; dl_toggle/dl_next track the consumer position (see
 * asd_dl_tasklet_handler()).  Returns 0 or -ENOMEM.
 */
static int asd_init_dl(struct asd_ha_struct *asd_ha)
{
	asd_ha->seq.actual_dl
		= asd_alloc_coherent(asd_ha,
			     ASD_DL_SIZE * sizeof(struct done_list_struct),
				     GFP_KERNEL);
	if (!asd_ha->seq.actual_dl)
		return -ENOMEM;
	asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
	asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE;
	asd_ha->seq.dl_next = 0;
	tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler,
		     (unsigned long) asd_ha);

	return 0;
}
267 
268 /* ---------- EDB and ESCB init ---------- */
269 
/* asd_alloc_edbs -- allocate seq->num_edbs empty data buffers (edbs).
 *
 * Each edb is an ASD_EDB_SIZE DMA-coherent buffer, zeroed after
 * allocation; the tokens are kept in seq->edb_arr.  On failure every
 * buffer allocated so far and the array itself are released.
 * Returns 0 or -ENOMEM.
 */
static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, gfp_t gfp_flags)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	int i;

	seq->edb_arr = kmalloc_objs(*seq->edb_arr, seq->num_edbs, gfp_flags);
	if (!seq->edb_arr)
		return -ENOMEM;

	for (i = 0; i < seq->num_edbs; i++) {
		seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE,
						     gfp_flags);
		if (!seq->edb_arr[i])
			goto Err_unroll;
		memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
	}

	ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs);

	return 0;

Err_unroll:
	/* free only the buffers allocated before the failure */
	for (i-- ; i >= 0; i--)
		asd_free_coherent(asd_ha, seq->edb_arr[i]);
	kfree(seq->edb_arr);
	seq->edb_arr = NULL;

	return -ENOMEM;
}
299 
/* asd_alloc_escbs -- allocate the empty SCBs (escbs).
 *
 * Requests seq->num_escbs ascbs from asd_ascb_alloc_list(); on a
 * partial allocation, num_escbs is reduced by the shortfall left in
 * @escbs.  Each escb obtained is recorded in seq->escb_arr and marked
 * with the EMPTY_SCB opcode.  Returns 0, or -ENOMEM when the list
 * allocation itself fails (then only escb_arr is freed).
 */
static int asd_alloc_escbs(struct asd_ha_struct *asd_ha,
			   gfp_t gfp_flags)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	struct asd_ascb *escb;
	int i, escbs;

	seq->escb_arr = kmalloc_objs(*seq->escb_arr, seq->num_escbs, gfp_flags);
	if (!seq->escb_arr)
		return -ENOMEM;

	escbs = seq->num_escbs;
	escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags);
	if (!escb) {
		asd_printk("couldn't allocate list of escbs\n");
		goto Err;
	}
	seq->num_escbs -= escbs;  /* subtract what was not allocated */
	ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs);

	/* walk the ascb list, recording and tagging each escb */
	for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next,
							       struct asd_ascb,
							       list)) {
		seq->escb_arr[i] = escb;
		escb->scb->header.opcode = EMPTY_SCB;
	}

	return 0;
Err:
	kfree(seq->escb_arr);
	seq->escb_arr = NULL;
	return -ENOMEM;

}
334 
/* asd_assign_edbs2escbs -- wire the edb buffers into the escbs.
 *
 * Each escb carries ASD_EDBS_PER_SCB scatter/gather elements; this
 * assigns consecutive edbs (bus address and size, little-endian) to
 * the elements of every escb and records the first edb index used by
 * each ascb in ascb->edb_index.
 */
static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	int i, k, z = 0;

	for (i = 0; i < seq->num_escbs; i++) {
		struct asd_ascb *ascb = seq->escb_arr[i];
		struct empty_scb *escb = &ascb->scb->escb;

		/* z indexes edb_arr across all escbs */
		ascb->edb_index = z;

		escb->num_valid = ASD_EDBS_PER_SCB;

		for (k = 0; k < ASD_EDBS_PER_SCB; k++) {
			struct sg_el *eb = &escb->eb[k];
			struct asd_dma_tok *edb = seq->edb_arr[z++];

			memset(eb, 0, sizeof(*eb));
			eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle));
			eb->size = cpu_to_le32(((u32) edb->size));
		}
	}
}
358 
/**
 * asd_init_escbs -- allocate and initialize empty scbs
 * @asd_ha: pointer to host adapter structure
 *
 * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers.
 * They transport sense data, etc.  Also seeds seq->pending and
 * seq->can_queue.  Returns 0 or a negative errno.
 *
 * NOTE(review): if asd_alloc_escbs() fails, the edbs allocated just
 * before are not freed here -- verify the caller's teardown path
 * reclaims them.
 */
static int asd_init_escbs(struct asd_ha_struct *asd_ha)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	int err = 0;

	/* Allocate two empty data buffers (edb) per sequencer. */
	int edbs = 2*(1+asd_ha->hw_prof.num_phys);

	/* round up so every escb has a full set of edbs */
	seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB;
	seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB;

	err = asd_alloc_edbs(asd_ha, GFP_KERNEL);
	if (err) {
		asd_printk("couldn't allocate edbs\n");
		return err;
	}

	err = asd_alloc_escbs(asd_ha, GFP_KERNEL);
	if (err) {
		asd_printk("couldn't allocate escbs\n");
		return err;
	}

	asd_assign_edbs2escbs(asd_ha);
	/* In order to insure that normal SCBs do not overfill sequencer
	 * memory and leave no space for escbs (halting condition),
	 * we increment pending here by the number of escbs.  However,
	 * escbs are never pending.
	 */
	seq->pending   = seq->num_escbs;
	seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2;

	return 0;
}
400 
401 /* ---------- HW initialization ---------- */
402 
/**
 * asd_chip_hardrst -- hard reset the chip
 * @asd_ha: pointer to host adapter structure
 *
 * This takes 16 cycles and is synchronous to CFCLK, which runs
 * at 200 MHz, so this should take at most 80 nanoseconds.
 * The reset is requested by writing HARDRST to COMBIST, then
 * completion is polled (up to ~100 us) via the HARDRSTDET bit in
 * CHIMINT.  Returns 0 on success, -ENODEV if the reset was never
 * detected.
 */
int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
{
	int i;
	int count = 100;
	u32 reg;

	/* NOTE(review): the fourfold write is presumably required by
	 * the hardware handshake -- not documented here; confirm. */
	for (i = 0 ; i < 4 ; i++) {
		asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
	}

	do {
		udelay(1);
		reg = asd_read_reg_dword(asd_ha, CHIMINT);
		if (reg & HARDRSTDET) {
			/* ack hard-reset and power-on-reset status bits */
			asd_write_reg_dword(asd_ha, CHIMINT,
					    HARDRSTDET|PORRSTDET);
			return 0;
		}
	} while (--count > 0);

	return -ENODEV;
}
432 
/**
 * asd_init_chip -- initialize the chip
 * @asd_ha: pointer to host adapter structure
 *
 * Hard resets the chip, disables HA interrupts, downloads the
 * sequencer microcode and starts the sequencers.  The caller has to
 * explicitly enable HA interrupts with asd_enable_ints(asd_ha).
 * Returns 0 or the first error encountered.
 */
static int asd_init_chip(struct asd_ha_struct *asd_ha)
{
	int err;

	err = asd_chip_hardrst(asd_ha);
	if (err) {
		asd_printk("couldn't hard reset %s\n",
			    pci_name(asd_ha->pcidev));
		goto out;
	}

	asd_disable_ints(asd_ha);

	err = asd_init_seqs(asd_ha);
	if (err) {
		asd_printk("couldn't init seqs for %s\n",
			   pci_name(asd_ha->pcidev));
		goto out;
	}

	err = asd_start_seqs(asd_ha);
	if (err) {
		asd_printk("couldn't start seqs for %s\n",
			   pci_name(asd_ha->pcidev));
		goto out;
	}
out:
	return err;
}
470 
/* Number of DDBs that fit in the on-chip memory region. */
#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))

/* 0 (default) means "keep the hardware maximum"; see asd_extend_devctx(). */
static int max_devs = 0;
module_param_named(max_devs, max_devs, int, S_IRUGO);
MODULE_PARM_DESC(max_devs, "\n"
	"\tMaximum number of SAS devices to support (not LUs).\n"
	"\tDefault: 2176, Maximum: 65663.\n");

/* 0 (default) means "keep the hardware maximum"; see asd_extend_cmdctx(). */
static int max_cmnds = 0;
module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
MODULE_PARM_DESC(max_cmnds, "\n"
	"\tMaximum number of commands queuable.\n"
	"\tDefault: 512, Maximum: 66047.\n");
484 
/* asd_extend_devctx_ocm -- use on-chip memory for additional DDBs.
 *
 * Biases DEVCTXBASE below OCM_BASE_ADDR by the current DDB footprint
 * (so existing DDB indices keep mapping to chip memory -- presumably;
 * confirm against the register spec), sets bit 2 in CTXDOMAIN, and
 * grows max_ddbs by the MAX_DEVS that fit in OCM.
 */
static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
{
	unsigned long dma_addr = OCM_BASE_ADDR;
	u32 d;

	dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
	asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
	d |= 4;
	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
	asd_ha->hw_prof.max_ddbs += MAX_DEVS;
}
497 
/* asd_extend_devctx -- optionally extend device contexts to host memory.
 *
 * After claiming the OCM region, if the max_devs module parameter asks
 * for more DDBs than currently available (and is within the 16-bit
 * limit), allocates a DMA-coherent extension, aligns its base to
 * ASD_DDB_SIZE, biases it down by the existing DDB count, programs
 * DEVCTXBASE and clears bit 2 (the OCM bit set by
 * asd_extend_devctx_ocm()) in CTXDOMAIN.  On allocation failure
 * max_devs is clamped to the hardware maximum.  Returns 0 or -ENOMEM.
 */
static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
{
	dma_addr_t dma_handle;
	unsigned long dma_addr;
	u32 d;
	int size;

	asd_extend_devctx_ocm(asd_ha);

	asd_ha->hw_prof.ddb_ext = NULL;
	/* nothing to do when the request fits in what we already have */
	if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
		max_devs = asd_ha->hw_prof.max_ddbs;
		return 0;
	}

	/* +1 DDB of slack for the alignment adjustment below */
	size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;

	asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
	if (!asd_ha->hw_prof.ddb_ext) {
		asd_printk("couldn't allocate memory for %d devices\n",
			   max_devs);
		max_devs = asd_ha->hw_prof.max_ddbs;
		return -ENOMEM;
	}
	dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
	dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
	/* bias down so the extension starts at index max_ddbs --
	 * presumably keeps existing DDB indices valid; confirm */
	dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
	dma_handle = (dma_addr_t) dma_addr;
	asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
	d &= ~4;
	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);

	asd_ha->hw_prof.max_ddbs = max_devs;

	return 0;
}
535 
/* asd_extend_cmdctx -- optionally extend command contexts to host memory.
 *
 * Mirror of asd_extend_devctx() for SCBs: if the max_cmnds module
 * parameter asks for more SCBs than the chip provides (and is within
 * the 16-bit limit), allocates a DMA-coherent extension, aligns and
 * biases its base, programs CMDCTXBASE and clears bit 0 in CTXDOMAIN.
 * On allocation failure max_cmnds is clamped to the hardware maximum.
 * Returns 0 or -ENOMEM.
 */
static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
{
	dma_addr_t dma_handle;
	unsigned long dma_addr;
	u32 d;
	int size;

	asd_ha->hw_prof.scb_ext = NULL;
	/* nothing to do when the request fits in what we already have */
	if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
		max_cmnds = asd_ha->hw_prof.max_scbs;
		return 0;
	}

	/* +1 SCB of slack for the alignment adjustment below */
	size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;

	asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
	if (!asd_ha->hw_prof.scb_ext) {
		asd_printk("couldn't allocate memory for %d commands\n",
			   max_cmnds);
		max_cmnds = asd_ha->hw_prof.max_scbs;
		return -ENOMEM;
	}
	dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
	dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
	/* bias down so the extension starts at index max_scbs --
	 * presumably keeps existing SCB indices valid; confirm */
	dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
	dma_handle = (dma_addr_t) dma_addr;
	asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
	d &= ~1;
	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);

	asd_ha->hw_prof.max_scbs = max_cmnds;

	return 0;
}
571 
/**
 * asd_init_ctxmem -- initialize context memory
 * @asd_ha: pointer to host adapter structure
 *
 * This function sets the maximum number of SCBs and
 * DDBs which can be used by the sequencer.  This is normally
 * 512 and 128 respectively.  If support for more SCBs or more DDBs
 * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
 * initialized here to extend context memory to point to host memory,
 * thus allowing unlimited support for SCBs and DDBs -- only limited
 * by host memory.
 *
 * The return values of the extend helpers are deliberately ignored:
 * on failure they clamp the module parameters back to the hardware
 * maxima, which is a usable fallback.  Returns 0 or -ENOMEM (ddb
 * bitmap allocation).
 */
static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
{
	int bitmap_bytes;

	asd_get_max_scb_ddb(asd_ha);
	asd_extend_devctx(asd_ha);
	asd_extend_cmdctx(asd_ha);

	/* The kernel wants bitmaps to be unsigned long sized. */
	bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
	bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
	asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
	if (!asd_ha->hw_prof.ddb_bitmap)
		return -ENOMEM;
	spin_lock_init(&asd_ha->hw_prof.ddb_lock);

	return 0;
}
602 
asd_init_hw(struct asd_ha_struct * asd_ha)603 int asd_init_hw(struct asd_ha_struct *asd_ha)
604 {
605 	int err;
606 	u32 v;
607 
608 	err = asd_init_sw(asd_ha);
609 	if (err)
610 		return err;
611 
612 	err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
613 	if (err) {
614 		asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
615 			   pci_name(asd_ha->pcidev));
616 		return err;
617 	}
618 	err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
619 					v | SC_TMR_DIS);
620 	if (err) {
621 		asd_printk("couldn't disable split completion timer of %s\n",
622 			   pci_name(asd_ha->pcidev));
623 		return err;
624 	}
625 
626 	err = asd_read_ocm(asd_ha);
627 	if (err) {
628 		asd_printk("couldn't read ocm(%d)\n", err);
629 		/* While suspicios, it is not an error that we
630 		 * couldn't read the OCM. */
631 	}
632 
633 	err = asd_read_flash(asd_ha);
634 	if (err) {
635 		asd_printk("couldn't read flash(%d)\n", err);
636 		/* While suspicios, it is not an error that we
637 		 * couldn't read FLASH memory.
638 		 */
639 	}
640 
641 	asd_init_ctxmem(asd_ha);
642 
643 	if (asd_get_user_sas_addr(asd_ha)) {
644 		asd_printk("No SAS Address provided for %s\n",
645 			   pci_name(asd_ha->pcidev));
646 		err = -ENODEV;
647 		goto Out;
648 	}
649 
650 	asd_propagate_sas_addr(asd_ha);
651 
652 	err = asd_init_phys(asd_ha);
653 	if (err) {
654 		asd_printk("couldn't initialize phys for %s\n",
655 			    pci_name(asd_ha->pcidev));
656 		goto Out;
657 	}
658 
659 	asd_init_ports(asd_ha);
660 
661 	err = asd_init_scbs(asd_ha);
662 	if (err) {
663 		asd_printk("couldn't initialize scbs for %s\n",
664 			    pci_name(asd_ha->pcidev));
665 		goto Out;
666 	}
667 
668 	err = asd_init_dl(asd_ha);
669 	if (err) {
670 		asd_printk("couldn't initialize the done list:%d\n",
671 			    err);
672 		goto Out;
673 	}
674 
675 	err = asd_init_escbs(asd_ha);
676 	if (err) {
677 		asd_printk("couldn't initialize escbs\n");
678 		goto Out;
679 	}
680 
681 	err = asd_init_chip(asd_ha);
682 	if (err) {
683 		asd_printk("couldn't init the chip\n");
684 		goto Out;
685 	}
686 Out:
687 	return err;
688 }
689 
690 /* ---------- Chip reset ---------- */
691 
692 /**
693  * asd_chip_reset -- reset the host adapter, etc
694  * @asd_ha: pointer to host adapter structure of interest
695  *
696  * Called from the ISR.  Hard reset the chip.  Let everything
697  * timeout.  This should be no different than hot-unplugging the
698  * host adapter.  Once everything times out we'll init the chip with
699  * a call to asd_init_chip() and enable interrupts with asd_enable_ints().
700  * XXX finish.
701  */
asd_chip_reset(struct asd_ha_struct * asd_ha)702 static void asd_chip_reset(struct asd_ha_struct *asd_ha)
703 {
704 	ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
705 	asd_chip_hardrst(asd_ha);
706 }
707 
708 /* ---------- Done List Routines ---------- */
709 
/* asd_dl_tasklet_handler -- consume completed done-list entries.
 *
 * Walks the done-list ring from seq->dl_next, stopping at the first
 * entry whose toggle bits don't match the expected phase (i.e. not
 * yet written by the sequencer).  For each valid entry the matching
 * ascb is looked up by task-context index and its completion callback
 * is invoked; regular SCBs are first removed from the pending queue,
 * empty SCBs (escbs) are completed without dequeueing since they are
 * never pending.
 */
static void asd_dl_tasklet_handler(unsigned long data)
{
	struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data;
	struct asd_seq_data *seq = &asd_ha->seq;
	unsigned long flags;

	while (1) {
		struct done_list_struct *dl = &seq->dl[seq->dl_next];
		struct asd_ascb *ascb;

		/* entry not yet written for this pass of the ring */
		if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle)
			break;

		/* find the aSCB */
		spin_lock_irqsave(&seq->tc_index_lock, flags);
		ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index));
		spin_unlock_irqrestore(&seq->tc_index_lock, flags);
		if (unlikely(!ascb)) {
			ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n");
			goto next_1;
		} else if (ascb->scb->header.opcode == EMPTY_SCB) {
			/* escbs are never on the pending queue */
			goto out;
		} else if (!ascb->uldd_timer && !timer_delete(&ascb->timer)) {
			/* timer could not be cancelled -- presumably it
			 * already fired and its handler owns the ascb;
			 * skip completion here (confirm) */
			goto next_1;
		}
		/* regular SCB: take it off the pending queue */
		spin_lock_irqsave(&seq->pend_q_lock, flags);
		list_del_init(&ascb->list);
		seq->pending--;
		spin_unlock_irqrestore(&seq->pend_q_lock, flags);
	out:
		ascb->tasklet_complete(ascb, dl);

	next_1:
		/* advance ring position; flip expected toggle on wrap */
		seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1);
		if (!seq->dl_next)
			seq->dl_toggle ^= DL_TOGGLE_MASK;
	}
}
748 
749 /* ---------- Interrupt Service Routines ---------- */
750 
/**
 * asd_process_donelist_isr -- schedule processing of done list entries
 * @asd_ha: pointer to host adapter structure
 *
 * Defers the actual done-list walk to asd_dl_tasklet_handler() in
 * tasklet (softirq) context.
 */
static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
{
	tasklet_schedule(&asd_ha->seq.dl_tasklet);
}
759 
/**
 * asd_com_sas_isr -- process device communication interrupt (COMINT)
 * @asd_ha: pointer to host adapter structure
 *
 * Reads and clears COMSTAT, logs the specific failure (buffer parity,
 * command/status DMA error with a CSBUFFER dump, or overlay DMA
 * error), then unconditionally hard resets the chip.
 */
static void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
{
	u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT);

	/* clear COMSTAT int */
	asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF);

	if (comstat & CSBUFPERR) {
		asd_printk("%s: command/status buffer dma parity error\n",
			   pci_name(asd_ha->pcidev));
	} else if (comstat & CSERR) {
		int i;
		u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
		dmaerr &= 0xFF;	/* low byte holds the CS DMA error code */
		asd_printk("%s: command/status dma error, DMAERR: 0x%02x, "
			   "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n",
			   pci_name(asd_ha->pcidev),
			   dmaerr,
			   asd_read_reg_dword(asd_ha, CSDMAADR),
			   asd_read_reg_dword(asd_ha, CSDMAADR+4));
		asd_printk("CSBUFFER:\n");
		/* dump 8 rows of 16 bytes from the CS buffer window */
		for (i = 0; i < 8; i++) {
			asd_printk("%08x %08x %08x %08x\n",
				   asd_read_reg_dword(asd_ha, CSBUFFER),
				   asd_read_reg_dword(asd_ha, CSBUFFER+4),
				   asd_read_reg_dword(asd_ha, CSBUFFER+8),
				   asd_read_reg_dword(asd_ha, CSBUFFER+12));
		}
		asd_dump_seq_state(asd_ha, 0);
	} else if (comstat & OVLYERR) {
		u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
		dmaerr = (dmaerr >> 8) & 0xFF;	/* overlay error in byte 1 */
		asd_printk("%s: overlay dma error:0x%x\n",
			   pci_name(asd_ha->pcidev),
			   dmaerr);
	}
	asd_chip_reset(asd_ha);
}
802 
/* asd_arp2_err -- report ARP2 sequencer errors and reset the chip.
 *
 * Decodes the central (CSEQ) and per-link (LSEQ) sequencer interrupt
 * registers.  For a sequencer halt (ARP2HALTC) the halt code in bits
 * 23:16 of the ARP2INT register indexes the table below.  Always ends
 * with a chip reset.
 */
static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
{
	/* halt-code strings, indexed by (arp2int >> 16) & 0xFF */
	static const char *halt_code[256] = {
		"UNEXPECTED_INTERRUPT0",
		"UNEXPECTED_INTERRUPT1",
		"UNEXPECTED_INTERRUPT2",
		"UNEXPECTED_INTERRUPT3",
		"UNEXPECTED_INTERRUPT4",
		"UNEXPECTED_INTERRUPT5",
		"UNEXPECTED_INTERRUPT6",
		"UNEXPECTED_INTERRUPT7",
		"UNEXPECTED_INTERRUPT8",
		"UNEXPECTED_INTERRUPT9",
		"UNEXPECTED_INTERRUPT10",
		[11 ... 19] = "unknown[11,19]",
		"NO_FREE_SCB_AVAILABLE",
		"INVALID_SCB_OPCODE",
		"INVALID_MBX_OPCODE",
		"INVALID_ATA_STATE",
		"ATA_QUEUE_FULL",
		"ATA_TAG_TABLE_FAULT",
		"ATA_TAG_MASK_FAULT",
		"BAD_LINK_QUEUE_STATE",
		"DMA2CHIM_QUEUE_ERROR",
		"EMPTY_SCB_LIST_FULL",
		"unknown[30]",
		"IN_USE_SCB_ON_FREE_LIST",
		"BAD_OPEN_WAIT_STATE",
		"INVALID_STP_AFFILIATION",
		"unknown[34]",
		"EXEC_QUEUE_ERROR",
		"TOO_MANY_EMPTIES_NEEDED",
		"EMPTY_REQ_QUEUE_ERROR",
		"Q_MONIRTT_MGMT_ERROR",
		"TARGET_MODE_FLOW_ERROR",
		"DEVICE_QUEUE_NOT_FOUND",
		"START_IRTT_TIMER_ERROR",
		"ABORT_TASK_ILLEGAL_REQ",
		[43 ... 255] = "unknown[43,255]"
	};

	if (dchstatus & CSEQINT) {
		u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT);

		if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) {
			asd_printk("%s: CSEQ arp2int:0x%x\n",
				   pci_name(asd_ha->pcidev),
				   arp2int);
		} else if (arp2int & ARP2HALTC)
			asd_printk("%s: CSEQ halted: %s\n",
				   pci_name(asd_ha->pcidev),
				   halt_code[(arp2int>>16)&0xFF]);
		else
			asd_printk("%s: CARP2INT:0x%x\n",
				   pci_name(asd_ha->pcidev),
				   arp2int);
	}
	if (dchstatus & LSEQINT_MASK) {
		int lseq;
		u8  lseq_mask = dchstatus & LSEQINT_MASK;

		/* check each link sequencer flagged in the status */
		for_each_sequencer(lseq_mask, lseq_mask, lseq) {
			u32 arp2int = asd_read_reg_dword(asd_ha,
							 LmARP2INT(lseq));
			if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR
				       | ARP2CIOPERR)) {
				asd_printk("%s: LSEQ%d arp2int:0x%x\n",
					   pci_name(asd_ha->pcidev),
					   lseq, arp2int);
				/* XXX we should only do lseq reset */
			} else if (arp2int & ARP2HALTC)
				asd_printk("%s: LSEQ%d halted: %s\n",
					   pci_name(asd_ha->pcidev),
					   lseq,halt_code[(arp2int>>16)&0xFF]);
			else
				asd_printk("%s: LSEQ%d ARP2INT:0x%x\n",
					   pci_name(asd_ha->pcidev), lseq,
					   arp2int);
		}
	}
	asd_chip_reset(asd_ha);
}
885 
/**
 * asd_dch_sas_isr -- process device channel interrupt (DEVINT)
 * @asd_ha: pointer to host adapter structure
 *
 * A command FIFO timeout is reported directly and triggers a chip
 * reset; all other device-channel conditions are handed to the ARP2
 * sequencer-error decoder (which also ends in a chip reset).
 */
static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
{
	u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);

	if (dchstatus & CFIFTOERR) {
		asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev));
		asd_chip_reset(asd_ha);
	} else
		asd_arp2_err(asd_ha, dchstatus);
}
900 
/**
 * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
 * @asd_ha: pointer to host adapter structure
 *
 * Distinguishes SEEPROM format/checksum errors from CPI2 transfer
 * errors and logs the decoded status fields.  Returns without reset
 * only for the two "no actual error" cases; otherwise hard resets
 * the chip.
 */
static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
{
	u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);

	if (!(stat0r & ASIERR)) {
		asd_printk("hmm, EXSI interrupted but no error?\n");
		return;
	}

	if (stat0r & ASIFMTERR) {
		asd_printk("ASI SEEPROM format error for %s\n",
			   pci_name(asd_ha->pcidev));
	} else if (stat0r & ASISEECHKERR) {
		u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R);
		asd_printk("ASI SEEPROM checksum 0x%x error for %s\n",
			   stat1r & CHECKSUM_MASK,
			   pci_name(asd_ha->pcidev));
	} else {
		u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR);

		if (!(statr & CPI2ASIMSTERR_MASK)) {
			ASD_DPRINTK("hmm, ASIERR?\n");
			return;
		} else {
			u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR);
			u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR);

			/* unpack the CPI2 error fields from ASIERRSTATR */
			asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, "
				   "count: 0x%x, byteen: 0x%x, targerr: 0x%x "
				   "master id: 0x%x, master err: 0x%x\n",
				   pci_name(asd_ha->pcidev),
				   addr, data,
				   (statr & CPI2ASIBYTECNT_MASK) >> 16,
				   (statr & CPI2ASIBYTEEN_MASK) >> 12,
				   (statr & CPI2ASITARGERR_MASK) >> 8,
				   (statr & CPI2ASITARGMID_MASK) >> 4,
				   (statr & CPI2ASIMSTERR_MASK));
		}
	}
	asd_chip_reset(asd_ha);
}
946 
/**
 * asd_hst_pcix_isr -- process host interface interrupts
 * @asd_ha: pointer to host adapter structure
 *
 * Asserted on PCIX errors: target abort, etc.  Split-completion
 * conditions are acknowledged in PCIX_STATUS and return without a
 * reset; every other decoded error falls through to a chip reset.
 */
static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
{
	u16 status;
	u32 pcix_status;
	u32 ecc_status;

	pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status);
	pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status);
	pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status);

	if (status & PCI_STATUS_DETECTED_PARITY)
		asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev));
	else if (status & PCI_STATUS_REC_MASTER_ABORT)
		asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev));
	else if (status & PCI_STATUS_REC_TARGET_ABORT)
		asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev));
	else if (status & PCI_STATUS_PARITY)
		asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev));
	else if (pcix_status & RCV_SCE) {
		asd_printk("received split completion error for %s\n",
			   pci_name(asd_ha->pcidev));
		/* write back to acknowledge the condition */
		pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status);
		/* XXX: Abort task? */
		return;
	} else if (pcix_status & UNEXP_SC) {
		asd_printk("unexpected split completion for %s\n",
			   pci_name(asd_ha->pcidev));
		pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status);
		/* ignore */
		return;
	} else if (pcix_status & SC_DISCARD)
		asd_printk("split completion discarded for %s\n",
			   pci_name(asd_ha->pcidev));
	else if (ecc_status & UNCOR_ECCERR)
		asd_printk("uncorrectable ECC error for %s\n",
			   pci_name(asd_ha->pcidev));
	asd_chip_reset(asd_ha);
}
991 
992 /**
993  * asd_hw_isr -- host adapter interrupt service routine
994  * @irq: ignored
995  * @dev_id: pointer to host adapter structure
996  *
997  * The ISR processes done list entries and level 3 error handling.
998  */
asd_hw_isr(int irq,void * dev_id)999 irqreturn_t asd_hw_isr(int irq, void *dev_id)
1000 {
1001 	struct asd_ha_struct *asd_ha = dev_id;
1002 	u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT);
1003 
1004 	if (!chimint)
1005 		return IRQ_NONE;
1006 
1007 	asd_write_reg_dword(asd_ha, CHIMINT, chimint);
1008 	(void) asd_read_reg_dword(asd_ha, CHIMINT);
1009 
1010 	if (chimint & DLAVAIL)
1011 		asd_process_donelist_isr(asd_ha);
1012 	if (chimint & COMINT)
1013 		asd_com_sas_isr(asd_ha);
1014 	if (chimint & DEVINT)
1015 		asd_dch_sas_isr(asd_ha);
1016 	if (chimint & INITERR)
1017 		asd_rbi_exsi_isr(asd_ha);
1018 	if (chimint & HOSTERR)
1019 		asd_hst_pcix_isr(asd_ha);
1020 
1021 	return IRQ_HANDLED;
1022 }
1023 
1024 /* ---------- SCB handling ---------- */
1025 
/* Allocate and initialize a single aSCB: the host-side bookkeeping
 * structure from the slab cache, its DMA-able SCB from the pool, and a
 * task context index from the sequencer data.  Returns NULL on any
 * allocation failure, with everything acquired so far released.
 */
static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
				       gfp_t gfp_flags)
{
	extern struct kmem_cache *asd_ascb_cache;
	struct asd_seq_data *seq = &asd_ha->seq;
	unsigned long flags;
	struct asd_ascb *ascb;

	ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags);
	if (!ascb)
		return NULL;

	ascb->dma_scb.size = sizeof(struct scb);
	ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool, gfp_flags,
					      &ascb->dma_scb.dma_handle);
	if (!ascb->dma_scb.vaddr)
		goto err_free_ascb;

	asd_init_ascb(asd_ha, ascb);

	/* Task context indexes are shared; serialize the lookup. */
	spin_lock_irqsave(&seq->tc_index_lock, flags);
	ascb->tc_index = asd_tc_index_get(seq, ascb);
	spin_unlock_irqrestore(&seq->tc_index_lock, flags);
	if (ascb->tc_index == -1) {
		ASD_DPRINTK("no index for ascb\n");
		goto err_free_scb;
	}

	ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index);

	return ascb;

err_free_scb:
	dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
		      ascb->dma_scb.dma_handle);
err_free_ascb:
	kmem_cache_free(asd_ascb_cache, ascb);
	return NULL;
}
1064 
1065 /**
1066  * asd_ascb_alloc_list -- allocate a list of aSCBs
1067  * @asd_ha: pointer to host adapter structure
1068  * @num: pointer to integer number of aSCBs
1069  * @gfp_flags: GFP_ flags.
1070  *
1071  * This is the only function which is used to allocate aSCBs.
1072  * It can allocate one or many. If more than one, then they form
1073  * a linked list in two ways: by their list field of the ascb struct
1074  * and by the next_scb field of the scb_header.
1075  *
1076  * Returns NULL if no memory was available, else pointer to a list
1077  * of ascbs.  When this function returns, @num would be the number
1078  * of SCBs which were not able to be allocated, 0 if all requested
1079  * were able to be allocated.
1080  */
asd_ascb_alloc_list(struct asd_ha_struct * asd_ha,int * num,gfp_t gfp_flags)1081 struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
1082 				     *asd_ha, int *num,
1083 				     gfp_t gfp_flags)
1084 {
1085 	struct asd_ascb *first = NULL;
1086 
1087 	for ( ; *num > 0; --*num) {
1088 		struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags);
1089 
1090 		if (!ascb)
1091 			break;
1092 		else if (!first)
1093 			first = ascb;
1094 		else {
1095 			struct asd_ascb *last = list_entry(first->list.prev,
1096 							   struct asd_ascb,
1097 							   list);
1098 			list_add_tail(&ascb->list, &first->list);
1099 			last->scb->header.next_scb =
1100 				cpu_to_le64(((u64)ascb->dma_scb.dma_handle));
1101 		}
1102 	}
1103 
1104 	return first;
1105 }
1106 
/**
 * asd_swap_head_scb -- swap the head scb
 * @asd_ha: pointer to host adapter structure
 * @ascb: pointer to the head of an ascb list
 *
 * The sequencer knows the DMA address of the next SCB to be DMAed to
 * the host adapter, from initialization or from the last list DMAed.
 * seq->next_scb keeps the address of this SCB.  The sequencer will
 * DMA to the host adapter this list of SCBs.  But the head (first
 * element) of this list is not known to the sequencer.  Here we swap
 * the head of the list with the known SCB (memcpy()).
 * Only one memcpy() is required per list so it is in our interest
 * to keep the list of SCB as long as possible so that the ratio
 * of number of memcpy calls to the number of SCB DMA-ed is as small
 * as possible.
 *
 * LOCKING: called with the pending list lock held.
 */
static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
			      struct asd_ascb *ascb)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	/* The ascb list is a circular list_head chain, so the entry
	 * before the head is the tail of the list.
	 */
	struct asd_ascb *last = list_entry(ascb->list.prev,
					   struct asd_ascb,
					   list);
	/* Remember the head's original DMA token before overwriting it. */
	struct asd_dma_tok t = ascb->dma_scb;

	/* Copy the head SCB into the buffer the sequencer already knows
	 * about, then adopt that buffer as the head's own...
	 */
	memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb));
	ascb->dma_scb = seq->next_scb;
	ascb->scb = ascb->dma_scb.vaddr;
	/* ...and hand the head's old buffer back to seq as the new
	 * "next known SCB" for the following post.
	 */
	seq->next_scb = t;
	/* Terminate the chain: the tail's hardware next pointer refers
	 * to the new next_scb buffer.
	 */
	last->scb->header.next_scb =
		cpu_to_le64(((u64)seq->next_scb.dma_handle));
}
1141 
1142 /**
1143  * asd_start_scb_timers -- (add and) start timers of SCBs
1144  * @list: pointer to struct list_head of the scbs
1145  *
1146  * If an SCB in the @list has no timer function, assign the default
1147  * one,  then start the timer of the SCB.  This function is
1148  * intended to be called from asd_post_ascb_list(), just prior to
1149  * posting the SCBs to the sequencer.
1150  */
asd_start_scb_timers(struct list_head * list)1151 static void asd_start_scb_timers(struct list_head *list)
1152 {
1153 	struct asd_ascb *ascb;
1154 	list_for_each_entry(ascb, list, list) {
1155 		if (!ascb->uldd_timer) {
1156 			ascb->timer.function = asd_ascb_timedout;
1157 			ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
1158 			add_timer(&ascb->timer);
1159 		}
1160 	}
1161 }
1162 
/**
 * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter
 * @asd_ha: pointer to a host adapter structure
 * @ascb: pointer to the first aSCB in the list
 * @num: number of aSCBs in the list (to be posted)
 *
 * See queueing comment in asd_post_escb_list().
 *
 * Additional note on queuing: In order to minimize the ratio of memcpy()
 * to the number of ascbs sent, we try to batch-send as many ascbs as possible
 * in one go.
 * Two cases are possible:
 *    A) can_queue >= num,
 *    B) can_queue < num.
 * Case A: we can send the whole batch at once.  Increment "pending"
 * in the beginning of this function, when it is checked, in order to
 * eliminate races when this function is called by multiple processes.
 * Case B: should never happen.
 *
 * Returns 0 on success, or -SAS_QUEUE_FULL if fewer than @num free
 * slots remain (the batch is all-or-nothing).
 */
int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
		       int num)
{
	unsigned long flags;
	LIST_HEAD(list);
	int can_queue;

	/* Reserve room for the whole batch atomically under the
	 * pending-queue lock (the case A race elimination above).
	 */
	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
	can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending;
	if (can_queue >= num)
		asd_ha->seq.pending += num;
	else
		can_queue = 0;	/* all or nothing */

	if (!can_queue) {
		spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
		asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev));
		return -SAS_QUEUE_FULL;
	}

	/* Make the head of the list the SCB the sequencer already knows. */
	asd_swap_head_scb(asd_ha, ascb);

	/* Insert the local list head into the circular ascb chain so
	 * that "list" now heads the entire batch.
	 */
	__list_add(&list, ascb->list.prev, &ascb->list);

	asd_start_scb_timers(&list);

	/* Bump the producer count, move the batch onto the pending
	 * queue, and notify the hardware via the SCBPRO register.
	 */
	asd_ha->seq.scbpro += num;
	list_splice_init(&list, asd_ha->seq.pend_q.prev);
	asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);

	return 0;
}
1215 
1216 /**
1217  * asd_post_escb_list -- post a list of 1 or more empty scb
1218  * @asd_ha: pointer to a host adapter structure
1219  * @ascb: pointer to the first empty SCB in the list
1220  * @num: number of aSCBs in the list (to be posted)
1221  *
1222  * This is essentially the same as asd_post_ascb_list, but we do not
1223  * increment pending, add those to the pending list or get indexes.
1224  * See asd_init_escbs() and asd_init_post_escbs().
1225  *
1226  * Since sending a list of ascbs is a superset of sending a single
1227  * ascb, this function exists to generalize this.  More specifically,
1228  * when sending a list of those, we want to do only a _single_
1229  * memcpy() at swap head, as opposed to for each ascb sent (in the
1230  * case of sending them one by one).  That is, we want to minimize the
1231  * ratio of memcpy() operations to the number of ascbs sent.  The same
1232  * logic applies to asd_post_ascb_list().
1233  */
asd_post_escb_list(struct asd_ha_struct * asd_ha,struct asd_ascb * ascb,int num)1234 int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1235 		       int num)
1236 {
1237 	unsigned long flags;
1238 
1239 	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
1240 	asd_swap_head_scb(asd_ha, ascb);
1241 	asd_ha->seq.scbpro += num;
1242 	asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
1243 	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1244 
1245 	return 0;
1246 }
1247 
1248 /* ---------- LED ---------- */
1249 
1250 /**
1251  * asd_turn_led -- turn on/off an LED
1252  * @asd_ha: pointer to host adapter structure
1253  * @phy_id: the PHY id whose LED we want to manupulate
1254  * @op: 1 to turn on, 0 to turn off
1255  */
asd_turn_led(struct asd_ha_struct * asd_ha,int phy_id,int op)1256 void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1257 {
1258 	if (phy_id < ASD_MAX_PHYS) {
1259 		u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
1260 		if (op)
1261 			v |= LEDPOL;
1262 		else
1263 			v &= ~LEDPOL;
1264 		asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
1265 	}
1266 }
1267 
1268 /**
1269  * asd_control_led -- enable/disable an LED on the board
1270  * @asd_ha: pointer to host adapter structure
1271  * @phy_id: integer, the phy id
1272  * @op: integer, 1 to enable, 0 to disable the LED
1273  *
1274  * First we output enable the LED, then we set the source
1275  * to be an external module.
1276  */
asd_control_led(struct asd_ha_struct * asd_ha,int phy_id,int op)1277 void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1278 {
1279 	if (phy_id < ASD_MAX_PHYS) {
1280 		u32 v;
1281 
1282 		v = asd_read_reg_dword(asd_ha, GPIOOER);
1283 		if (op)
1284 			v |= (1 << phy_id);
1285 		else
1286 			v &= ~(1 << phy_id);
1287 		asd_write_reg_dword(asd_ha, GPIOOER, v);
1288 
1289 		v = asd_read_reg_dword(asd_ha, GPIOCNFGR);
1290 		if (op)
1291 			v |= (1 << phy_id);
1292 		else
1293 			v &= ~(1 << phy_id);
1294 		asd_write_reg_dword(asd_ha, GPIOCNFGR, v);
1295 	}
1296 }
1297 
1298 /* ---------- PHY enable ---------- */
1299 
/* Program the per-phy OOB registers for one phy: hot-plug delay,
 * manufacturing-sector phy control defaults, COMINIT timeout and the
 * DMA address of the TX IDENTIFY address frame; finally enable the
 * phy's LED.  Always returns 0.
 */
static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
{
	struct asd_phy *phy = &asd_ha->phys[phy_id];

	/* Write 0 to INT_ENABLE_2 -- presumably masks this phy's OOB
	 * interrupts during setup; TODO confirm against register spec.
	 */
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY),
			   HOTPLUG_DELAY_TIMEOUT);

	/* Get defaults from manuf. sector */
	/* XXX we need defaults for those in case MS is broken. */
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0),
			   phy->phy_desc->phy_control_0);
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1),
			   phy->phy_desc->phy_control_1);
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2),
			   phy->phy_desc->phy_control_2);
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3),
			   phy->phy_desc->phy_control_3);

	asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id),
			    ASD_COMINIT_TIMEOUT);

	/* Point the sequencer at this phy's IDENTIFY address frame. */
	asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id),
			   phy->id_frm_tok->dma_handle);

	asd_control_led(asd_ha, phy_id, 1);

	return 0;
}
1329 
asd_enable_phys(struct asd_ha_struct * asd_ha,const u8 phy_mask)1330 int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
1331 {
1332 	u8  phy_m;
1333 	u8  i;
1334 	int num = 0, k;
1335 	struct asd_ascb *ascb;
1336 	struct asd_ascb *ascb_list;
1337 
1338 	if (!phy_mask) {
1339 		asd_printk("%s called with phy_mask of 0!?\n", __func__);
1340 		return 0;
1341 	}
1342 
1343 	for_each_phy(phy_mask, phy_m, i) {
1344 		num++;
1345 		asd_enable_phy(asd_ha, i);
1346 	}
1347 
1348 	k = num;
1349 	ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL);
1350 	if (!ascb_list) {
1351 		asd_printk("no memory for control phy ascb list\n");
1352 		return -ENOMEM;
1353 	}
1354 	num -= k;
1355 
1356 	ascb = ascb_list;
1357 	for_each_phy(phy_mask, phy_m, i) {
1358 		asd_build_control_phy(ascb, i, ENABLE_PHY);
1359 		ascb = list_entry(ascb->list.next, struct asd_ascb, list);
1360 	}
1361 	ASD_DPRINTK("posting %d control phy scbs\n", num);
1362 	k = asd_post_ascb_list(asd_ha, ascb_list, num);
1363 	if (k)
1364 		asd_ascb_free_list(ascb_list);
1365 
1366 	return k;
1367 }
1368