xref: /linux/drivers/scsi/aic94xx/aic94xx_scb.c (revision 306ec721d043bbe5e818d59fbb37c28d999b5d8b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Aic94xx SAS/SATA driver SCB management.
4  *
5  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
6  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7  */
8 
9 #include <linux/gfp.h>
10 #include <scsi/scsi_host.h>
11 
12 #include "aic94xx.h"
13 #include "aic94xx_reg.h"
14 #include "aic94xx_hwi.h"
15 #include "aic94xx_seq.h"
16 
17 #include "aic94xx_dump.h"
18 
19 /* ---------- EMPTY SCB ---------- */
20 
21 #define DL_PHY_MASK      7
22 #define BYTES_DMAED      0
23 #define PRIMITIVE_RECVD  0x08
24 #define PHY_EVENT        0x10
25 #define LINK_RESET_ERROR 0x18
26 #define TIMER_EVENT      0x20
27 #define REQ_TASK_ABORT   0xF0
28 #define REQ_DEVICE_RESET 0xF1
29 #define SIGNAL_NCQ_ERROR 0xF2
30 #define CLEAR_NCQ_ERROR  0xF3
31 
32 #define PHY_EVENTS_STATUS (CURRENT_LOSS_OF_SIGNAL | CURRENT_OOB_DONE   \
33 			   | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
34 			   | CURRENT_OOB_ERROR)
35 
36 static void get_lrate_mode(struct asd_phy *phy, u8 oob_mode)
37 {
38 	struct sas_phy *sas_phy = phy->sas_phy.phy;
39 
40 	switch (oob_mode & 7) {
41 	case PHY_SPEED_60:
42 		/* FIXME: sas transport class doesn't have this */
43 		phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
44 		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
45 		break;
46 	case PHY_SPEED_30:
47 		phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
48 		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
49 		break;
50 	case PHY_SPEED_15:
51 		phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
52 		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
53 		break;
54 	}
55 	sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
56 	sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
57 	sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
58 	sas_phy->maximum_linkrate = phy->phy_desc->max_sas_lrate;
59 	sas_phy->minimum_linkrate = phy->phy_desc->min_sas_lrate;
60 
61 	if (oob_mode & SAS_MODE)
62 		phy->sas_phy.oob_mode = SAS_OOB_MODE;
63 	else if (oob_mode & SATA_MODE)
64 		phy->sas_phy.oob_mode = SATA_OOB_MODE;
65 }
66 
67 static void asd_phy_event_tasklet(struct asd_ascb *ascb,
68 					 struct done_list_struct *dl)
69 {
70 	struct asd_ha_struct *asd_ha = ascb->ha;
71 	int phy_id = dl->status_block[0] & DL_PHY_MASK;
72 	struct asd_phy *phy = &asd_ha->phys[phy_id];
73 
74 	u8 oob_status = dl->status_block[1] & PHY_EVENTS_STATUS;
75 	u8 oob_mode   = dl->status_block[2];
76 
77 	switch (oob_status) {
78 	case CURRENT_LOSS_OF_SIGNAL:
79 		/* directly attached device was removed */
80 		ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
81 		asd_turn_led(asd_ha, phy_id, 0);
82 		sas_phy_disconnected(&phy->sas_phy);
83 		sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
84 				     GFP_ATOMIC);
85 		break;
86 	case CURRENT_OOB_DONE:
87 		/* hot plugged device */
88 		asd_turn_led(asd_ha, phy_id, 1);
89 		get_lrate_mode(phy, oob_mode);
90 		ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
91 			    phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
92 		sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
93 		break;
94 	case CURRENT_SPINUP_HOLD:
95 		/* hot plug SATA, no COMWAKE sent */
96 		asd_turn_led(asd_ha, phy_id, 1);
97 		sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
98 				     GFP_ATOMIC);
99 		break;
100 	case CURRENT_GTO_TIMEOUT:
101 	case CURRENT_OOB_ERROR:
102 		ASD_DPRINTK("phy%d error while OOB: oob status:0x%x\n", phy_id,
103 			    dl->status_block[1]);
104 		asd_turn_led(asd_ha, phy_id, 0);
105 		sas_phy_disconnected(&phy->sas_phy);
106 		sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
107 		break;
108 	}
109 }
110 
111 /* Ordinal of @phy among the enabled phys; handles sparsely enabled phys. */
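/*
 * Worked example (illustrative values): with enabled_phys == 0x14, i.e.
 * only phys 2 and 4 enabled, ord_phy() returns 0 for phy 2 and 1 for
 * phy 4, so sparsely enabled phys still get consecutive ordinals.
 */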
112 static unsigned ord_phy(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
113 {
114 	u8 enabled_mask = asd_ha->hw_prof.enabled_phys;
115 	int i, k = 0;
116 
117 	for_each_phy(enabled_mask, enabled_mask, i) {
118 		if (&asd_ha->phys[i] == phy)
119 			return k;
120 		k++;
121 	}
122 	return 0;
123 }
124 
125 /**
126  * asd_get_attached_sas_addr -- extract/generate attached SAS address
127  * @phy: pointer to asd_phy
128  * @sas_addr: pointer to buffer where the SAS address is to be written
129  *
130  * This function extracts the attached SAS address from a received
131  * IDENTIFY frame.  If OOB completed in SATA mode, an address is instead
132  * generated from the phy's SAS address, sata_name_base and phy ordinal.
133  *
134  * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
135  * buffer.
136  */
137 static void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr)
138 {
139 	if (phy->sas_phy.frame_rcvd[0] == 0x34
140 	    && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
141 		struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
142 		/* FIS device-to-host */
143 		u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr);
144 
145 		addr += asd_ha->hw_prof.sata_name_base + ord_phy(asd_ha, phy);
146 		*(__be64 *)sas_addr = cpu_to_be64(addr);
147 	} else {
148 		struct sas_identify_frame *idframe =
149 			(void *) phy->sas_phy.frame_rcvd;
150 		memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
151 	}
152 }
153 
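/*
 * A phy whose local and attached SAS addresses match an existing
 * asd_port joins that port (forming a wide port); otherwise the first
 * free port slot found below is claimed for a new port.
 */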
154 static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
155 {
156 	int i;
157 	struct asd_port *free_port = NULL;
158 	struct asd_port *port;
159 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
160 	unsigned long flags;
161 
162 	spin_lock_irqsave(&asd_ha->asd_ports_lock, flags);
163 	if (!phy->asd_port) {
164 		for (i = 0; i < ASD_MAX_PHYS; i++) {
165 			port = &asd_ha->asd_ports[i];
166 
167 			/* Check for wide port */
168 			if (port->num_phys > 0 &&
169 			    memcmp(port->sas_addr, sas_phy->sas_addr,
170 				   SAS_ADDR_SIZE) == 0 &&
171 			    memcmp(port->attached_sas_addr,
172 				   sas_phy->attached_sas_addr,
173 				   SAS_ADDR_SIZE) == 0) {
174 				break;
175 			}
176 
177 			/* Find a free port */
178 			if (port->num_phys == 0 && free_port == NULL) {
179 				free_port = port;
180 			}
181 		}
182 
183 		/* Use a free port if this doesn't form a wide port */
184 		if (i >= ASD_MAX_PHYS) {
185 			port = free_port;
186 			BUG_ON(!port);
187 			memcpy(port->sas_addr, sas_phy->sas_addr,
188 			       SAS_ADDR_SIZE);
189 			memcpy(port->attached_sas_addr,
190 			       sas_phy->attached_sas_addr,
191 			       SAS_ADDR_SIZE);
192 		}
193 		port->num_phys++;
194 		port->phy_mask |= (1U << sas_phy->id);
195 		phy->asd_port = port;
196 	}
197 	ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
198 		    __func__, phy->asd_port->phy_mask, sas_phy->id);
199 	asd_update_port_links(asd_ha, phy);
200 	spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
201 }
202 
203 static void asd_deform_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
204 {
205 	struct asd_port *port = phy->asd_port;
206 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
207 	unsigned long flags;
208 
209 	spin_lock_irqsave(&asd_ha->asd_ports_lock, flags);
210 	if (port) {
211 		port->num_phys--;
212 		port->phy_mask &= ~(1U << sas_phy->id);
213 		phy->asd_port = NULL;
214 	}
215 	spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
216 }
217 
218 static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
219 				    struct done_list_struct *dl,
220 				    int edb_id, int phy_id)
221 {
222 	unsigned long flags;
223 	int edb_el = edb_id + ascb->edb_index;
224 	struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
225 	struct asd_phy *phy = &ascb->ha->phys[phy_id];
226 	u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];
227 
228 	size = min(size, (u16) sizeof(phy->frame_rcvd));
229 
230 	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
231 	memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size);
232 	phy->sas_phy.frame_rcvd_size = size;
233 	asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
234 	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
235 	asd_dump_frame_rcvd(phy, dl);
236 	asd_form_port(ascb->ha, phy);
237 	sas_notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
238 }
239 
240 static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
241 				       struct done_list_struct *dl,
242 				       int phy_id)
243 {
244 	struct asd_ha_struct *asd_ha = ascb->ha;
245 	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
246 	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
247 	struct asd_phy *phy = &asd_ha->phys[phy_id];
248 	u8 lr_error = dl->status_block[1];
249 	u8 retries_left = dl->status_block[2];
250 
251 	switch (lr_error) {
252 	case 0:
253 		ASD_DPRINTK("phy%d: Receive ID timer expired\n", phy_id);
254 		break;
255 	case 1:
256 		ASD_DPRINTK("phy%d: Loss of signal\n", phy_id);
257 		break;
258 	case 2:
259 		ASD_DPRINTK("phy%d: Loss of dword sync\n", phy_id);
260 		break;
261 	case 3:
262 		ASD_DPRINTK("phy%d: Receive FIS timeout\n", phy_id);
263 		break;
264 	default:
265 		ASD_DPRINTK("phy%d: unknown link reset error code: 0x%x\n",
266 			    phy_id, lr_error);
267 		break;
268 	}
269 
270 	asd_turn_led(asd_ha, phy_id, 0);
271 	sas_phy_disconnected(sas_phy);
272 	asd_deform_port(asd_ha, phy);
273 	sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, GFP_ATOMIC);
274 
275 	if (retries_left == 0) {
276 		int num = 1;
277 		struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
278 							  GFP_ATOMIC);
279 		if (!cp) {
280 			asd_printk("%s: out of memory\n", __func__);
281 			goto out;
282 		}
283 		ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
284 			    phy_id);
285 		asd_build_control_phy(cp, phy_id, ENABLE_PHY);
286 		if (asd_post_ascb_list(ascb->ha, cp, 1) != 0)
287 			asd_ascb_free(cp);
288 	}
289 out:
290 	;
291 }
292 
293 static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
294 				       struct done_list_struct *dl,
295 				       int phy_id)
296 {
297 	unsigned long flags;
298 	struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha;
299 	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
300 	struct asd_ha_struct *asd_ha = ascb->ha;
301 	struct asd_phy *phy = &asd_ha->phys[phy_id];
302 	u8  reg  = dl->status_block[1];
303 	u32 cont = dl->status_block[2] << ((reg & 3)*8);
304 
305 	reg &= ~3;
306 	switch (reg) {
307 	case LmPRMSTAT0BYTE0:
308 		switch (cont) {
309 		case LmBROADCH:
310 		case LmBROADRVCH0:
311 		case LmBROADRVCH1:
312 		case LmBROADSES:
313 			ASD_DPRINTK("phy%d: BROADCAST change received:%d\n",
314 				    phy_id, cont);
315 			spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
316 			sas_phy->sas_prim = ffs(cont);
317 			spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
318 			sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
319 					      GFP_ATOMIC);
320 			break;
321 
322 		case LmUNKNOWNP:
323 			ASD_DPRINTK("phy%d: unknown BREAK\n", phy_id);
324 			break;
325 
326 		default:
327 			ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
328 				    phy_id, reg, cont);
329 			break;
330 		}
331 		break;
332 	case LmPRMSTAT1BYTE0:
333 		switch (cont) {
334 		case LmHARDRST:
335 			ASD_DPRINTK("phy%d: HARD_RESET primitive rcvd\n",
336 				    phy_id);
337 			/* The sequencer disables all phys on that port.
338 			 * We have to re-enable the phys ourselves. */
339 			asd_deform_port(asd_ha, phy);
340 			sas_notify_port_event(sas_phy, PORTE_HARD_RESET,
341 					      GFP_ATOMIC);
342 			break;
343 
344 		default:
345 			ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
346 				    phy_id, reg, cont);
347 			break;
348 		}
349 		break;
350 	default:
351 		ASD_DPRINTK("unknown primitive register:0x%x\n",
352 			    dl->status_block[1]);
353 		break;
354 	}
355 }
356 
357 /**
358  * asd_invalidate_edb -- invalidate an EDB and if necessary post the ESCB
359  * @ascb: pointer to Empty SCB
360  * @edb_id: index [0,6] to the empty data buffer which is to be invalidated
361  *
362  * After an EDB has been invalidated, if all EDBs in this ESCB have been
363  * invalidated, the ESCB is posted back to the sequencer.
364  * Context is tasklet/IRQ.
365  */
366 void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
367 {
368 	struct asd_seq_data *seq = &ascb->ha->seq;
369 	struct empty_scb *escb = &ascb->scb->escb;
370 	struct sg_el     *eb   = &escb->eb[edb_id];
371 	struct asd_dma_tok *edb = seq->edb_arr[ascb->edb_index + edb_id];
372 
373 	memset(edb->vaddr, 0, ASD_EDB_SIZE);
374 	eb->flags |= ELEMENT_NOT_VALID;
375 	escb->num_valid--;
376 
377 	if (escb->num_valid == 0) {
378 		int i;
379 		/* ASD_DPRINTK("reposting escb: vaddr: 0x%p, "
380 			    "dma_handle: 0x%08llx, next: 0x%08llx, "
381 			    "index:%d, opcode:0x%02x\n",
382 			    ascb->dma_scb.vaddr,
383 			    (u64)ascb->dma_scb.dma_handle,
384 			    le64_to_cpu(ascb->scb->header.next_scb),
385 			    le16_to_cpu(ascb->scb->header.index),
386 			    ascb->scb->header.opcode);
387 		*/
388 		escb->num_valid = ASD_EDBS_PER_SCB;
389 		for (i = 0; i < ASD_EDBS_PER_SCB; i++)
390 			escb->eb[i].flags = 0;
391 		if (!list_empty(&ascb->list))
392 			list_del_init(&ascb->list);
393 		i = asd_post_escb_list(ascb->ha, ascb, 1);
394 		if (i)
395 			asd_printk("couldn't post escb, err:%d\n", i);
396 	}
397 }
398 
399 static void escb_tasklet_complete(struct asd_ascb *ascb,
400 				  struct done_list_struct *dl)
401 {
402 	struct asd_ha_struct *asd_ha = ascb->ha;
403 	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
404 	int edb = (dl->opcode & DL_PHY_MASK) - 1; /* [0xc1,0xc7] -> [0,6] */
405 	u8  sb_opcode = dl->status_block[0];
406 	int phy_id = sb_opcode & DL_PHY_MASK;
407 	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
408 	struct asd_phy *phy = &asd_ha->phys[phy_id];
409 
410 	if (edb > 6 || edb < 0) {
411 		ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
412 			    edb, dl->opcode);
413 		ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
414 			    sb_opcode, phy_id);
415 		ASD_DPRINTK("escb: vaddr: 0x%p, "
416 			    "dma_handle: 0x%llx, next: 0x%llx, "
417 			    "index:%d, opcode:0x%02x\n",
418 			    ascb->dma_scb.vaddr,
419 			    (unsigned long long)ascb->dma_scb.dma_handle,
420 			    (unsigned long long)
421 			    le64_to_cpu(ascb->scb->header.next_scb),
422 			    le16_to_cpu(ascb->scb->header.index),
423 			    ascb->scb->header.opcode);
424 	}
425 
426 	/* Catch these before we mask off the sb_opcode bits */
427 	switch (sb_opcode) {
428 	case REQ_TASK_ABORT: {
429 		struct asd_ascb *a, *b;
430 		u16 tc_abort;
431 		struct domain_device *failed_dev = NULL;
432 
433 		ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
434 			    __func__, dl->status_block[3]);
435 
436 		/*
437 		 * Find the task that caused the abort and abort it first.
438 		 * The sequencer won't put anything on the done list until
439 		 * that happens.
440 		 */
441 		tc_abort = *((u16 *)(&dl->status_block[1]));
442 		tc_abort = le16_to_cpu(tc_abort);
443 
444 		list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
445 			struct sas_task *task = a->uldd_task;
446 
447 			if (a->tc_index != tc_abort)
448 				continue;
449 
450 			if (task) {
451 				failed_dev = task->dev;
452 				sas_task_abort(task);
453 			} else {
454 				ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n",
455 					    a->scb->header.opcode);
456 			}
457 			break;
458 		}
459 
460 		if (!failed_dev) {
461 			ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
462 				    __func__, tc_abort);
463 			goto out;
464 		}
465 
466 		/*
467 		 * Now abort everything else for that device (hba?) so
468 		 * that the EH will wake up and do something.
469 		 */
470 		list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
471 			struct sas_task *task = a->uldd_task;
472 
473 			if (task &&
474 			    task->dev == failed_dev &&
475 			    a->tc_index != tc_abort)
476 				sas_task_abort(task);
477 		}
478 
479 		goto out;
480 	}
481 	case REQ_DEVICE_RESET: {
482 		struct asd_ascb *a;
483 		u16 conn_handle;
484 		unsigned long flags;
485 		struct sas_task *last_dev_task = NULL;
486 
487 		conn_handle = *((u16 *)(&dl->status_block[1]));
488 		conn_handle = le16_to_cpu(conn_handle);
489 
490 		ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__,
491 			    dl->status_block[3]);
492 
493 		/* Find the last pending task for the device... */
494 		list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
495 			u16 x;
496 			struct domain_device *dev;
497 			struct sas_task *task = a->uldd_task;
498 
499 			if (!task)
500 				continue;
501 			dev = task->dev;
502 
503 			x = (unsigned long)dev->lldd_dev;
504 			if (x == conn_handle)
505 				last_dev_task = task;
506 		}
507 
508 		if (!last_dev_task) {
509 			ASD_DPRINTK("%s: Device reset for idle device %d?\n",
510 				    __func__, conn_handle);
511 			goto out;
512 		}
513 
514 		/* ...and set the reset flag */
515 		spin_lock_irqsave(&last_dev_task->task_state_lock, flags);
516 		last_dev_task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
517 		spin_unlock_irqrestore(&last_dev_task->task_state_lock, flags);
518 
519 		/* Kill all pending tasks for the device */
520 		list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
521 			u16 x;
522 			struct domain_device *dev;
523 			struct sas_task *task = a->uldd_task;
524 
525 			if (!task)
526 				continue;
527 			dev = task->dev;
528 
529 			x = (unsigned long)dev->lldd_dev;
530 			if (x == conn_handle)
531 				sas_task_abort(task);
532 		}
533 
534 		goto out;
535 	}
536 	case SIGNAL_NCQ_ERROR:
537 		ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__);
538 		goto out;
539 	case CLEAR_NCQ_ERROR:
540 		ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__);
541 		goto out;
542 	}
543 
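	/* The low 3 bits of sb_opcode carry the phy id; e.g. (illustrative)
	 * sb_opcode 0x1A decodes to LINK_RESET_ERROR (0x18) on phy 2.
	 */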
544 	sb_opcode &= ~DL_PHY_MASK;
545 
546 	switch (sb_opcode) {
547 	case BYTES_DMAED:
548 		ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id);
549 		asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
550 		break;
551 	case PRIMITIVE_RECVD:
552 		ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__,
553 			    phy_id);
554 		asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
555 		break;
556 	case PHY_EVENT:
557 		ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
558 		asd_phy_event_tasklet(ascb, dl);
559 		break;
560 	case LINK_RESET_ERROR:
561 		ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__,
562 			    phy_id);
563 		asd_link_reset_err_tasklet(ascb, dl, phy_id);
564 		break;
565 	case TIMER_EVENT:
566 		ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
567 			    __func__, phy_id);
568 		asd_turn_led(asd_ha, phy_id, 0);
569 		/* the device is gone */
570 		sas_phy_disconnected(sas_phy);
571 		asd_deform_port(asd_ha, phy);
572 		sas_notify_port_event(sas_phy, PORTE_TIMER_EVENT, GFP_ATOMIC);
573 		break;
574 	default:
575 		ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
576 			    phy_id, sb_opcode);
577 		ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
578 			    edb, dl->opcode);
579 		ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
580 			    sb_opcode, phy_id);
581 		ASD_DPRINTK("escb: vaddr: 0x%p, "
582 			    "dma_handle: 0x%llx, next: 0x%llx, "
583 			    "index:%d, opcode:0x%02x\n",
584 			    ascb->dma_scb.vaddr,
585 			    (unsigned long long)ascb->dma_scb.dma_handle,
586 			    (unsigned long long)
587 			    le64_to_cpu(ascb->scb->header.next_scb),
588 			    le16_to_cpu(ascb->scb->header.index),
589 			    ascb->scb->header.opcode);
590 
591 		break;
592 	}
593 out:
594 	asd_invalidate_edb(ascb, edb);
595 }
596 
597 int asd_init_post_escbs(struct asd_ha_struct *asd_ha)
598 {
599 	struct asd_seq_data *seq = &asd_ha->seq;
600 	int i;
601 
602 	for (i = 0; i < seq->num_escbs; i++)
603 		seq->escb_arr[i]->tasklet_complete = escb_tasklet_complete;
604 
605 	ASD_DPRINTK("posting %d escbs\n", i);
606 	return asd_post_escb_list(asd_ha, seq->escb_arr[0], seq->num_escbs);
607 }
608 
609 /* ---------- CONTROL PHY ---------- */
610 
611 #define CONTROL_PHY_STATUS (CURRENT_DEVICE_PRESENT | CURRENT_OOB_DONE   \
612 			    | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
613 			    | CURRENT_OOB_ERROR)
614 
615 /**
616  * control_phy_tasklet_complete -- tasklet complete for CONTROL PHY ascb
617  * @ascb: pointer to an ascb
618  * @dl: pointer to the done list entry
619  *
620  * This function completes a CONTROL PHY scb and frees the ascb.
621  * A note on LEDs:
622  *  - an LED blinks if there is I/O through its phy,
623  *  - if a device is connected to the phy, its LED is lit,
624  *  - if no device is connected, the LED is dimmed (off).
625  */
626 static void control_phy_tasklet_complete(struct asd_ascb *ascb,
627 					 struct done_list_struct *dl)
628 {
629 	struct asd_ha_struct *asd_ha = ascb->ha;
630 	struct scb *scb = ascb->scb;
631 	struct control_phy *control_phy = &scb->control_phy;
632 	u8 phy_id = control_phy->phy_id;
633 	struct asd_phy *phy = &ascb->ha->phys[phy_id];
634 
635 	u8 status     = dl->status_block[0];
636 	u8 oob_status = dl->status_block[1];
637 	u8 oob_mode   = dl->status_block[2];
638 	/* u8 oob_signals= dl->status_block[3]; */
639 
640 	if (status != 0) {
641 		ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
642 			    __func__, phy_id, status);
643 		goto out;
644 	}
645 
646 	switch (control_phy->sub_func) {
647 	case DISABLE_PHY:
648 		asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
649 		asd_turn_led(asd_ha, phy_id, 0);
650 		asd_control_led(asd_ha, phy_id, 0);
651 		ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id);
652 		break;
653 
654 	case ENABLE_PHY:
655 		asd_control_led(asd_ha, phy_id, 1);
656 		if (oob_status & CURRENT_OOB_DONE) {
657 			asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
658 			get_lrate_mode(phy, oob_mode);
659 			asd_turn_led(asd_ha, phy_id, 1);
660 			ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
661 				    __func__, phy_id, phy->sas_phy.linkrate,
662 				    phy->sas_phy.iproto);
663 		} else if (oob_status & CURRENT_SPINUP_HOLD) {
664 			asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
665 			asd_turn_led(asd_ha, phy_id, 1);
666 			ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__,
667 				    phy_id);
668 		} else if (oob_status & CURRENT_ERR_MASK) {
669 			asd_turn_led(asd_ha, phy_id, 0);
670 			ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
671 				    __func__, phy_id, oob_status);
672 		} else if (oob_status & (CURRENT_HOT_PLUG_CNCT
673 					 | CURRENT_DEVICE_PRESENT))  {
674 			asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
675 			asd_turn_led(asd_ha, phy_id, 1);
676 			ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
677 				    __func__, phy_id);
678 		} else {
679 			asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
680 			asd_turn_led(asd_ha, phy_id, 0);
681 			ASD_DPRINTK("%s: phy%d: no device present: "
682 				    "oob_status:0x%x\n",
683 				    __func__, phy_id, oob_status);
684 		}
685 		break;
686 	case RELEASE_SPINUP_HOLD:
687 	case PHY_NO_OP:
688 	case EXECUTE_HARD_RESET:
689 		ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__,
690 			    phy_id, control_phy->sub_func);
691 		/* XXX finish */
692 		break;
693 	default:
694 		ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__,
695 			    phy_id, control_phy->sub_func);
696 		break;
697 	}
698 out:
699 	asd_ascb_free(ascb);
700 }
701 
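/*
 * Worked example (illustrative): with max_sas_lrate == SAS_LINK_RATE_3_0_GBPS
 * and min_sas_lrate == SAS_LINK_RATE_1_5_GBPS, the first switch below clears
 * SAS_SPEED_30_DIS and SAS_SPEED_15_DIS (6.0 Gbps stays disabled) and the
 * second switch changes nothing, so the phy may negotiate 1.5 or 3.0 Gbps.
 * The SATA masks are derived the same way from the SATA link rates.
 */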
702 static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
703 {
704 	/* disable all speeds, then re-enable those allowed by the descriptor */
705 	*speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS
706 		| SATA_SPEED_30_DIS | SATA_SPEED_15_DIS;
707 
708 	switch (pd->max_sas_lrate) {
709 	case SAS_LINK_RATE_6_0_GBPS:
710 		*speed_mask &= ~SAS_SPEED_60_DIS;
711 		fallthrough;
712 	default:
713 	case SAS_LINK_RATE_3_0_GBPS:
714 		*speed_mask &= ~SAS_SPEED_30_DIS;
715 		fallthrough;
716 	case SAS_LINK_RATE_1_5_GBPS:
717 		*speed_mask &= ~SAS_SPEED_15_DIS;
718 	}
719 
720 	switch (pd->min_sas_lrate) {
721 	case SAS_LINK_RATE_6_0_GBPS:
722 		*speed_mask |= SAS_SPEED_30_DIS;
723 		fallthrough;
724 	case SAS_LINK_RATE_3_0_GBPS:
725 		*speed_mask |= SAS_SPEED_15_DIS;
726 		fallthrough;
727 	default:
728 	case SAS_LINK_RATE_1_5_GBPS:
729 		/* nothing to do */
730 		;
731 	}
732 
733 	switch (pd->max_sata_lrate) {
734 	case SAS_LINK_RATE_3_0_GBPS:
735 		*speed_mask &= ~SATA_SPEED_30_DIS;
736 		fallthrough;
737 	default:
738 	case SAS_LINK_RATE_1_5_GBPS:
739 		*speed_mask &= ~SATA_SPEED_15_DIS;
740 	}
741 
742 	switch (pd->min_sata_lrate) {
743 	case SAS_LINK_RATE_3_0_GBPS:
744 		*speed_mask |= SATA_SPEED_15_DIS;
745 		fallthrough;
746 	default:
747 	case SAS_LINK_RATE_1_5_GBPS:
748 		/* nothing to do */
749 		;
750 	}
751 }
752 
753 /**
754  * asd_build_control_phy -- build a CONTROL PHY SCB
755  * @ascb: pointer to an ascb
756  * @phy_id: phy id to control, integer
757  * @subfunc: subfunction, what to actually do to the phy
758  *
759  * This function builds a CONTROL PHY scb.  No allocation of any kind
760  * is performed; @ascb must already have been allocated with the list
761  * function, asd_ascb_alloc_list().  The caller may override
762  * ascb->tasklet_complete to point to its own callback function, which
763  * must then call asd_ascb_free() itself.  See the default
764  * implementation, control_phy_tasklet_complete(), and the sketch below.
765  */
766 void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
767 {
768 	struct asd_phy *phy = &ascb->ha->phys[phy_id];
769 	struct scb *scb = ascb->scb;
770 	struct control_phy *control_phy = &scb->control_phy;
771 
772 	scb->header.opcode = CONTROL_PHY;
773 	control_phy->phy_id = (u8) phy_id;
774 	control_phy->sub_func = subfunc;
775 
776 	switch (subfunc) {
777 	case EXECUTE_HARD_RESET:  /* 0x81 */
778 	case ENABLE_PHY:          /* 0x01 */
779 		/* decide hot plug delay */
780 		control_phy->hot_plug_delay = HOTPLUG_DELAY_TIMEOUT;
781 
782 		/* decide speed mask */
783 		set_speed_mask(&control_phy->speed_mask, phy->phy_desc);
784 
785 		/* initiator port settings are in the hi nibble */
786 		if (phy->sas_phy.role == PHY_ROLE_INITIATOR)
787 			control_phy->port_type = SAS_PROTOCOL_ALL << 4;
788 		else if (phy->sas_phy.role == PHY_ROLE_TARGET)
789 			control_phy->port_type = SAS_PROTOCOL_ALL;
790 		else
791 			control_phy->port_type =
792 				(SAS_PROTOCOL_ALL << 4) | SAS_PROTOCOL_ALL;
793 
794 		/* link reset retries, this should be nominal */
795 		control_phy->link_reset_retries = 10;
796 		fallthrough;
797 
798 	case RELEASE_SPINUP_HOLD: /* 0x02 */
799 		/* decide the func_mask */
800 		control_phy->func_mask = FUNCTION_MASK_DEFAULT;
801 		if (phy->phy_desc->flags & ASD_SATA_SPINUP_HOLD)
802 			control_phy->func_mask &= ~SPINUP_HOLD_DIS;
803 		else
804 			control_phy->func_mask |= SPINUP_HOLD_DIS;
805 	}
806 
807 	control_phy->conn_handle = cpu_to_le16(0xFFFF);
808 
809 	ascb->tasklet_complete = control_phy_tasklet_complete;
810 }
811 
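/*
 * Usage sketch (illustrative only, kept under #if 0 like the link adm
 * code below; the example_* names are hypothetical): how a caller might
 * override tasklet_complete as described in the kernel-doc above.  The
 * override owns the ascb and must free it itself.
 */
#if 0
static void example_control_phy_done(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct control_phy *control_phy = &ascb->scb->control_phy;

	/* Caller-specific handling of the status block would go here. */
	ASD_DPRINTK("phy%d: control phy done, status:0x%x\n",
		    control_phy->phy_id, dl->status_block[0]);
	asd_ascb_free(ascb);	/* the override must free the ascb */
}

static int example_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
{
	int num = 1;
	struct asd_ascb *ascb = asd_ascb_alloc_list(asd_ha, &num, GFP_KERNEL);

	if (!ascb)
		return -ENOMEM;

	asd_build_control_phy(ascb, phy_id, ENABLE_PHY);
	/* Replace the default completion installed by the builder. */
	ascb->tasklet_complete = example_control_phy_done;

	if (asd_post_ascb_list(asd_ha, ascb, 1) != 0) {
		asd_ascb_free(ascb);
		return -EIO;
	}
	return 0;
}
#endif  /*  0  */
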
812 /* ---------- INITIATE LINK ADM TASK ---------- */
813 
814 #if 0
815 
816 static void link_adm_tasklet_complete(struct asd_ascb *ascb,
817 				      struct done_list_struct *dl)
818 {
819 	u8 opcode = dl->opcode;
820 	struct initiate_link_adm *link_adm = &ascb->scb->link_adm;
821 	u8 phy_id = link_adm->phy_id;
822 
823 	if (opcode != TC_NO_ERROR) {
824 		asd_printk("phy%d: link adm task 0x%x completed with error "
825 			   "0x%x\n", phy_id, link_adm->sub_func, opcode);
826 	}
827 	ASD_DPRINTK("phy%d: link adm task 0x%x: 0x%x\n",
828 		    phy_id, link_adm->sub_func, opcode);
829 
830 	asd_ascb_free(ascb);
831 }
832 
833 void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
834 				      u8 subfunc)
835 {
836 	struct scb *scb = ascb->scb;
837 	struct initiate_link_adm *link_adm = &scb->link_adm;
838 
839 	scb->header.opcode = INITIATE_LINK_ADM_TASK;
840 
841 	link_adm->phy_id = phy_id;
842 	link_adm->sub_func = subfunc;
843 	link_adm->conn_handle = cpu_to_le16(0xFFFF);
844 
845 	ascb->tasklet_complete = link_adm_tasklet_complete;
846 }
847 
848 #endif  /*  0  */
849 
850 /* ---------- SCB timer ---------- */
851 
852 /**
853  * asd_ascb_timedout -- called when a pending SCB's timer has expired
854  * @t: Timer context used to fetch the SCB
855  *
856  * This is the default timeout function, which does only the minimum
857  * necessary.  Upper layers can implement their own timeout function,
858  * say to free resources they hold for this SCB, then call this one at
859  * the end (see the sketch below).  To do this, one should initialize
860  * the ascb->timer.{function, expires} prior to calling the post
861  * function. The timer is started by the post function.
862  */
863 void asd_ascb_timedout(struct timer_list *t)
864 {
865 	struct asd_ascb *ascb = from_timer(ascb, t, timer);
866 	struct asd_seq_data *seq = &ascb->ha->seq;
867 	unsigned long flags;
868 
869 	ASD_DPRINTK("scb:0x%x timed out\n", ascb->scb->header.opcode);
870 
871 	spin_lock_irqsave(&seq->pend_q_lock, flags);
872 	seq->pending--;
873 	list_del_init(&ascb->list);
874 	spin_unlock_irqrestore(&seq->pend_q_lock, flags);
875 
876 	asd_ascb_free(ascb);
877 }
878 
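/*
 * Sketch (illustrative only; the example_* names are hypothetical): an
 * upper layer installing its own timeout handler before posting, as the
 * kernel-doc above describes, and chaining to asd_ascb_timedout().
 */
#if 0
static void example_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);

	ASD_DPRINTK("scb:0x%x: caller timeout\n", ascb->scb->header.opcode);
	/* Release caller-owned resources tied to this ascb here, then let
	 * the default handler dequeue and free it.
	 */
	asd_ascb_timedout(t);
}

static int example_post_with_timeout(struct asd_ha_struct *asd_ha,
				     struct asd_ascb *ascb)
{
	int res;

	/* Set these up before posting; the post function starts the timer. */
	ascb->timer.function = example_timedout;
	ascb->timer.expires = jiffies + 10 * HZ;	/* arbitrary timeout */

	res = asd_post_ascb_list(asd_ha, ascb, 1);
	if (res)
		asd_ascb_free(ascb);
	return res;
}
#endif  /*  0  */
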
879 /* ---------- CONTROL PHY ---------- */
880 
881 /* Given the spec value, return a driver value. */
882 static const int phy_func_table[] = {
883 	[PHY_FUNC_NOP]        = PHY_NO_OP,
884 	[PHY_FUNC_LINK_RESET] = ENABLE_PHY,
885 	[PHY_FUNC_HARD_RESET] = EXECUTE_HARD_RESET,
886 	[PHY_FUNC_DISABLE]    = DISABLE_PHY,
887 	[PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD,
888 };
889 
890 int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg)
891 {
892 	struct asd_ha_struct *asd_ha = phy->ha->lldd_ha;
893 	struct asd_phy_desc *pd = asd_ha->phys[phy->id].phy_desc;
894 	struct asd_ascb *ascb;
895 	struct sas_phy_linkrates *rates;
896 	int res = 1;
897 
898 	switch (func) {
899 	case PHY_FUNC_CLEAR_ERROR_LOG:
900 	case PHY_FUNC_GET_EVENTS:
901 		return -ENOSYS;
902 	case PHY_FUNC_SET_LINK_RATE:
903 		rates = arg;
904 		if (rates->minimum_linkrate) {
905 			pd->min_sas_lrate = rates->minimum_linkrate;
906 			pd->min_sata_lrate = rates->minimum_linkrate;
907 		}
908 		if (rates->maximum_linkrate) {
909 			pd->max_sas_lrate = rates->maximum_linkrate;
910 			pd->max_sata_lrate = rates->maximum_linkrate;
911 		}
912 		func = PHY_FUNC_LINK_RESET;
913 		break;
914 	default:
915 		break;
916 	}
917 
918 	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
919 	if (!ascb)
920 		return -ENOMEM;
921 
922 	asd_build_control_phy(ascb, phy->id, phy_func_table[func]);
923 	res = asd_post_ascb_list(asd_ha, ascb, 1);
924 	if (res)
925 		asd_ascb_free(ascb);
926 
927 	return res;
928 }
929