xref: /linux/drivers/scsi/pm8001/pm8001_sas.c (revision 7adf8b1afc14832de099f9e178f08f91dc0dd6d0)
1 /*
2  * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3  *
4  * Copyright (c) 2008-2009 USI Co., Ltd.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon
16  *    including a substantially similar Disclaimer requirement for further
17  *    binary redistribution.
18  * 3. Neither the names of the above-listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * Alternatively, this software may be distributed under the terms of the
23  * GNU General Public License ("GPL") version 2 as published by the Free
24  * Software Foundation.
25  *
26  * NO WARRANTY
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGES.
38  *
39  */
40 
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43 #include "pm80xx_tracepoints.h"
44 
45 /**
46  * pm8001_find_tag - find the tag that belongs to a given sas task
47  * @task: the task sent to the LLDD
48  * @tag: the found tag associated with the task
49  */
50 static int pm8001_find_tag(struct sas_task *task, u32 *tag)
51 {
52 	if (task->lldd_task) {
53 		struct pm8001_ccb_info *ccb;
54 		ccb = task->lldd_task;
55 		*tag = ccb->ccb_tag;
56 		return 1;
57 	}
58 	return 0;
59 }
60 
61 /**
62  * pm8001_tag_free - free a tag that is no longer needed
63  * @pm8001_ha: our hba struct
64  * @tag: the tag to be freed
65  */
66 void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
67 {
68 	void *bitmap = pm8001_ha->rsvd_tags;
69 	unsigned long flags;
70 
71 	if (tag >= PM8001_RESERVE_SLOT)
72 		return;
73 
74 	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
75 	__clear_bit(tag, bitmap);
76 	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
77 }
78 
79 /**
80  * pm8001_tag_alloc - allocate an unused tag for a task
81  * @pm8001_ha: our hba struct
82  * @tag_out: the allocated tag
83  */
84 int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
85 {
86 	void *bitmap = pm8001_ha->rsvd_tags;
87 	unsigned long flags;
88 	unsigned int tag;
89 
90 	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
91 	tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT);
92 	if (tag >= PM8001_RESERVE_SLOT) {
93 		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
94 		return -SAS_QUEUE_FULL;
95 	}
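	/* Non-atomic __set_bit() is safe here: bitmap_lock is held. */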
96 	__set_bit(tag, bitmap);
97 	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
98 
99 	/* reserved tags are in the lower region of the tagset */
100 	*tag_out = tag;
101 	return 0;
102 }
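/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * needs a reserved tag for an internal request might do the following,
 * assuming a valid pm8001_ha pointer:
 *
 *	u32 tag;
 *
 *	if (pm8001_tag_alloc(pm8001_ha, &tag))
 *		return -SAS_QUEUE_FULL;
 *	... build and post the inbound command using "tag" ...
 *	pm8001_tag_free(pm8001_ha, tag);
 */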
103 
104 static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
105 								   int *ata_tag, bool *task_aborted)
106 {
107 	unsigned long flags;
108 	struct ata_queued_cmd *qc = NULL;
109 
110 	*ata_op = 0;
111 	*ata_tag = -1;
112 	*task_aborted = false;
113 
114 	if (!task)
115 		return;
116 
117 	spin_lock_irqsave(&task->task_state_lock, flags);
118 	if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED)))
119 		*task_aborted = true;
120 	spin_unlock_irqrestore(&task->task_state_lock, flags);
121 
122 	if (task->task_proto == SAS_PROTOCOL_STP) {
123 		// sas_ata_qc_issue path uses SAS_PROTOCOL_STP.
124 		// This only works for scsi + libsas + libata users.
125 		qc = task->uldd_task;
126 		if (qc) {
127 			*ata_op = qc->tf.command;
128 			*ata_tag = qc->tag;
129 		}
130 	}
131 }
132 
133 void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
134 				  struct pm8001_device *target_pm8001_dev)
135 {
136 	int i = 0, ata_op = 0, ata_tag = -1;
137 	struct pm8001_ccb_info *ccb = NULL;
138 	struct sas_task *task = NULL;
139 	struct pm8001_device *pm8001_dev = NULL;
140 	bool task_aborted;
141 
142 	for (i = 0; i < pm8001_ha->ccb_count; i++) {
143 		ccb = &pm8001_ha->ccb_info[i];
144 		if (ccb->ccb_tag == PM8001_INVALID_TAG)
145 			continue;
146 		pm8001_dev = ccb->device;
147 		if (target_pm8001_dev && pm8001_dev &&
148 		    target_pm8001_dev != pm8001_dev)
149 			continue;
150 		task = ccb->task;
151 		pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted);
152 		pm8001_dbg(pm8001_ha, FAIL,
153 			"tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n",
154 			ccb->ccb_tag,
155 			(pm8001_dev ? pm8001_dev->device_id : 0),
156 			task, task_aborted,
157 			ata_op, ata_tag);
158 	}
159 }
160 
161 /**
162  * pm8001_mem_alloc - allocate memory for pm8001.
163  * @pdev: pci device.
164  * @virt_addr: the allocated virtual address
165  * @pphys_addr: DMA address for this device
166  * @pphys_addr_hi: upper 32 bits of the aligned DMA address
167  * @pphys_addr_lo: lower 32 bits of the aligned DMA address
168  * @mem_size: memory size.
169  * @align: requested byte alignment
170  */
171 int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
172 	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
173 	u32 *pphys_addr_lo, u32 mem_size, u32 align)
174 {
175 	caddr_t mem_virt_alloc;
176 	dma_addr_t mem_dma_handle;
177 	u64 phys_align;
178 	u64 align_offset = 0;
179 	if (align)
180 		align_offset = (dma_addr_t)align - 1;
181 	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
182 					    &mem_dma_handle, GFP_KERNEL);
183 	if (!mem_virt_alloc)
184 		return -ENOMEM;
185 	*pphys_addr = mem_dma_handle;
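	/*
	 * Round the DMA address up to the requested alignment (align must be
	 * a power of two) and offset the virtual address by the same amount
	 * so both views refer to the aligned region.
	 */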
186 	phys_align = (*pphys_addr + align_offset) & ~align_offset;
187 	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
188 	*pphys_addr_hi = upper_32_bits(phys_align);
189 	*pphys_addr_lo = lower_32_bits(phys_align);
190 	return 0;
191 }
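/*
 * Usage sketch (illustrative only): allocating a 4 KiB region aligned to
 * 16 bytes and handing the split address to the firmware; "cfg" and its
 * fields are hypothetical placeholders for wherever the caller stores the
 * upper/lower halves:
 *
 *	rc = pm8001_mem_alloc(pm8001_ha->pdev, &virt, &phys,
 *			      &cfg->upper_addr, &cfg->lower_addr, SZ_4K, 16);
 *	if (rc)
 *		return rc;
 */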
192 
193 /**
194  * pm8001_find_ha_by_dev - find our hba struct from a domain device
195  * passed in by the sas layer.
196  * @dev: the domain device from the sas layer.
197  */
198 static
199 struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
200 {
201 	struct sas_ha_struct *sha = dev->port->ha;
202 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
203 	return pm8001_ha;
204 }
205 
206 /**
207  * pm8001_phy_control - phy control handler registered in the
208  * sas_domain_function_template for libsas to call. Note: this only
209  * controls HBA phys; to control an expander phy, an SMP command
210  * must be used instead.
211  * @sas_phy: which phy in HBA phys.
212  * @func: the operation.
213  * @funcdata: always NULL.
214  */
215 int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
216 	void *funcdata)
217 {
218 	int rc = 0, phy_id = sas_phy->id;
219 	struct pm8001_hba_info *pm8001_ha = NULL;
220 	struct sas_phy_linkrates *rates;
221 	struct pm8001_phy *phy;
222 	DECLARE_COMPLETION_ONSTACK(completion);
223 	unsigned long flags;
224 	pm8001_ha = sas_phy->ha->lldd_ha;
225 	phy = &pm8001_ha->phy[phy_id];
226 
227 	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
228 		/*
229 		 * If the controller is in fatal error state,
230 		 * we will not get a response from the controller
231 		 */
232 		pm8001_dbg(pm8001_ha, FAIL,
233 			   "Phy control failed due to fatal errors\n");
234 		return -EFAULT;
235 	}
236 
237 	switch (func) {
238 	case PHY_FUNC_SET_LINK_RATE:
239 		rates = funcdata;
240 		if (rates->minimum_linkrate) {
241 			pm8001_ha->phy[phy_id].minimum_linkrate =
242 				rates->minimum_linkrate;
243 		}
244 		if (rates->maximum_linkrate) {
245 			pm8001_ha->phy[phy_id].maximum_linkrate =
246 				rates->maximum_linkrate;
247 		}
248 		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
249 			pm8001_ha->phy[phy_id].enable_completion = &completion;
250 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
251 			wait_for_completion(&completion);
252 		}
253 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
254 					      PHY_LINK_RESET);
255 		break;
256 	case PHY_FUNC_HARD_RESET:
257 		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
258 			pm8001_ha->phy[phy_id].enable_completion = &completion;
259 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
260 			wait_for_completion(&completion);
261 		}
262 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
263 					      PHY_HARD_RESET);
264 		break;
265 	case PHY_FUNC_LINK_RESET:
266 		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
267 			pm8001_ha->phy[phy_id].enable_completion = &completion;
268 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
269 			wait_for_completion(&completion);
270 		}
271 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
272 					      PHY_LINK_RESET);
273 		break;
274 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
275 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
276 					      PHY_LINK_RESET);
277 		break;
278 	case PHY_FUNC_DISABLE:
279 		if (pm8001_ha->chip_id != chip_8001) {
280 			if (pm8001_ha->phy[phy_id].phy_state ==
281 				PHY_STATE_LINK_UP_SPCV) {
282 				sas_phy_disconnected(&phy->sas_phy);
283 				sas_notify_phy_event(&phy->sas_phy,
284 					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
285 				phy->phy_attached = 0;
286 			}
287 		} else {
288 			if (pm8001_ha->phy[phy_id].phy_state ==
289 				PHY_STATE_LINK_UP_SPC) {
290 				sas_phy_disconnected(&phy->sas_phy);
291 				sas_notify_phy_event(&phy->sas_phy,
292 					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
293 				phy->phy_attached = 0;
294 			}
295 		}
296 		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
297 		break;
298 	case PHY_FUNC_GET_EVENTS:
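		/*
		 * Read the phy error counters from the controller's MMIO
		 * register space; on the 8001 chip the shared BAR window must
		 * first be shifted (pm8001_bar4_shift()) so this phy's
		 * counters are visible.
		 */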
299 		spin_lock_irqsave(&pm8001_ha->lock, flags);
300 		if (pm8001_ha->chip_id == chip_8001) {
301 			if (-1 == pm8001_bar4_shift(pm8001_ha,
302 					(phy_id < 4) ? 0x30000 : 0x40000)) {
303 				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
304 				return -EINVAL;
305 			}
306 		}
307 		{
308 			struct sas_phy *phy = sas_phy->phy;
309 			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
310 				+ 0x1034 + (0x4000 * (phy_id & 3));
311 
312 			phy->invalid_dword_count = readl(qp);
313 			phy->running_disparity_error_count = readl(&qp[1]);
314 			phy->loss_of_dword_sync_count = readl(&qp[3]);
315 			phy->phy_reset_problem_count = readl(&qp[4]);
316 		}
317 		if (pm8001_ha->chip_id == chip_8001)
318 			pm8001_bar4_shift(pm8001_ha, 0);
319 		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
320 		return 0;
321 	default:
322 		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
323 		rc = -EOPNOTSUPP;
324 	}
325 	msleep(300);
326 	return rc;
327 }
328 
329 /**
330  * pm8001_scan_start - enable all HBA phys by sending a phy_start
331  * command for each of them to the HBA.
332  * @shost: the scsi host data.
333  */
334 void pm8001_scan_start(struct Scsi_Host *shost)
335 {
336 	int i;
337 	struct pm8001_hba_info *pm8001_ha;
338 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
339 	DECLARE_COMPLETION_ONSTACK(completion);
340 	pm8001_ha = sha->lldd_ha;
341 	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
342 	if (pm8001_ha->chip_id == chip_8001)
343 		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
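	/* Start the phys one at a time, waiting for each enable to complete. */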
344 	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
345 		pm8001_ha->phy[i].enable_completion = &completion;
346 		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
347 		wait_for_completion(&completion);
348 		msleep(300);
349 	}
350 }
351 
352 int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
353 {
354 	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
355 
356 	/* give the phy enabling interrupt event time to come in (1s
357 	 * is empirically about all it takes) */
358 	if (time < HZ)
359 		return 0;
360 	/* Wait for discovery to finish */
361 	sas_drain_work(ha);
362 	return 1;
363 }
364 
365 /**
366  * pm8001_task_prep_smp - the dispatcher function, prepare data for an smp task
367  * @pm8001_ha: our hba card information
368  * @ccb: the ccb attached to the smp task
369  */
370 static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
371 	struct pm8001_ccb_info *ccb)
372 {
373 	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
374 }
375 
376 u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
377 {
378 	struct ata_queued_cmd *qc = task->uldd_task;
379 
380 	if (qc && ata_is_ncq(qc->tf.protocol)) {
381 		*tag = qc->tag;
382 		return 1;
383 	}
384 
385 	return 0;
386 }
387 
388 /**
389  * pm8001_task_prep_ata - the dispatcher function, prepare data for a sata task
390  * @pm8001_ha: our hba card information
391  * @ccb: the ccb attached to the sata task
392  */
393 static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
394 	struct pm8001_ccb_info *ccb)
395 {
396 	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
397 }
398 
399 /**
400  * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
401  *				      for an internal abort task
402  * @pm8001_ha: our hba card information
403  * @ccb: the ccb attached to the internal abort task
404  */
405 static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
406 					   struct pm8001_ccb_info *ccb)
407 {
408 	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
409 }
410 
411 /**
412  * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
413  * @pm8001_ha: our hba card information
414  * @ccb: the ccb attached to the TM request
415  * @tmf: the task management IU
416  */
417 static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
418 	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
419 {
420 	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
421 }
422 
423 /**
424  * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
425  * @pm8001_ha: our hba card information
426  * @ccb: the ccb attached to the ssp task
427  */
428 static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
429 	struct pm8001_ccb_info *ccb)
430 {
431 	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
432 }
433 
434 #define DEV_IS_GONE(pm8001_dev)	\
435 	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
436 
437 
438 static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
439 				  struct pm8001_ccb_info *ccb)
440 {
441 	struct sas_task *task = ccb->task;
442 	enum sas_protocol task_proto = task->task_proto;
443 	struct sas_tmf_task *tmf = task->tmf;
444 	int is_tmf = !!tmf;
445 
446 	switch (task_proto) {
447 	case SAS_PROTOCOL_SMP:
448 		return pm8001_task_prep_smp(pm8001_ha, ccb);
449 	case SAS_PROTOCOL_SSP:
450 		if (is_tmf)
451 			return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
452 		return pm8001_task_prep_ssp(pm8001_ha, ccb);
453 	case SAS_PROTOCOL_SATA:
454 	case SAS_PROTOCOL_STP:
455 		return pm8001_task_prep_ata(pm8001_ha, ccb);
456 	case SAS_PROTOCOL_INTERNAL_ABORT:
457 		return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
458 	default:
459 		dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
460 			task_proto);
461 	}
462 
463 	return -EINVAL;
464 }
465 
466 /**
467  * pm8001_queue_command - entry point registered for the upper layer; all IO
468  * commands sent to the HBA come through this interface.
469  * @task: the task to be executed.
470  * @gfp_flags: gfp_flags
471  */
472 int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
473 {
474 	struct task_status_struct *ts = &task->task_status;
475 	enum sas_protocol task_proto = task->task_proto;
476 	struct domain_device *dev = task->dev;
477 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
478 	bool internal_abort = sas_is_internal_abort(task);
479 	struct pm8001_hba_info *pm8001_ha;
480 	struct pm8001_port *port = NULL;
481 	struct pm8001_ccb_info *ccb;
482 	unsigned long flags;
483 	u32 n_elem = 0;
484 	int rc = 0;
485 
486 	if (!internal_abort && !dev->port) {
487 		ts->resp = SAS_TASK_UNDELIVERED;
488 		ts->stat = SAS_PHY_DOWN;
489 		if (dev->dev_type != SAS_SATA_DEV)
490 			task->task_done(task);
491 		return 0;
492 	}
493 
494 	pm8001_ha = pm8001_find_ha_by_dev(dev);
495 	if (pm8001_ha->controller_fatal_error) {
496 		ts->resp = SAS_TASK_UNDELIVERED;
497 		task->task_done(task);
498 		return 0;
499 	}
500 
501 	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
502 
503 	spin_lock_irqsave(&pm8001_ha->lock, flags);
504 
505 	pm8001_dev = dev->lldd_dev;
506 	port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
507 
508 	if (!internal_abort &&
509 	    (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
510 		ts->resp = SAS_TASK_UNDELIVERED;
511 		ts->stat = SAS_PHY_DOWN;
512 		if (sas_protocol_ata(task_proto)) {
513 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
514 			task->task_done(task);
515 			spin_lock_irqsave(&pm8001_ha->lock, flags);
516 		} else {
517 			task->task_done(task);
518 		}
519 		rc = -ENODEV;
520 		goto err_out;
521 	}
522 
523 	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
524 	if (!ccb) {
525 		rc = -SAS_QUEUE_FULL;
526 		goto err_out;
527 	}
528 
529 	if (!sas_protocol_ata(task_proto)) {
530 		if (task->num_scatter) {
531 			n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
532 					    task->num_scatter, task->data_dir);
533 			if (!n_elem) {
534 				rc = -ENOMEM;
535 				goto err_out_ccb;
536 			}
537 		}
538 	} else {
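		/*
		 * For SATA/STP the scatterlist has already been DMA-mapped by
		 * libata, so only the element count needs to be recorded.
		 */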
539 		n_elem = task->num_scatter;
540 	}
541 
542 	task->lldd_task = ccb;
543 	ccb->n_elem = n_elem;
544 
545 	atomic_inc(&pm8001_dev->running_req);
546 
547 	rc = pm8001_deliver_command(pm8001_ha, ccb);
548 	if (rc) {
549 		atomic_dec(&pm8001_dev->running_req);
550 		if (!sas_protocol_ata(task_proto) && n_elem)
551 			dma_unmap_sg(pm8001_ha->dev, task->scatter,
552 				     task->num_scatter, task->data_dir);
553 err_out_ccb:
554 		pm8001_ccb_free(pm8001_ha, ccb);
555 
556 err_out:
557 		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
558 	}
559 
560 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
561 
562 	return rc;
563 }
564 
565 /**
566  * pm8001_ccb_task_free - unmap the sg list for an ssp or smp command and free the ccb.
567  * @pm8001_ha: our hba card information
568  * @ccb: the ccb attached to the task, to be freed
569  */
570 void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
571 			  struct pm8001_ccb_info *ccb)
572 {
573 	struct sas_task *task = ccb->task;
574 	struct ata_queued_cmd *qc;
575 	struct pm8001_device *pm8001_dev;
576 
577 	if (!task)
578 		return;
579 
580 	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
581 		dma_unmap_sg(pm8001_ha->dev, task->scatter,
582 			     task->num_scatter, task->data_dir);
583 
584 	switch (task->task_proto) {
585 	case SAS_PROTOCOL_SMP:
586 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
587 			DMA_FROM_DEVICE);
588 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
589 			DMA_TO_DEVICE);
590 		break;
591 
592 	case SAS_PROTOCOL_SATA:
593 	case SAS_PROTOCOL_STP:
594 	case SAS_PROTOCOL_SSP:
595 	default:
596 		/* do nothing */
597 		break;
598 	}
599 
600 	if (sas_protocol_ata(task->task_proto)) {
601 		/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
602 		qc = task->uldd_task;
603 		pm8001_dev = ccb->device;
604 		trace_pm80xx_request_complete(pm8001_ha->id,
605 			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
606 			ccb->ccb_tag, 0 /* ctlr_opcode not known */,
607 			qc ? qc->tf.command : 0, // ata opcode
608 			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
609 	}
610 
611 	task->lldd_task = NULL;
612 	pm8001_ccb_free(pm8001_ha, ccb);
613 }
614 
615 static void pm8001_init_dev(struct pm8001_device *pm8001_dev, int id)
616 {
617 	pm8001_dev->id = id;
618 	pm8001_dev->device_id = PM8001_MAX_DEVICES;
619 	atomic_set(&pm8001_dev->running_req, 0);
620 }
621 
622 /**
623  * pm8001_alloc_dev - find an unused pm8001_device
624  * @pm8001_ha: our hba card information
625  */
626 static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
627 {
628 	u32 dev;
629 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
630 		struct pm8001_device *pm8001_dev = &pm8001_ha->devices[dev];
631 
632 		if (pm8001_dev->dev_type == SAS_PHY_UNUSED) {
633 			pm8001_init_dev(pm8001_dev, dev);
634 			return pm8001_dev;
635 		}
636 	}
637 	if (dev == PM8001_MAX_DEVICES) {
638 		pm8001_dbg(pm8001_ha, FAIL,
639 			   "max support %d devices, ignore ..\n",
640 			   PM8001_MAX_DEVICES);
641 	}
642 	return NULL;
643 }
644 /**
645   * pm8001_find_dev - find a matching pm8001_device
646   * @pm8001_ha: our hba card information
647   * @device_id: device ID to match against
648   */
649 struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
650 					u32 device_id)
651 {
652 	u32 dev;
653 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
654 		if (pm8001_ha->devices[dev].device_id == device_id)
655 			return &pm8001_ha->devices[dev];
656 	}
657 	if (dev == PM8001_MAX_DEVICES) {
658 		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
659 	}
660 	return NULL;
661 }
662 
663 void pm8001_free_dev(struct pm8001_device *pm8001_dev)
664 {
665 	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
666 	pm8001_dev->dev_type = SAS_PHY_UNUSED;
667 	pm8001_dev->device_id = PM8001_MAX_DEVICES;
668 	pm8001_dev->sas_device = NULL;
669 }
670 
671 /**
672  * pm8001_dev_found_notify - libsas notification that a device was found.
673  * @dev: the device structure used by the sas layer.
674  *
675  * When libsas finds a sas domain device, it tells the LLDD that the device
676  * has been found; the LLDD then registers the device with the HBA firmware
677  * using the "OPC_INB_REG_DEV" command. The HBA assigns a device ID (based
678  * on the device's sas address) and returns it to the LLDD. From then on we
679  * communicate with the HBA FW using this device ID rather than the sas
680  * address. This step is required for our HBA but is optional for other
681  * HBA drivers.
682  */
683 static int pm8001_dev_found_notify(struct domain_device *dev)
684 {
685 	unsigned long flags = 0;
686 	int res = 0;
687 	struct pm8001_hba_info *pm8001_ha = NULL;
688 	struct domain_device *parent_dev = dev->parent;
689 	struct pm8001_device *pm8001_device;
690 	DECLARE_COMPLETION_ONSTACK(completion);
691 	u32 flag = 0;
692 	pm8001_ha = pm8001_find_ha_by_dev(dev);
693 	spin_lock_irqsave(&pm8001_ha->lock, flags);
694 
695 	pm8001_device = pm8001_alloc_dev(pm8001_ha);
696 	if (!pm8001_device) {
697 		res = -1;
698 		goto found_out;
699 	}
700 	pm8001_device->sas_device = dev;
701 	dev->lldd_dev = pm8001_device;
702 	pm8001_device->dev_type = dev->dev_type;
703 	pm8001_device->dcompletion = &completion;
704 	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
705 		int phy_id;
706 
707 		phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
708 		if (phy_id < 0) {
709 			pm8001_dbg(pm8001_ha, FAIL,
710 				   "Error: no attached dev:%016llx at ex:%016llx.\n",
711 				   SAS_ADDR(dev->sas_addr),
712 				   SAS_ADDR(parent_dev->sas_addr));
713 			res = phy_id;
714 		} else {
715 			pm8001_device->attached_phy = phy_id;
716 		}
717 	} else {
718 		if (dev->dev_type == SAS_SATA_DEV) {
719 			pm8001_device->attached_phy =
720 				dev->rphy->identify.phy_identifier;
721 			flag = 1; /* directly sata */
722 		}
723 	} /*register this device to HBA*/
724 	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
725 	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
726 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
727 	wait_for_completion(&completion);
728 	if (dev->dev_type == SAS_END_DEVICE)
729 		msleep(50);
730 	pm8001_ha->flags = PM8001F_RUN_TIME;
731 	return 0;
732 found_out:
733 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
734 	return res;
735 }
736 
737 int pm8001_dev_found(struct domain_device *dev)
738 {
739 	return pm8001_dev_found_notify(dev);
740 }
741 
742 #define PM8001_TASK_TIMEOUT 20
743 
744 /**
745   * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
746   * @dev: the device structure which sas layer used.
747   */
748 static void pm8001_dev_gone_notify(struct domain_device *dev)
749 {
750 	unsigned long flags = 0;
751 	struct pm8001_hba_info *pm8001_ha;
752 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
753 
754 	pm8001_ha = pm8001_find_ha_by_dev(dev);
755 	spin_lock_irqsave(&pm8001_ha->lock, flags);
756 	if (pm8001_dev) {
757 		u32 device_id = pm8001_dev->device_id;
758 
759 		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
760 			   pm8001_dev->device_id, pm8001_dev->dev_type);
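		/*
		 * Drain any outstanding commands with an internal abort to the
		 * firmware before deregistering the device.
		 */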
761 		if (atomic_read(&pm8001_dev->running_req)) {
762 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
763 			sas_execute_internal_abort_dev(dev, 0, NULL);
764 			while (atomic_read(&pm8001_dev->running_req))
765 				msleep(20);
766 			spin_lock_irqsave(&pm8001_ha->lock, flags);
767 		}
768 		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
769 		pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
770 		pm8001_free_dev(pm8001_dev);
771 	} else {
772 		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
773 	}
774 	dev->lldd_dev = NULL;
775 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
776 }
777 
778 void pm8001_dev_gone(struct domain_device *dev)
779 {
780 	pm8001_dev_gone_notify(dev);
781 }
782 
783 /* retry commands by ha, by task and/or by device */
784 void pm8001_open_reject_retry(
785 	struct pm8001_hba_info *pm8001_ha,
786 	struct sas_task *task_to_close,
787 	struct pm8001_device *device_to_close)
788 {
789 	int i;
790 	unsigned long flags;
791 
792 	if (pm8001_ha == NULL)
793 		return;
794 
795 	spin_lock_irqsave(&pm8001_ha->lock, flags);
796 
797 	for (i = 0; i < PM8001_MAX_CCB; i++) {
798 		struct sas_task *task;
799 		struct task_status_struct *ts;
800 		struct pm8001_device *pm8001_dev;
801 		unsigned long flags1;
802 		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
803 
804 		if (ccb->ccb_tag == PM8001_INVALID_TAG)
805 			continue;
806 
807 		pm8001_dev = ccb->device;
808 		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
809 			continue;
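		/*
		 * No specific device was given: only retry ccbs whose device
		 * pointer refers to a valid slot in this HBA's devices[] array.
		 */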
810 		if (!device_to_close) {
811 			uintptr_t d = (uintptr_t)pm8001_dev
812 					- (uintptr_t)&pm8001_ha->devices;
813 			if (((d % sizeof(*pm8001_dev)) != 0)
814 			 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
815 				continue;
816 		} else if (pm8001_dev != device_to_close)
817 			continue;
818 		task = ccb->task;
819 		if (!task || !task->task_done)
820 			continue;
821 		if (task_to_close && (task != task_to_close))
822 			continue;
823 		ts = &task->task_status;
824 		ts->resp = SAS_TASK_COMPLETE;
825 		/* Force the midlayer to retry */
826 		ts->stat = SAS_OPEN_REJECT;
827 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
828 		if (pm8001_dev)
829 			atomic_dec(&pm8001_dev->running_req);
830 		spin_lock_irqsave(&task->task_state_lock, flags1);
831 		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
832 		task->task_state_flags |= SAS_TASK_STATE_DONE;
833 		if (unlikely((task->task_state_flags
834 				& SAS_TASK_STATE_ABORTED))) {
835 			spin_unlock_irqrestore(&task->task_state_lock,
836 				flags1);
837 			pm8001_ccb_task_free(pm8001_ha, ccb);
838 		} else {
839 			spin_unlock_irqrestore(&task->task_state_lock,
840 				flags1);
841 			pm8001_ccb_task_free(pm8001_ha, ccb);
842 			mb();/* in order to force CPU ordering */
843 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
844 			task->task_done(task);
845 			spin_lock_irqsave(&pm8001_ha->lock, flags);
846 		}
847 	}
848 
849 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
850 }
851 
852 /**
853  * pm8001_I_T_nexus_reset() - reset the initiator/target connection
854  * @dev: the device structure for the device to reset.
855  *
856  * Standard mandates link reset for ATA (type 0) and hard reset for
857  * SSP (type 1), only for RECOVERY
858  */
859 int pm8001_I_T_nexus_reset(struct domain_device *dev)
860 {
861 	int rc = TMF_RESP_FUNC_FAILED;
862 	struct pm8001_device *pm8001_dev;
863 	struct pm8001_hba_info *pm8001_ha;
864 	struct sas_phy *phy;
865 
866 	if (!dev || !dev->lldd_dev)
867 		return -ENODEV;
868 
869 	pm8001_dev = dev->lldd_dev;
870 	pm8001_ha = pm8001_find_ha_by_dev(dev);
871 	phy = sas_get_local_phy(dev);
872 
873 	if (dev_is_sata(dev)) {
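		/* Nothing to do for a directly attached SATA device. */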
874 		if (scsi_is_sas_phy_local(phy)) {
875 			rc = 0;
876 			goto out;
877 		}
878 		rc = sas_phy_reset(phy, 1);
879 		if (rc) {
880 			pm8001_dbg(pm8001_ha, EH,
881 				   "phy reset failed for device %x\n"
882 				   "with rc %d\n", pm8001_dev->device_id, rc);
883 			rc = TMF_RESP_FUNC_FAILED;
884 			goto out;
885 		}
886 		msleep(2000);
887 		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
888 		if (rc) {
889 			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
890 				   "with rc %d\n", pm8001_dev->device_id, rc);
891 			rc = TMF_RESP_FUNC_FAILED;
892 		}
893 	} else {
894 		rc = sas_phy_reset(phy, 1);
895 		msleep(2000);
896 	}
897 	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
898 		   pm8001_dev->device_id, rc);
899  out:
900 	sas_put_local_phy(phy);
901 	return rc;
902 }
903 
904 /*
905  * This function handles the IT_NEXUS_XXX event or completion
906  * status code for SSP/SATA/SMP I/O requests.
907  */
908 int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
909 {
910 	int rc = TMF_RESP_FUNC_FAILED;
911 	struct pm8001_device *pm8001_dev;
912 	struct pm8001_hba_info *pm8001_ha;
913 	struct sas_phy *phy;
914 
915 	if (!dev || !dev->lldd_dev)
916 		return -1;
917 
918 	pm8001_dev = dev->lldd_dev;
919 	pm8001_ha = pm8001_find_ha_by_dev(dev);
920 
921 	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
922 
923 	phy = sas_get_local_phy(dev);
924 
925 	if (dev_is_sata(dev)) {
926 		DECLARE_COMPLETION_ONSTACK(completion_setstate);
927 		if (scsi_is_sas_phy_local(phy)) {
928 			rc = 0;
929 			goto out;
930 		}
931 		/* send internal ssp/sata/smp abort command to FW */
932 		sas_execute_internal_abort_dev(dev, 0, NULL);
933 		msleep(100);
934 
935 		/* deregister the target device */
936 		pm8001_dev_gone_notify(dev);
937 		msleep(200);
938 
939 		/*send phy reset to hard reset target */
940 		rc = sas_phy_reset(phy, 1);
941 		msleep(2000);
942 		pm8001_dev->setds_completion = &completion_setstate;
943 
944 		wait_for_completion(&completion_setstate);
945 	} else {
946 		/* send internal ssp/sata/smp abort command to FW */
947 		sas_execute_internal_abort_dev(dev, 0, NULL);
948 		msleep(100);
949 
950 		/* deregister the target device */
951 		pm8001_dev_gone_notify(dev);
952 		msleep(200);
953 
954 		/*send phy reset to hard reset target */
955 		rc = sas_phy_reset(phy, 1);
956 		msleep(2000);
957 	}
958 	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
959 		   pm8001_dev->device_id, rc);
960 out:
961 	sas_put_local_phy(phy);
962 
963 	return rc;
964 }
965 /* mandatory SAM-3, reset the specified LUN */
966 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
967 {
968 	int rc = TMF_RESP_FUNC_FAILED;
969 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
970 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
971 	DECLARE_COMPLETION_ONSTACK(completion_setstate);
972 
973 	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
974 		/*
975 		 * If the controller is in fatal error state,
976 		 * we will not get a response from the controller
977 		 */
978 		pm8001_dbg(pm8001_ha, FAIL,
979 			   "LUN reset failed due to fatal errors\n");
980 		return rc;
981 	}
982 
983 	if (dev_is_sata(dev)) {
984 		struct sas_phy *phy = sas_get_local_phy(dev);
985 		sas_execute_internal_abort_dev(dev, 0, NULL);
986 		rc = sas_phy_reset(phy, 1);
987 		sas_put_local_phy(phy);
988 		pm8001_dev->setds_completion = &completion_setstate;
989 		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
990 			pm8001_dev, DS_OPERATIONAL);
991 		wait_for_completion(&completion_setstate);
992 	} else {
993 		rc = sas_lu_reset(dev, lun);
994 	}
995 	/* If failed, fall-through I_T_Nexus reset */
996 	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
997 		   pm8001_dev->device_id, rc);
998 	return rc;
999 }
1000 
1001 /* optional SAM-3 */
1002 int pm8001_query_task(struct sas_task *task)
1003 {
1004 	u32 tag = 0xdeadbeef;
1005 	int rc = TMF_RESP_FUNC_FAILED;
1006 	if (unlikely(!task || !task->lldd_task || !task->dev))
1007 		return rc;
1008 
1009 	if (task->task_proto & SAS_PROTOCOL_SSP) {
1010 		struct scsi_cmnd *cmnd = task->uldd_task;
1011 		struct domain_device *dev = task->dev;
1012 		struct pm8001_hba_info *pm8001_ha =
1013 			pm8001_find_ha_by_dev(dev);
1014 
1015 		rc = pm8001_find_tag(task, &tag);
1016 		if (rc == 0) {
1017 			rc = TMF_RESP_FUNC_FAILED;
1018 			return rc;
1019 		}
1020 		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
1021 
1022 		rc = sas_query_task(task, tag);
1023 		switch (rc) {
1024 		/* The task is still in Lun, release it then */
1025 		case TMF_RESP_FUNC_SUCC:
1026 			pm8001_dbg(pm8001_ha, EH,
1027 				   "The task is still in Lun\n");
1028 			break;
1029 		/* The task is not in Lun or failed, reset the phy */
1030 		case TMF_RESP_FUNC_FAILED:
1031 		case TMF_RESP_FUNC_COMPLETE:
1032 			pm8001_dbg(pm8001_ha, EH,
1033 				   "The task is not in Lun or failed, reset the phy\n");
1034 			break;
1035 		}
1036 	}
1037 	pr_err("pm80xx: rc= %d\n", rc);
1038 	return rc;
1039 }
1040 
1041 /* mandatory SAM-3, abort the specified task and free its task/ccb info */
1042 int pm8001_abort_task(struct sas_task *task)
1043 {
1044 	struct pm8001_ccb_info *ccb = task->lldd_task;
1045 	unsigned long flags;
1046 	u32 tag;
1047 	struct domain_device *dev;
1048 	struct pm8001_hba_info *pm8001_ha;
1049 	struct pm8001_device *pm8001_dev;
1050 	int rc = TMF_RESP_FUNC_FAILED, ret;
1051 	u32 phy_id, port_id;
1052 	struct sas_task_slow slow_task;
1053 
1054 	if (!task->lldd_task || !task->dev)
1055 		return TMF_RESP_FUNC_FAILED;
1056 
1057 	dev = task->dev;
1058 	pm8001_dev = dev->lldd_dev;
1059 	pm8001_ha = pm8001_find_ha_by_dev(dev);
1060 	phy_id = pm8001_dev->attached_phy;
1061 
1062 	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
1063 		// If the controller is seeing fatal errors
1064 		// abort task will not get a response from the controller
1065 		return TMF_RESP_FUNC_FAILED;
1066 	}
1067 
1068 	ret = pm8001_find_tag(task, &tag);
1069 	if (ret == 0) {
1070 		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
1071 		return TMF_RESP_FUNC_FAILED;
1072 	}
1073 	spin_lock_irqsave(&task->task_state_lock, flags);
1074 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1075 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1076 		return TMF_RESP_FUNC_COMPLETE;
1077 	}
1078 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
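	/*
	 * Provide an on-stack slow_task so the SATA abort path below can wait
	 * on its completion even if the task was not issued as a slow task.
	 */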
1079 	if (task->slow_task == NULL) {
1080 		init_completion(&slow_task.completion);
1081 		task->slow_task = &slow_task;
1082 	}
1083 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1084 	if (task->task_proto & SAS_PROTOCOL_SSP) {
1085 		rc = sas_abort_task(task, tag);
1086 		sas_execute_internal_abort_single(dev, tag, 0, NULL);
1087 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
1088 		task->task_proto & SAS_PROTOCOL_STP) {
1089 		if (pm8001_ha->chip_id == chip_8006) {
1090 			DECLARE_COMPLETION_ONSTACK(completion_reset);
1091 			DECLARE_COMPLETION_ONSTACK(completion);
1092 			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
1093 			port_id = phy->port->port_id;
1094 
1095 			/* 1. Set Device state as Recovery */
1096 			pm8001_dev->setds_completion = &completion;
1097 			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1098 				pm8001_dev, DS_IN_RECOVERY);
1099 			wait_for_completion(&completion);
1100 
1101 			/* 2. Send Phy Control Hard Reset */
1102 			reinit_completion(&completion);
1103 			phy->port_reset_status = PORT_RESET_TMO;
1104 			phy->reset_success = false;
1105 			phy->enable_completion = &completion;
1106 			phy->reset_completion = &completion_reset;
1107 			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
1108 				PHY_HARD_RESET);
1109 			if (ret) {
1110 				phy->enable_completion = NULL;
1111 				phy->reset_completion = NULL;
1112 				goto out;
1113 			}
1114 
1115 			/* In the case of the reset timeout/fail we still
1116 			 * abort the command at the firmware. The assumption
1117 			 * here is that the drive is off doing something so
1118 			 * that it's not processing requests, and we want to
1119 			 * avoid getting a completion for this and either
1120 			 * leaking the task in libsas or losing the race and
1121 			 * getting a double free.
1122 			 */
1123 			pm8001_dbg(pm8001_ha, MSG,
1124 				   "Waiting for local phy ctl\n");
1125 			ret = wait_for_completion_timeout(&completion,
1126 					PM8001_TASK_TIMEOUT * HZ);
1127 			if (!ret || !phy->reset_success) {
1128 				phy->enable_completion = NULL;
1129 				phy->reset_completion = NULL;
1130 			} else {
1131 				/* 3. Wait for Port Reset complete or
1132 				 * Port reset TMO
1133 				 */
1134 				pm8001_dbg(pm8001_ha, MSG,
1135 					   "Waiting for Port reset\n");
1136 				ret = wait_for_completion_timeout(
1137 					&completion_reset,
1138 					PM8001_TASK_TIMEOUT * HZ);
1139 				if (!ret)
1140 					phy->reset_completion = NULL;
1141 				WARN_ON(phy->port_reset_status ==
1142 						PORT_RESET_TMO);
1143 				if (phy->port_reset_status == PORT_RESET_TMO) {
1144 					pm8001_dev_gone_notify(dev);
1145 					PM8001_CHIP_DISP->hw_event_ack_req(
1146 						pm8001_ha, 0,
1147 						0x07, /*HW_EVENT_PHY_DOWN ack*/
1148 						port_id, phy_id, 0, 0);
1149 					goto out;
1150 				}
1151 			}
1152 
1153 			/*
1154 			 * 4. SATA Abort ALL
1155 			 * we wait for the task to be aborted so that the task
1156 			 * is removed from the ccb. on success the caller is
1157 			 * going to free the task.
1158 			 */
1159 			ret = sas_execute_internal_abort_dev(dev, 0, NULL);
1160 			if (ret)
1161 				goto out;
1162 			ret = wait_for_completion_timeout(
1163 				&task->slow_task->completion,
1164 				PM8001_TASK_TIMEOUT * HZ);
1165 			if (!ret)
1166 				goto out;
1167 
1168 			/* 5. Set Device State as Operational */
1169 			reinit_completion(&completion);
1170 			pm8001_dev->setds_completion = &completion;
1171 			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1172 				pm8001_dev, DS_OPERATIONAL);
1173 			wait_for_completion(&completion);
1174 		} else {
1175 			/*
1176 			 * Ensure that if we see a completion for the ccb
1177 			 * associated with the task which we are trying to
1178 			 * abort then we should not touch the sas_task as it
1179 			 * may race with libsas freeing it when return here.
1180 			 */
1181 			ccb->task = NULL;
1182 			ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
1183 		}
1184 		rc = TMF_RESP_FUNC_COMPLETE;
1185 	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
1186 		/* SMP */
1187 		rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);
1188 
1189 	}
1190 out:
1191 	spin_lock_irqsave(&task->task_state_lock, flags);
1192 	if (task->slow_task == &slow_task)
1193 		task->slow_task = NULL;
1194 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1195 	if (rc != TMF_RESP_FUNC_COMPLETE)
1196 		pm8001_info(pm8001_ha, "rc= %d\n", rc);
1197 	return rc;
1198 }
1199 
1200 int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1201 {
1202 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
1203 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1204 
1205 	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
1206 		   pm8001_dev->device_id);
1207 	return sas_clear_task_set(dev, lun);
1208 }
1209 
1210 void pm8001_port_formed(struct asd_sas_phy *sas_phy)
1211 {
1212 	struct sas_ha_struct *sas_ha = sas_phy->ha;
1213 	struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
1214 	struct pm8001_phy *phy = sas_phy->lldd_phy;
1215 	struct asd_sas_port *sas_port = sas_phy->port;
1216 	struct pm8001_port *port = phy->port;
1217 
1218 	if (!sas_port) {
1219 		pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
1220 		return;
1221 	}
1222 	sas_port->lldd_port = port;
1223 }
1224 
1225 void pm8001_setds_completion(struct domain_device *dev)
1226 {
1227 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1228 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
1229 	DECLARE_COMPLETION_ONSTACK(completion_setstate);
1230 
1231 	if (pm8001_ha->chip_id != chip_8001) {
1232 		pm8001_dev->setds_completion = &completion_setstate;
1233 		PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1234 			pm8001_dev, DS_OPERATIONAL);
1235 		wait_for_completion(&completion_setstate);
1236 	}
1237 }
1238 
1239 void pm8001_tmf_aborted(struct sas_task *task)
1240 {
1241 	struct pm8001_ccb_info *ccb = task->lldd_task;
1242 
1243 	if (ccb)
1244 		ccb->task = NULL;
1245 }
1246