xref: /linux/drivers/scsi/pm8001/pm8001_sas.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 /*
2  * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3  *
4  * Copyright (c) 2008-2009 USI Co., Ltd.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon
16  *    including a substantially similar Disclaimer requirement for further
17  *    binary redistribution.
18  * 3. Neither the names of the above-listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * Alternatively, this software may be distributed under the terms of the
23  * GNU General Public License ("GPL") version 2 as published by the Free
24  * Software Foundation.
25  *
26  * NO WARRANTY
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGES.
38  *
39  */
40 
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43 #include "pm80xx_tracepoints.h"
44 
45 /**
46  * pm8001_find_tag - find the tag associated with a given sas task
47  * @task: the task sent to the LLDD
48  * @tag: the found tag associated with the task
49  */
50 static int pm8001_find_tag(struct sas_task *task, u32 *tag)
51 {
52 	if (task->lldd_task) {
53 		struct pm8001_ccb_info *ccb;
54 		ccb = task->lldd_task;
55 		*tag = ccb->ccb_tag;
56 		return 1;
57 	}
58 	return 0;
59 }
60 
61 /**
62   * pm8001_tag_free - free a tag that is no longer needed
63   * @pm8001_ha: our hba struct
64   * @tag: the found tag associated with the task
65   */
66 void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
67 {
68 	void *bitmap = pm8001_ha->rsvd_tags;
69 	unsigned long flags;
70 
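	/*
	 * Only reserved tags (below PM8001_RESERVE_SLOT) are tracked in the
	 * rsvd_tags bitmap; any other tag is not managed here.
	 */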
71 	if (tag >= PM8001_RESERVE_SLOT)
72 		return;
73 
74 	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
75 	__clear_bit(tag, bitmap);
76 	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
77 }
78 
79 /**
80   * pm8001_tag_alloc - allocate an unused tag for a task.
81   * @pm8001_ha: our hba struct
82   * @tag_out: the allocated tag.
83   */
84 int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
85 {
86 	void *bitmap = pm8001_ha->rsvd_tags;
87 	unsigned long flags;
88 	unsigned int tag;
89 
90 	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
91 	tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT);
92 	if (tag >= PM8001_RESERVE_SLOT) {
93 		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
94 		return -SAS_QUEUE_FULL;
95 	}
96 	__set_bit(tag, bitmap);
97 	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
98 
99 	/* reserved tags are in the lower region of the tagset */
100 	*tag_out = tag;
101 	return 0;
102 }
103 
104 /**
105  * pm8001_mem_alloc - allocate memory for pm8001.
106  * @pdev: pci device.
107  * @virt_addr: the allocated virtual address
108  * @pphys_addr: DMA address for this device
109  * @pphys_addr_hi: the upper 32 bits of the returned DMA address.
110  * @pphys_addr_lo: the lower 32 bits of the returned DMA address.
111  * @mem_size: memory size.
112  * @align: requested byte alignment
113  */
114 int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
115 	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
116 	u32 *pphys_addr_lo, u32 mem_size, u32 align)
117 {
118 	caddr_t mem_virt_alloc;
119 	dma_addr_t mem_dma_handle;
120 	u64 phys_align;
121 	u64 align_offset = 0;
122 	if (align)
123 		align_offset = (dma_addr_t)align - 1;
124 	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
125 					    &mem_dma_handle, GFP_KERNEL);
126 	if (!mem_virt_alloc)
127 		return -ENOMEM;
128 	*pphys_addr = mem_dma_handle;
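	/*
	 * The allocation above was padded by 'align' bytes, so the returned
	 * DMA address can be rounded up to the requested alignment and the
	 * virtual pointer adjusted by the same offset.
	 */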
129 	phys_align = (*pphys_addr + align_offset) & ~align_offset;
130 	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
131 	*pphys_addr_hi = upper_32_bits(phys_align);
132 	*pphys_addr_lo = lower_32_bits(phys_align);
133 	return 0;
134 }
135 
136 /**
137   * pm8001_find_ha_by_dev - find our hba struct from a domain device
138   * provided by the sas layer.
139   * @dev: the domain device which came from the sas layer.
140   */
141 static
142 struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
143 {
144 	struct sas_ha_struct *sha = dev->port->ha;
145 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
146 	return pm8001_ha;
147 }
148 
149 /**
150   * pm8001_phy_control - phy control handler registered through the
151   * sas_domain_function_template for use by libsas. Note that this only
152   * controls HBA phys; to control an expander phy, an SMP command must be
153   * used instead.
154   * @sas_phy: which phy in HBA phys.
155   * @func: the operation.
156   * @funcdata: always NULL.
157   */
158 int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
159 	void *funcdata)
160 {
161 	int rc = 0, phy_id = sas_phy->id;
162 	struct pm8001_hba_info *pm8001_ha = NULL;
163 	struct sas_phy_linkrates *rates;
164 	struct pm8001_phy *phy;
165 	DECLARE_COMPLETION_ONSTACK(completion);
166 	unsigned long flags;
167 	pm8001_ha = sas_phy->ha->lldd_ha;
168 	phy = &pm8001_ha->phy[phy_id];
169 
170 	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
171 		/*
172 		 * If the controller is in fatal error state,
173 		 * we will not get a response from the controller
174 		 */
175 		pm8001_dbg(pm8001_ha, FAIL,
176 			   "Phy control failed due to fatal errors\n");
177 		return -EFAULT;
178 	}
179 
180 	switch (func) {
181 	case PHY_FUNC_SET_LINK_RATE:
182 		rates = funcdata;
183 		if (rates->minimum_linkrate) {
184 			pm8001_ha->phy[phy_id].minimum_linkrate =
185 				rates->minimum_linkrate;
186 		}
187 		if (rates->maximum_linkrate) {
188 			pm8001_ha->phy[phy_id].maximum_linkrate =
189 				rates->maximum_linkrate;
190 		}
191 		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
192 			pm8001_ha->phy[phy_id].enable_completion = &completion;
193 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
194 			wait_for_completion(&completion);
195 		}
196 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
197 					      PHY_LINK_RESET);
198 		break;
199 	case PHY_FUNC_HARD_RESET:
200 		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
201 			pm8001_ha->phy[phy_id].enable_completion = &completion;
202 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
203 			wait_for_completion(&completion);
204 		}
205 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
206 					      PHY_HARD_RESET);
207 		break;
208 	case PHY_FUNC_LINK_RESET:
209 		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
210 			pm8001_ha->phy[phy_id].enable_completion = &completion;
211 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
212 			wait_for_completion(&completion);
213 		}
214 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
215 					      PHY_LINK_RESET);
216 		break;
217 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
218 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
219 					      PHY_LINK_RESET);
220 		break;
221 	case PHY_FUNC_DISABLE:
222 		if (pm8001_ha->chip_id != chip_8001) {
223 			if (pm8001_ha->phy[phy_id].phy_state ==
224 				PHY_STATE_LINK_UP_SPCV) {
225 				sas_phy_disconnected(&phy->sas_phy);
226 				sas_notify_phy_event(&phy->sas_phy,
227 					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
228 				phy->phy_attached = 0;
229 			}
230 		} else {
231 			if (pm8001_ha->phy[phy_id].phy_state ==
232 				PHY_STATE_LINK_UP_SPC) {
233 				sas_phy_disconnected(&phy->sas_phy);
234 				sas_notify_phy_event(&phy->sas_phy,
235 					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
236 				phy->phy_attached = 0;
237 			}
238 		}
239 		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
240 		break;
241 	case PHY_FUNC_GET_EVENTS:
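		/*
		 * Read the phy error counters from the controller's MMIO
		 * space; on the SPC (chip_8001) the BAR4 window must first
		 * be shifted to the bank covering this phy.
		 */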
242 		spin_lock_irqsave(&pm8001_ha->lock, flags);
243 		if (pm8001_ha->chip_id == chip_8001) {
244 			if (-1 == pm8001_bar4_shift(pm8001_ha,
245 					(phy_id < 4) ? 0x30000 : 0x40000)) {
246 				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
247 				return -EINVAL;
248 			}
249 		}
250 		{
251 			struct sas_phy *phy = sas_phy->phy;
252 			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
253 				+ 0x1034 + (0x4000 * (phy_id & 3));
254 
255 			phy->invalid_dword_count = readl(qp);
256 			phy->running_disparity_error_count = readl(&qp[1]);
257 			phy->loss_of_dword_sync_count = readl(&qp[3]);
258 			phy->phy_reset_problem_count = readl(&qp[4]);
259 		}
260 		if (pm8001_ha->chip_id == chip_8001)
261 			pm8001_bar4_shift(pm8001_ha, 0);
262 		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
263 		return 0;
264 	default:
265 		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
266 		rc = -EOPNOTSUPP;
267 	}
268 	msleep(300);
269 	return rc;
270 }
271 
272 /**
273   * pm8001_scan_start - enable all HBA phys by sending a phy_start
274   * command for each of them to the HBA.
275   * @shost: the scsi host data.
276   */
277 void pm8001_scan_start(struct Scsi_Host *shost)
278 {
279 	int i;
280 	struct pm8001_hba_info *pm8001_ha;
281 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
282 	DECLARE_COMPLETION_ONSTACK(completion);
283 	pm8001_ha = sha->lldd_ha;
284 	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
285 	if (pm8001_ha->chip_id == chip_8001)
286 		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
287 	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
288 		pm8001_ha->phy[i].enable_completion = &completion;
289 		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
290 		wait_for_completion(&completion);
291 		msleep(300);
292 	}
293 }
294 
295 int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
296 {
297 	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
298 
299 	/* give the phy enabling interrupt event time to come in (1s
300 	 * is empirically about all it takes) */
301 	if (time < HZ)
302 		return 0;
303 	/* Wait for discovery to finish */
304 	sas_drain_work(ha);
305 	return 1;
306 }
307 
308 /**
309   * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
310   * @pm8001_ha: our hba card information
311   * @ccb: the ccb attached to the smp task
312   */
313 static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
314 	struct pm8001_ccb_info *ccb)
315 {
316 	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
317 }
318 
319 u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
320 {
321 	struct ata_queued_cmd *qc = task->uldd_task;
322 
323 	if (qc && ata_is_ncq(qc->tf.protocol)) {
324 		*tag = qc->tag;
325 		return 1;
326 	}
327 
328 	return 0;
329 }
330 
331 /**
332   * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
333   * @pm8001_ha: our hba card information
334   * @ccb: the ccb attached to the sata task
335   */
336 static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
337 	struct pm8001_ccb_info *ccb)
338 {
339 	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
340 }
341 
342 /**
343   * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
344   *				      for internal abort task
345   * @pm8001_ha: our hba card information
346   * @ccb: the ccb attached to the internal abort task
347   */
348 static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
349 					   struct pm8001_ccb_info *ccb)
350 {
351 	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
352 }
353 
354 /**
355   * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
356   * @pm8001_ha: our hba card information
357   * @ccb: the ccb attached to the TM
358   * @tmf: the task management IU
359   */
360 static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
361 	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
362 {
363 	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
364 }
365 
366 /**
367   * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
368   * @pm8001_ha: our hba card information
369   * @ccb: the ccb attached to the ssp task
370   */
371 static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
372 	struct pm8001_ccb_info *ccb)
373 {
374 	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
375 }
376 
377  /* Find the local port id that's attached to this device */
378 static int sas_find_local_port_id(struct domain_device *dev)
379 {
380 	struct domain_device *pdev = dev->parent;
381 
382 	/* Directly attached device */
383 	if (!pdev)
384 		return dev->port->id;
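	/*
	 * Expander attached: walk up the parent chain to the root expander,
	 * which is attached to the HBA's local port.
	 */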
385 	while (pdev) {
386 		struct domain_device *pdev_p = pdev->parent;
387 		if (!pdev_p)
388 			return pdev->port->id;
389 		pdev = pdev->parent;
390 	}
391 	return 0;
392 }
393 
394 #define DEV_IS_GONE(pm8001_dev)	\
395 	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
396 
397 
398 static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
399 				  struct pm8001_ccb_info *ccb)
400 {
401 	struct sas_task *task = ccb->task;
402 	enum sas_protocol task_proto = task->task_proto;
403 	struct sas_tmf_task *tmf = task->tmf;
404 	int is_tmf = !!tmf;
405 
406 	switch (task_proto) {
407 	case SAS_PROTOCOL_SMP:
408 		return pm8001_task_prep_smp(pm8001_ha, ccb);
409 	case SAS_PROTOCOL_SSP:
410 		if (is_tmf)
411 			return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
412 		return pm8001_task_prep_ssp(pm8001_ha, ccb);
413 	case SAS_PROTOCOL_SATA:
414 	case SAS_PROTOCOL_STP:
415 		return pm8001_task_prep_ata(pm8001_ha, ccb);
416 	case SAS_PROTOCOL_INTERNAL_ABORT:
417 		return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
418 	default:
419 		dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
420 			task_proto);
421 	}
422 
423 	return -EINVAL;
424 }
425 
426 /**
427   * pm8001_queue_command - queue command handler registered for the upper
428   * layer; all IO commands sent to the HBA come through this interface.
429   * @task: the task to be executed.
430   * @gfp_flags: gfp_flags
431   */
432 int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
433 {
434 	struct task_status_struct *ts = &task->task_status;
435 	enum sas_protocol task_proto = task->task_proto;
436 	struct domain_device *dev = task->dev;
437 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
438 	bool internal_abort = sas_is_internal_abort(task);
439 	struct pm8001_hba_info *pm8001_ha;
440 	struct pm8001_port *port = NULL;
441 	struct pm8001_ccb_info *ccb;
442 	unsigned long flags;
443 	u32 n_elem = 0;
444 	int rc = 0;
445 
446 	if (!internal_abort && !dev->port) {
447 		ts->resp = SAS_TASK_UNDELIVERED;
448 		ts->stat = SAS_PHY_DOWN;
449 		if (dev->dev_type != SAS_SATA_DEV)
450 			task->task_done(task);
451 		return 0;
452 	}
453 
454 	pm8001_ha = pm8001_find_ha_by_dev(dev);
455 	if (pm8001_ha->controller_fatal_error) {
456 		ts->resp = SAS_TASK_UNDELIVERED;
457 		task->task_done(task);
458 		return 0;
459 	}
460 
461 	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
462 
463 	spin_lock_irqsave(&pm8001_ha->lock, flags);
464 
465 	pm8001_dev = dev->lldd_dev;
466 	port = &pm8001_ha->port[sas_find_local_port_id(dev)];
467 
468 	if (!internal_abort &&
469 	    (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
470 		ts->resp = SAS_TASK_UNDELIVERED;
471 		ts->stat = SAS_PHY_DOWN;
472 		if (sas_protocol_ata(task_proto)) {
473 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
474 			task->task_done(task);
475 			spin_lock_irqsave(&pm8001_ha->lock, flags);
476 		} else {
477 			task->task_done(task);
478 		}
479 		rc = -ENODEV;
480 		goto err_out;
481 	}
482 
483 	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
484 	if (!ccb) {
485 		rc = -SAS_QUEUE_FULL;
486 		goto err_out;
487 	}
488 
489 	if (!sas_protocol_ata(task_proto)) {
490 		if (task->num_scatter) {
491 			n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
492 					    task->num_scatter, task->data_dir);
493 			if (!n_elem) {
494 				rc = -ENOMEM;
495 				goto err_out_ccb;
496 			}
497 		}
498 	} else {
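		/*
		 * SATA/STP scatterlists are already DMA-mapped by the ATA
		 * layer, so only the element count is recorded here.
		 */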
499 		n_elem = task->num_scatter;
500 	}
501 
502 	task->lldd_task = ccb;
503 	ccb->n_elem = n_elem;
504 
505 	atomic_inc(&pm8001_dev->running_req);
506 
507 	rc = pm8001_deliver_command(pm8001_ha, ccb);
508 	if (rc) {
509 		atomic_dec(&pm8001_dev->running_req);
510 		if (!sas_protocol_ata(task_proto) && n_elem)
511 			dma_unmap_sg(pm8001_ha->dev, task->scatter,
512 				     task->num_scatter, task->data_dir);
513 err_out_ccb:
514 		pm8001_ccb_free(pm8001_ha, ccb);
515 
516 err_out:
517 		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
518 	}
519 
520 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
521 
522 	return rc;
523 }
524 
525 /**
526   * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
527   * @pm8001_ha: our hba card information
528   * @ccb: the ccb which attached to ssp task to free
529   */
530 void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
531 			  struct pm8001_ccb_info *ccb)
532 {
533 	struct sas_task *task = ccb->task;
534 	struct ata_queued_cmd *qc;
535 	struct pm8001_device *pm8001_dev;
536 
537 	if (!task)
538 		return;
539 
540 	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
541 		dma_unmap_sg(pm8001_ha->dev, task->scatter,
542 			     task->num_scatter, task->data_dir);
543 
544 	switch (task->task_proto) {
545 	case SAS_PROTOCOL_SMP:
546 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
547 			DMA_FROM_DEVICE);
548 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
549 			DMA_TO_DEVICE);
550 		break;
551 
552 	case SAS_PROTOCOL_SATA:
553 	case SAS_PROTOCOL_STP:
554 	case SAS_PROTOCOL_SSP:
555 	default:
556 		/* do nothing */
557 		break;
558 	}
559 
560 	if (sas_protocol_ata(task->task_proto)) {
561 		/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
562 		qc = task->uldd_task;
563 		pm8001_dev = ccb->device;
564 		trace_pm80xx_request_complete(pm8001_ha->id,
565 			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
566 			ccb->ccb_tag, 0 /* ctlr_opcode not known */,
567 			qc ? qc->tf.command : 0, // ata opcode
568 			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
569 	}
570 
571 	task->lldd_task = NULL;
572 	pm8001_ccb_free(pm8001_ha, ccb);
573 }
574 
575 /**
576  * pm8001_alloc_dev - find an unused pm8001_device slot
577  * @pm8001_ha: our hba card information
578  */
579 static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
580 {
581 	u32 dev;
582 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
583 		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
584 			pm8001_ha->devices[dev].id = dev;
585 			return &pm8001_ha->devices[dev];
586 		}
587 	}
588 	if (dev == PM8001_MAX_DEVICES) {
589 		pm8001_dbg(pm8001_ha, FAIL,
590 			   "max support %d devices, ignore ..\n",
591 			   PM8001_MAX_DEVICES);
592 	}
593 	return NULL;
594 }
595 /**
596   * pm8001_find_dev - find a matching pm8001_device
597   * @pm8001_ha: our hba card information
598   * @device_id: device ID to match against
599   */
600 struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
601 					u32 device_id)
602 {
603 	u32 dev;
604 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
605 		if (pm8001_ha->devices[dev].device_id == device_id)
606 			return &pm8001_ha->devices[dev];
607 	}
608 	if (dev == PM8001_MAX_DEVICES) {
609 		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
610 	}
611 	return NULL;
612 }
613 
614 void pm8001_free_dev(struct pm8001_device *pm8001_dev)
615 {
616 	u32 id = pm8001_dev->id;
617 	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
618 	pm8001_dev->id = id;
619 	pm8001_dev->dev_type = SAS_PHY_UNUSED;
620 	pm8001_dev->device_id = PM8001_MAX_DEVICES;
621 	pm8001_dev->sas_device = NULL;
622 }
623 
624 /**
625   * pm8001_dev_found_notify - notification from libsas that a device was found.
626   * @dev: the device structure used by the sas layer.
627   *
628   * When libsas finds a sas domain device, it tells the LLDD that the
629   * device was found. The LLDD then registers the device with the HBA
630   * firmware using the "OPC_INB_REG_DEV" command, after which the HBA
631   * assigns a device ID (based on the device's sas address) and returns
632   * it to the LLDD. From then on we talk to the HBA FW using that device
633   * ID rather than the sas address. This step is required for our HBA,
634   * although it is optional for other HBA drivers.
635   */
636 static int pm8001_dev_found_notify(struct domain_device *dev)
637 {
638 	unsigned long flags = 0;
639 	int res = 0;
640 	struct pm8001_hba_info *pm8001_ha = NULL;
641 	struct domain_device *parent_dev = dev->parent;
642 	struct pm8001_device *pm8001_device;
643 	DECLARE_COMPLETION_ONSTACK(completion);
644 	u32 flag = 0;
645 	pm8001_ha = pm8001_find_ha_by_dev(dev);
646 	spin_lock_irqsave(&pm8001_ha->lock, flags);
647 
648 	pm8001_device = pm8001_alloc_dev(pm8001_ha);
649 	if (!pm8001_device) {
650 		res = -1;
651 		goto found_out;
652 	}
653 	pm8001_device->sas_device = dev;
654 	dev->lldd_dev = pm8001_device;
655 	pm8001_device->dev_type = dev->dev_type;
656 	pm8001_device->dcompletion = &completion;
657 	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
658 		int phy_id;
659 
660 		phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
661 		if (phy_id < 0) {
662 			pm8001_dbg(pm8001_ha, FAIL,
663 				   "Error: no attached dev:%016llx at ex:%016llx.\n",
664 				   SAS_ADDR(dev->sas_addr),
665 				   SAS_ADDR(parent_dev->sas_addr));
666 			res = phy_id;
667 		} else {
668 			pm8001_device->attached_phy = phy_id;
669 		}
670 	} else {
671 		if (dev->dev_type == SAS_SATA_DEV) {
672 			pm8001_device->attached_phy =
673 				dev->rphy->identify.phy_identifier;
674 			flag = 1; /* directly sata */
675 		}
676 	} /*register this device to HBA*/
677 	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
678 	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
679 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
680 	wait_for_completion(&completion);
681 	if (dev->dev_type == SAS_END_DEVICE)
682 		msleep(50);
683 	pm8001_ha->flags = PM8001F_RUN_TIME;
684 	return 0;
685 found_out:
686 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
687 	return res;
688 }
689 
690 int pm8001_dev_found(struct domain_device *dev)
691 {
692 	return pm8001_dev_found_notify(dev);
693 }
694 
695 #define PM8001_TASK_TIMEOUT 20
696 
697 /**
698   * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
699   * @dev: the device structure which sas layer used.
700   */
701 static void pm8001_dev_gone_notify(struct domain_device *dev)
702 {
703 	unsigned long flags = 0;
704 	struct pm8001_hba_info *pm8001_ha;
705 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
706 
707 	pm8001_ha = pm8001_find_ha_by_dev(dev);
708 	spin_lock_irqsave(&pm8001_ha->lock, flags);
709 	if (pm8001_dev) {
710 		u32 device_id = pm8001_dev->device_id;
711 
712 		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
713 			   pm8001_dev->device_id, pm8001_dev->dev_type);
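		/* Abort and drain all outstanding I/O before deregistering the device. */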
714 		if (atomic_read(&pm8001_dev->running_req)) {
715 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
716 			sas_execute_internal_abort_dev(dev, 0, NULL);
717 			while (atomic_read(&pm8001_dev->running_req))
718 				msleep(20);
719 			spin_lock_irqsave(&pm8001_ha->lock, flags);
720 		}
721 		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
722 		pm8001_free_dev(pm8001_dev);
723 	} else {
724 		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
725 	}
726 	dev->lldd_dev = NULL;
727 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
728 }
729 
730 void pm8001_dev_gone(struct domain_device *dev)
731 {
732 	pm8001_dev_gone_notify(dev);
733 }
734 
735 /* retry commands by ha, by task and/or by device */
736 void pm8001_open_reject_retry(
737 	struct pm8001_hba_info *pm8001_ha,
738 	struct sas_task *task_to_close,
739 	struct pm8001_device *device_to_close)
740 {
741 	int i;
742 	unsigned long flags;
743 
744 	if (pm8001_ha == NULL)
745 		return;
746 
747 	spin_lock_irqsave(&pm8001_ha->lock, flags);
748 
749 	for (i = 0; i < PM8001_MAX_CCB; i++) {
750 		struct sas_task *task;
751 		struct task_status_struct *ts;
752 		struct pm8001_device *pm8001_dev;
753 		unsigned long flags1;
754 		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
755 
756 		if (ccb->ccb_tag == PM8001_INVALID_TAG)
757 			continue;
758 
759 		pm8001_dev = ccb->device;
760 		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
761 			continue;
762 		if (!device_to_close) {
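			/*
			 * No target device given: only act on ccbs whose
			 * device pointer actually lies within this HBA's
			 * devices[] array.
			 */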
763 			uintptr_t d = (uintptr_t)pm8001_dev
764 					- (uintptr_t)&pm8001_ha->devices;
765 			if (((d % sizeof(*pm8001_dev)) != 0)
766 			 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
767 				continue;
768 		} else if (pm8001_dev != device_to_close)
769 			continue;
770 		task = ccb->task;
771 		if (!task || !task->task_done)
772 			continue;
773 		if (task_to_close && (task != task_to_close))
774 			continue;
775 		ts = &task->task_status;
776 		ts->resp = SAS_TASK_COMPLETE;
777 		/* Force the midlayer to retry */
778 		ts->stat = SAS_OPEN_REJECT;
779 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
780 		if (pm8001_dev)
781 			atomic_dec(&pm8001_dev->running_req);
782 		spin_lock_irqsave(&task->task_state_lock, flags1);
783 		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
784 		task->task_state_flags |= SAS_TASK_STATE_DONE;
785 		if (unlikely((task->task_state_flags
786 				& SAS_TASK_STATE_ABORTED))) {
787 			spin_unlock_irqrestore(&task->task_state_lock,
788 				flags1);
789 			pm8001_ccb_task_free(pm8001_ha, ccb);
790 		} else {
791 			spin_unlock_irqrestore(&task->task_state_lock,
792 				flags1);
793 			pm8001_ccb_task_free(pm8001_ha, ccb);
794 			mb();/* in order to force CPU ordering */
795 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
796 			task->task_done(task);
797 			spin_lock_irqsave(&pm8001_ha->lock, flags);
798 		}
799 	}
800 
801 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
802 }
803 
804 /**
805  * pm8001_I_T_nexus_reset() - reset the initiator/target connection
806  * @dev: the device structure for the device to reset.
807  *
808  * The standard mandates a link reset for ATA (type 0) and a hard reset
809  * for SSP (type 1), for recovery only.
810  */
811 int pm8001_I_T_nexus_reset(struct domain_device *dev)
812 {
813 	int rc = TMF_RESP_FUNC_FAILED;
814 	struct pm8001_device *pm8001_dev;
815 	struct pm8001_hba_info *pm8001_ha;
816 	struct sas_phy *phy;
817 
818 	if (!dev || !dev->lldd_dev)
819 		return -ENODEV;
820 
821 	pm8001_dev = dev->lldd_dev;
822 	pm8001_ha = pm8001_find_ha_by_dev(dev);
823 	phy = sas_get_local_phy(dev);
824 
825 	if (dev_is_sata(dev)) {
826 		if (scsi_is_sas_phy_local(phy)) {
827 			rc = 0;
828 			goto out;
829 		}
830 		rc = sas_phy_reset(phy, 1);
831 		if (rc) {
832 			pm8001_dbg(pm8001_ha, EH,
833 				   "phy reset failed for device %x\n"
834 				   "with rc %d\n", pm8001_dev->device_id, rc);
835 			rc = TMF_RESP_FUNC_FAILED;
836 			goto out;
837 		}
838 		msleep(2000);
839 		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
840 		if (rc) {
841 			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
842 				   "with rc %d\n", pm8001_dev->device_id, rc);
843 			rc = TMF_RESP_FUNC_FAILED;
844 		}
845 	} else {
846 		rc = sas_phy_reset(phy, 1);
847 		msleep(2000);
848 	}
849 	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
850 		   pm8001_dev->device_id, rc);
851  out:
852 	sas_put_local_phy(phy);
853 	return rc;
854 }
855 
856 /*
857 * This function handles the IT_NEXUS_XXX event or completion
858 * status code for SSP/SATA/SMP I/O requests.
859 */
860 int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
861 {
862 	int rc = TMF_RESP_FUNC_FAILED;
863 	struct pm8001_device *pm8001_dev;
864 	struct pm8001_hba_info *pm8001_ha;
865 	struct sas_phy *phy;
866 
867 	if (!dev || !dev->lldd_dev)
868 		return -1;
869 
870 	pm8001_dev = dev->lldd_dev;
871 	pm8001_ha = pm8001_find_ha_by_dev(dev);
872 
873 	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
874 
875 	phy = sas_get_local_phy(dev);
876 
877 	if (dev_is_sata(dev)) {
878 		DECLARE_COMPLETION_ONSTACK(completion_setstate);
879 		if (scsi_is_sas_phy_local(phy)) {
880 			rc = 0;
881 			goto out;
882 		}
883 		/* send internal ssp/sata/smp abort command to FW */
884 		sas_execute_internal_abort_dev(dev, 0, NULL);
885 		msleep(100);
886 
887 		/* deregister the target device */
888 		pm8001_dev_gone_notify(dev);
889 		msleep(200);
890 
891 		/* send a phy reset to hard reset the target */
892 		rc = sas_phy_reset(phy, 1);
893 		msleep(2000);
894 		pm8001_dev->setds_completion = &completion_setstate;
895 
896 		wait_for_completion(&completion_setstate);
897 	} else {
898 		/* send internal ssp/sata/smp abort command to FW */
899 		sas_execute_internal_abort_dev(dev, 0, NULL);
900 		msleep(100);
901 
902 		/* deregister the target device */
903 		pm8001_dev_gone_notify(dev);
904 		msleep(200);
905 
906 		/* send a phy reset to hard reset the target */
907 		rc = sas_phy_reset(phy, 1);
908 		msleep(2000);
909 	}
910 	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
911 		   pm8001_dev->device_id, rc);
912 out:
913 	sas_put_local_phy(phy);
914 
915 	return rc;
916 }
917 /* mandatory SAM-3 task management function: reset the specified LUN */
918 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
919 {
920 	int rc = TMF_RESP_FUNC_FAILED;
921 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
922 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
923 	DECLARE_COMPLETION_ONSTACK(completion_setstate);
924 
925 	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
926 		/*
927 		 * If the controller is in fatal error state,
928 		 * we will not get a response from the controller
929 		 */
930 		pm8001_dbg(pm8001_ha, FAIL,
931 			   "LUN reset failed due to fatal errors\n");
932 		return rc;
933 	}
934 
935 	if (dev_is_sata(dev)) {
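		/*
		 * SATA has no LUN reset: abort outstanding I/O, reset the
		 * phy and move the device back to the operational state.
		 */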
936 		struct sas_phy *phy = sas_get_local_phy(dev);
937 		sas_execute_internal_abort_dev(dev, 0, NULL);
938 		rc = sas_phy_reset(phy, 1);
939 		sas_put_local_phy(phy);
940 		pm8001_dev->setds_completion = &completion_setstate;
941 		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
942 			pm8001_dev, DS_OPERATIONAL);
943 		wait_for_completion(&completion_setstate);
944 	} else {
945 		rc = sas_lu_reset(dev, lun);
946 	}
947 	/* If this failed, fall through to an I_T_Nexus reset */
948 	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
949 		   pm8001_dev->device_id, rc);
950 	return rc;
951 }
952 
953 /* optional SAM-3 */
954 int pm8001_query_task(struct sas_task *task)
955 {
956 	u32 tag = 0xdeadbeef;
957 	int rc = TMF_RESP_FUNC_FAILED;
958 	if (unlikely(!task || !task->lldd_task || !task->dev))
959 		return rc;
960 
961 	if (task->task_proto & SAS_PROTOCOL_SSP) {
962 		struct scsi_cmnd *cmnd = task->uldd_task;
963 		struct domain_device *dev = task->dev;
964 		struct pm8001_hba_info *pm8001_ha =
965 			pm8001_find_ha_by_dev(dev);
966 
967 		rc = pm8001_find_tag(task, &tag);
968 		if (rc == 0) {
969 			rc = TMF_RESP_FUNC_FAILED;
970 			return rc;
971 		}
972 		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
973 
974 		rc = sas_query_task(task, tag);
975 		switch (rc) {
976 		/* The task is still in Lun, release it then */
977 		case TMF_RESP_FUNC_SUCC:
978 			pm8001_dbg(pm8001_ha, EH,
979 				   "The task is still in Lun\n");
980 			break;
981 		/* The task is not in Lun or failed, reset the phy */
982 		case TMF_RESP_FUNC_FAILED:
983 		case TMF_RESP_FUNC_COMPLETE:
984 			pm8001_dbg(pm8001_ha, EH,
985 				   "The task is not in Lun or failed, reset the phy\n");
986 			break;
987 		}
988 	}
989 	pr_err("pm80xx: rc= %d\n", rc);
990 	return rc;
991 }
992 
993 /* mandatory SAM-3: abort the specified task; the task/ccb info still needs to be freed */
994 int pm8001_abort_task(struct sas_task *task)
995 {
996 	struct pm8001_ccb_info *ccb = task->lldd_task;
997 	unsigned long flags;
998 	u32 tag;
999 	struct domain_device *dev;
1000 	struct pm8001_hba_info *pm8001_ha;
1001 	struct pm8001_device *pm8001_dev;
1002 	int rc = TMF_RESP_FUNC_FAILED, ret;
1003 	u32 phy_id, port_id;
1004 	struct sas_task_slow slow_task;
1005 
1006 	if (!task->lldd_task || !task->dev)
1007 		return TMF_RESP_FUNC_FAILED;
1008 
1009 	dev = task->dev;
1010 	pm8001_dev = dev->lldd_dev;
1011 	pm8001_ha = pm8001_find_ha_by_dev(dev);
1012 	phy_id = pm8001_dev->attached_phy;
1013 
1014 	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
1015 		// If the controller is seeing fatal errors
1016 		// abort task will not get a response from the controller
1017 		return TMF_RESP_FUNC_FAILED;
1018 	}
1019 
1020 	ret = pm8001_find_tag(task, &tag);
1021 	if (ret == 0) {
1022 		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
1023 		return TMF_RESP_FUNC_FAILED;
1024 	}
1025 	spin_lock_irqsave(&task->task_state_lock, flags);
1026 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1027 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1028 		return TMF_RESP_FUNC_COMPLETE;
1029 	}
1030 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
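	/*
	 * The abort paths below may wait on task->slow_task->completion, so
	 * provide a temporary on-stack slow_task when the task has none.
	 */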
1031 	if (task->slow_task == NULL) {
1032 		init_completion(&slow_task.completion);
1033 		task->slow_task = &slow_task;
1034 	}
1035 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1036 	if (task->task_proto & SAS_PROTOCOL_SSP) {
1037 		rc = sas_abort_task(task, tag);
1038 		sas_execute_internal_abort_single(dev, tag, 0, NULL);
1039 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
1040 		task->task_proto & SAS_PROTOCOL_STP) {
1041 		if (pm8001_ha->chip_id == chip_8006) {
1042 			DECLARE_COMPLETION_ONSTACK(completion_reset);
1043 			DECLARE_COMPLETION_ONSTACK(completion);
1044 			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
1045 			port_id = phy->port->port_id;
1046 
1047 			/* 1. Set Device state as Recovery */
1048 			pm8001_dev->setds_completion = &completion;
1049 			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1050 				pm8001_dev, DS_IN_RECOVERY);
1051 			wait_for_completion(&completion);
1052 
1053 			/* 2. Send Phy Control Hard Reset */
1054 			reinit_completion(&completion);
1055 			phy->port_reset_status = PORT_RESET_TMO;
1056 			phy->reset_success = false;
1057 			phy->enable_completion = &completion;
1058 			phy->reset_completion = &completion_reset;
1059 			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
1060 				PHY_HARD_RESET);
1061 			if (ret) {
1062 				phy->enable_completion = NULL;
1063 				phy->reset_completion = NULL;
1064 				goto out;
1065 			}
1066 
1067 			/* In the case of the reset timeout/fail we still
1068 			 * abort the command at the firmware. The assumption
1069 			 * here is that the drive is off doing something so
1070 			 * that it's not processing requests, and we want to
1071 			 * avoid getting a completion for this and either
1072 			 * leaking the task in libsas or losing the race and
1073 			 * getting a double free.
1074 			 */
1075 			pm8001_dbg(pm8001_ha, MSG,
1076 				   "Waiting for local phy ctl\n");
1077 			ret = wait_for_completion_timeout(&completion,
1078 					PM8001_TASK_TIMEOUT * HZ);
1079 			if (!ret || !phy->reset_success) {
1080 				phy->enable_completion = NULL;
1081 				phy->reset_completion = NULL;
1082 			} else {
1083 				/* 3. Wait for Port Reset complete or
1084 				 * Port reset TMO
1085 				 */
1086 				pm8001_dbg(pm8001_ha, MSG,
1087 					   "Waiting for Port reset\n");
1088 				ret = wait_for_completion_timeout(
1089 					&completion_reset,
1090 					PM8001_TASK_TIMEOUT * HZ);
1091 				if (!ret)
1092 					phy->reset_completion = NULL;
1093 				WARN_ON(phy->port_reset_status ==
1094 						PORT_RESET_TMO);
1095 				if (phy->port_reset_status == PORT_RESET_TMO) {
1096 					pm8001_dev_gone_notify(dev);
1097 					PM8001_CHIP_DISP->hw_event_ack_req(
1098 						pm8001_ha, 0,
1099 						0x07, /*HW_EVENT_PHY_DOWN ack*/
1100 						port_id, phy_id, 0, 0);
1101 					goto out;
1102 				}
1103 			}
1104 
1105 			/*
1106 			 * 4. SATA Abort ALL
1107 			 * we wait for the task to be aborted so that the task
1108 			 * is removed from the ccb. on success the caller is
1109 			 * going to free the task.
1110 			 */
1111 			ret = sas_execute_internal_abort_dev(dev, 0, NULL);
1112 			if (ret)
1113 				goto out;
1114 			ret = wait_for_completion_timeout(
1115 				&task->slow_task->completion,
1116 				PM8001_TASK_TIMEOUT * HZ);
1117 			if (!ret)
1118 				goto out;
1119 
1120 			/* 5. Set Device State as Operational */
1121 			reinit_completion(&completion);
1122 			pm8001_dev->setds_completion = &completion;
1123 			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1124 				pm8001_dev, DS_OPERATIONAL);
1125 			wait_for_completion(&completion);
1126 		} else {
1127 			/*
1128 			 * Ensure that if we see a completion for the ccb
1129 			 * associated with the task which we are trying to
1130 			 * abort then we should not touch the sas_task as it
1131 			 * may race with libsas freeing it when return here.
1132 			 */
1133 			ccb->task = NULL;
1134 			ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
1135 		}
1136 		rc = TMF_RESP_FUNC_COMPLETE;
1137 	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
1138 		/* SMP */
1139 		rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);
1140 
1141 	}
1142 out:
1143 	spin_lock_irqsave(&task->task_state_lock, flags);
1144 	if (task->slow_task == &slow_task)
1145 		task->slow_task = NULL;
1146 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1147 	if (rc != TMF_RESP_FUNC_COMPLETE)
1148 		pm8001_info(pm8001_ha, "rc= %d\n", rc);
1149 	return rc;
1150 }
1151 
1152 int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1153 {
1154 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
1155 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1156 
1157 	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
1158 		   pm8001_dev->device_id);
1159 	return sas_clear_task_set(dev, lun);
1160 }
1161 
1162 void pm8001_port_formed(struct asd_sas_phy *sas_phy)
1163 {
1164 	struct sas_ha_struct *sas_ha = sas_phy->ha;
1165 	struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
1166 	struct pm8001_phy *phy = sas_phy->lldd_phy;
1167 	struct asd_sas_port *sas_port = sas_phy->port;
1168 	struct pm8001_port *port = phy->port;
1169 
1170 	if (!sas_port) {
1171 		pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
1172 		return;
1173 	}
1174 	sas_port->lldd_port = port;
1175 }
1176 
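/* Set the device state back to DS_OPERATIONAL and wait for the firmware to acknowledge; the SPC (chip_8001) does not need this. */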
1177 void pm8001_setds_completion(struct domain_device *dev)
1178 {
1179 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1180 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
1181 	DECLARE_COMPLETION_ONSTACK(completion_setstate);
1182 
1183 	if (pm8001_ha->chip_id != chip_8001) {
1184 		pm8001_dev->setds_completion = &completion_setstate;
1185 		PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1186 			pm8001_dev, DS_OPERATIONAL);
1187 		wait_for_completion(&completion_setstate);
1188 	}
1189 }
1190 
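/* The sas_task of an aborted TMF may be freed by libsas; detach it from its ccb so a late completion does not touch it. */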
1191 void pm8001_tmf_aborted(struct sas_task *task)
1192 {
1193 	struct pm8001_ccb_info *ccb = task->lldd_task;
1194 
1195 	if (ccb)
1196 		ccb->task = NULL;
1197 }
1198