1 /*
2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43 #include "pm80xx_tracepoints.h"
44
45 /**
46 * pm8001_find_tag - from sas task to find out tag that belongs to this task
47 * @task: the task sent to the LLDD
48 * @tag: the found tag associated with the task
49 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	struct pm8001_ccb_info *ccb = task->lldd_task;

	/* No CCB attached to the task means no tag was ever assigned. */
	if (!ccb)
		return 0;

	*tag = ccb->ccb_tag;
	return 1;
}
60
61 /**
62 * pm8001_tag_free - free the no more needed tag
63 * @pm8001_ha: our hba struct
64 * @tag: the found tag associated with the task
65 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->rsvd_tags;
	unsigned long flags;

	/* Only tags below PM8001_RESERVE_SLOT are tracked in this bitmap;
	 * anything larger is not ours to clear.
	 */
	if (tag >= PM8001_RESERVE_SLOT)
		return;

	/* bitmap_lock serializes against pm8001_tag_alloc() */
	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	__clear_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}
78
79 /**
80 * pm8001_tag_alloc - allocate a empty tag for task used.
81 * @pm8001_ha: our hba struct
82 * @tag_out: the found empty tag .
83 */
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	void *bitmap = pm8001_ha->rsvd_tags;
	unsigned long flags;
	unsigned int tag;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT);
	if (tag >= PM8001_RESERVE_SLOT) {
		/* every reserved slot is currently in use */
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	__set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);

	/* reserved tags are in the lower region of the tagset */
	*tag_out = tag;
	return 0;
}
103
/*
 * pm80xx_get_tag_opcodes - extract ATA opcode/tag and aborted state from a
 * task for debug logging. Outputs default to "unknown" (*ata_op = 0,
 * *ata_tag = -1, *task_aborted = false) when @task is NULL or is not an STP
 * task carrying an ata_queued_cmd.
 */
static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
	int *ata_tag, bool *task_aborted)
{
	unsigned long flags;
	struct ata_queued_cmd *qc = NULL;

	*ata_op = 0;
	*ata_tag = -1;
	*task_aborted = false;

	if (!task)
		return;

	/* task_state_flags must be read under task_state_lock */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED)))
		*task_aborted = true;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->task_proto == SAS_PROTOCOL_STP) {
		// sas_ata_qc_issue path uses SAS_PROTOCOL_STP.
		// This only works for scsi + libsas + libata users.
		qc = task->uldd_task;
		if (qc) {
			*ata_op = qc->tf.command;
			*ata_tag = qc->tag;
		}
	}
}
132
pm80xx_get_local_phy_id(struct domain_device * dev)133 u32 pm80xx_get_local_phy_id(struct domain_device *dev)
134 {
135 struct pm8001_device *pm8001_dev = dev->lldd_dev;
136
137 if (dev_parent_is_expander(dev))
138 return dev->parent->ex_dev.ex_phy->phy_id;
139
140 return pm8001_dev->attached_phy;
141 }
142
/*
 * pm80xx_show_pending_commands - dump every in-flight CCB to the FAIL log.
 * If @target_pm8001_dev is non-NULL, only CCBs belonging to that device are
 * shown; otherwise all active CCBs are listed.
 */
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
				  struct pm8001_device *target_pm8001_dev)
{
	int i = 0, ata_op = 0, ata_tag = -1;
	struct pm8001_ccb_info *ccb = NULL;
	struct sas_task *task = NULL;
	struct pm8001_device *pm8001_dev = NULL;
	bool task_aborted;

	for (i = 0; i < pm8001_ha->ccb_count; i++) {
		ccb = &pm8001_ha->ccb_info[i];
		/* PM8001_INVALID_TAG marks a free CCB slot */
		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;
		pm8001_dev = ccb->device;
		if (target_pm8001_dev && pm8001_dev &&
		    target_pm8001_dev != pm8001_dev)
			continue;
		task = ccb->task;
		pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted);
		pm8001_dbg(pm8001_ha, FAIL,
			"tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n",
			ccb->ccb_tag,
			(pm8001_dev ? pm8001_dev->device_id : 0),
			task, task_aborted,
			ata_op, ata_tag);
	}
}
170
171 /**
172 * pm8001_mem_alloc - allocate memory for pm8001.
173 * @pdev: pci device.
174 * @virt_addr: the allocated virtual address
175 * @pphys_addr: DMA address for this device
176 * @pphys_addr_hi: the physical address high byte address.
177 * @pphys_addr_lo: the physical address low byte address.
178 * @mem_size: memory size.
179 * @align: requested byte alignment
180 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	/* The round-up mask below only works when @align is a power of
	 * two (or zero) - NOTE(review): callers are assumed to honor that.
	 */
	if (align)
		align_offset = (dma_addr_t)align - 1;
	/* over-allocate by @align so the aligned sub-region still fits */
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
					    &mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc)
		return -ENOMEM;
	*pphys_addr = mem_dma_handle;
	/* round the bus address up to the requested alignment ... */
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	/* ... and advance the virtual address by the same offset */
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}
202
203 /**
204 * pm8001_find_ha_by_dev - from domain device which come from sas layer to
205 * find out our hba struct.
206 * @dev: the domain device which from sas layer.
207 */
208 static
pm8001_find_ha_by_dev(struct domain_device * dev)209 struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
210 {
211 struct sas_ha_struct *sha = dev->port->ha;
212 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
213 return pm8001_ha;
214 }
215
216 /**
217 * pm8001_phy_control - this function should be registered to
218 * sas_domain_function_template to provide libsas used, note: this is just
219 * control the HBA phy rather than other expander phy if you want control
220 * other phy, you should use SMP command.
221 * @sas_phy: which phy in HBA phys.
222 * @func: the operation.
223 * @funcdata: always NULL.
224 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/*
		 * If the controller is in fatal error state,
		 * we will not get a response from the controller
		 */
		pm8001_dbg(pm8001_ha, FAIL,
			   "Phy control failed due to fatal errors\n");
		return -EFAULT;
	}

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		/* A zero min/max rate means "keep the current setting". */
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		/* A disabled phy must be started, and the start completed,
		 * before the link reset below can be issued.
		 */
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		/* same start-if-disabled dance as above, then hard reset */
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		/* NOTE(review): spinup-hold release is implemented as a plain
		 * link reset here - presumably sufficient for this hardware.
		 */
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		/* The "link up" phy_state value differs between the 8001
		 * (SPC) and the newer (SPCv) chips; if the link was up,
		 * tell libsas the signal is gone before stopping the phy.
		 */
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPCV) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		/* chip_8001 needs its BAR4 window shifted to reach the
		 * per-phy error counters.
		 */
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
					+ 0x1034 + (0x4000 * (phy_id & 3));

			phy->invalid_dword_count = readl(qp);
			phy->running_disparity_error_count = readl(&qp[1]);
			phy->loss_of_dword_sync_count = readl(&qp[3]);
			phy->phy_reset_problem_count = readl(&qp[4]);
		}
		/* restore the default BAR4 window before dropping the lock */
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}
338
339 /**
340 * pm8001_scan_start - we should enable all HBA phys by sending the phy_start
341 * command to HBA.
342 * @shost: the scsi host data.
343 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	DECLARE_COMPLETION_ONSTACK(completion);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	/* Start the phys one at a time, waiting for each start to
	 * complete (and settling 300ms) before moving to the next.
	 */
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
		pm8001_ha->phy[i].enable_completion = &completion;
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
		wait_for_completion(&completion);
		msleep(300);
	}
}
361
pm8001_scan_finished(struct Scsi_Host * shost,unsigned long time)362 int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
363 {
364 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
365
366 /* give the phy enabling interrupt event time to come in (1s
367 * is empirically about all it takes) */
368 if (time < HZ)
369 return 0;
370 /* Wait for discovery to finish */
371 sas_drain_work(ha);
372 return 1;
373 }
374
375 /**
376 * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
377 * @pm8001_ha: our hba card information
378 * @ccb: the ccb which attached to smp task
379 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	/* Delegate to the chip-specific SMP request builder. */
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}
385
/*
 * pm8001_get_ncq_tag - fetch the NCQ tag of an ATA task.
 * Returns 1 and stores the tag in @tag when the task carries an NCQ
 * command; returns 0 otherwise (@tag untouched).
 */
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (!qc || !ata_is_ncq(qc->tf.protocol))
		return 0;

	*tag = qc->tag;
	return 1;
}
397
398 /**
399 * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
400 * @pm8001_ha: our hba card information
401 * @ccb: the ccb which attached to sata task
402 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	/* Delegate to the chip-specific SATA request builder. */
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}
408
409 /**
410 * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
411 * for internal abort task
412 * @pm8001_ha: our hba card information
413 * @ccb: the ccb which attached to sata task
414 */
static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	/* Delegate to the chip-specific task-abort request builder. */
	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
}
420
421 /**
422 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
423 * @pm8001_ha: our hba card information
424 * @ccb: the ccb which attached to TM
425 * @tmf: the task management IU
426 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
	/* Delegate to the chip-specific SSP task-management builder. */
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}
432
433 /**
434 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
435 * @pm8001_ha: our hba card information
436 * @ccb: the ccb which attached to ssp task
437 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	/* Delegate to the chip-specific SSP I/O request builder. */
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}
443
/*
 * A device is "gone" when it was never attached or when its slot has been
 * released back to the pool (dev_type reset to SAS_PHY_UNUSED, see
 * pm8001_free_dev()).
 */
#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
446
447
pm8001_deliver_command(struct pm8001_hba_info * pm8001_ha,struct pm8001_ccb_info * ccb)448 static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
449 struct pm8001_ccb_info *ccb)
450 {
451 struct sas_task *task = ccb->task;
452 enum sas_protocol task_proto = task->task_proto;
453 struct sas_tmf_task *tmf = task->tmf;
454 int is_tmf = !!tmf;
455
456 switch (task_proto) {
457 case SAS_PROTOCOL_SMP:
458 return pm8001_task_prep_smp(pm8001_ha, ccb);
459 case SAS_PROTOCOL_SSP:
460 if (is_tmf)
461 return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
462 return pm8001_task_prep_ssp(pm8001_ha, ccb);
463 case SAS_PROTOCOL_SATA:
464 case SAS_PROTOCOL_STP:
465 return pm8001_task_prep_ata(pm8001_ha, ccb);
466 case SAS_PROTOCOL_INTERNAL_ABORT:
467 return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
468 default:
469 dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
470 task_proto);
471 }
472
473 return -EINVAL;
474 }
475
476 /**
477 * pm8001_queue_command - register for upper layer used, all IO commands sent
478 * to HBA are from this interface.
479 * @task: the task to be execute.
480 * @gfp_flags: gfp_flags
481 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	struct task_status_struct *ts = &task->task_status;
	enum sas_protocol task_proto = task->task_proto;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_port *port;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 n_elem = 0;
	int rc = 0;

	/* Without a port there is no path to the device: fail the task
	 * back as undelivered (internal aborts are exempt).
	 */
	if (!internal_abort && !dev->port) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	if (pm8001_ha->controller_fatal_error) {
		/* the controller will never answer; complete immediately */
		ts->resp = SAS_TASK_UNDELIVERED;
		task->task_done(task);
		return 0;
	}

	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	port = dev->port->lldd_port;

	if (!internal_abort &&
	    (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/* For ATA tasks the ha lock is dropped around task_done()
		 * - NOTE(review): presumably because the ATA completion
		 * path can re-enter the driver; confirm before changing.
		 */
		if (sas_protocol_ata(task_proto)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		} else {
			task->task_done(task);
		}
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n");
		return 0;
	}

	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
	if (!ccb) {
		rc = -SAS_QUEUE_FULL;
		goto err_out;
	}

	/* Map the scatter list for non-ATA protocols; for ATA the scatter
	 * count is used as-is (mapping is handled elsewhere - see
	 * pm8001_ccb_task_free(), which only unmaps non-ATA tasks).
	 */
	if (!sas_protocol_ata(task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto err_out_ccb;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	task->lldd_task = ccb;
	ccb->n_elem = n_elem;

	atomic_inc(&pm8001_dev->running_req);

	rc = pm8001_deliver_command(pm8001_ha, ccb);
	if (rc) {
		/* delivery failed: roll back the bookkeeping above */
		atomic_dec(&pm8001_dev->running_req);
		if (!sas_protocol_ata(task_proto) && n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				     task->num_scatter, task->data_dir);
err_out_ccb:
		pm8001_ccb_free(pm8001_ha, ccb);

err_out:
		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	return rc;
}
574
575 /**
576 * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
577 * @pm8001_ha: our hba card information
578 * @ccb: the ccb which attached to ssp task to free
579 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct ata_queued_cmd *qc;
	struct pm8001_device *pm8001_dev;

	if (!task)
		return;

	/* Undo the dma_map_sg() done in pm8001_queue_command() for
	 * non-ATA tasks (ATA mappings are not owned by this driver).
	 */
	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
		dma_unmap_sg(pm8001_ha->dev, task->scatter,
			     task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		/* SMP request/response buffers are mapped separately */
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (sas_protocol_ata(task->task_proto)) {
		/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
		qc = task->uldd_task;
		pm8001_dev = ccb->device;
		trace_pm80xx_request_complete(pm8001_ha->id,
			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
			ccb->ccb_tag, 0 /* ctlr_opcode not known */,
			qc ? qc->tf.command : 0, // ata opcode
			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
	}

	task->lldd_task = NULL;
	pm8001_ccb_free(pm8001_ha, ccb);
}
624
/*
 * pm8001_init_dev - initialize a freshly claimed device slot: record its
 * table index, mark the firmware device id as unassigned (PM8001_MAX_DEVICES
 * is the sentinel, see pm8001_free_dev()) and zero the request counter.
 */
static void pm8001_init_dev(struct pm8001_device *pm8001_dev, int id)
{
	pm8001_dev->id = id;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	atomic_set(&pm8001_dev->running_req, 0);
}
631
632 /**
633 * pm8001_alloc_dev - find a empty pm8001_device
634 * @pm8001_ha: our hba card information
635 */
pm8001_alloc_dev(struct pm8001_hba_info * pm8001_ha)636 static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
637 {
638 u32 dev;
639 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
640 struct pm8001_device *pm8001_dev = &pm8001_ha->devices[dev];
641
642 if (pm8001_dev->dev_type == SAS_PHY_UNUSED) {
643 pm8001_init_dev(pm8001_dev, dev);
644 return pm8001_dev;
645 }
646 }
647 if (dev == PM8001_MAX_DEVICES) {
648 pm8001_dbg(pm8001_ha, FAIL,
649 "max support %d devices, ignore ..\n",
650 PM8001_MAX_DEVICES);
651 }
652 return NULL;
653 }
654 /**
655 * pm8001_find_dev - find a matching pm8001_device
656 * @pm8001_ha: our hba card information
657 * @device_id: device ID to match against
658 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
				      u32 device_id)
{
	u32 i;

	/* Linear scan of the device table for a matching firmware id. */
	for (i = 0; i < PM8001_MAX_DEVICES; i++) {
		struct pm8001_device *entry = &pm8001_ha->devices[i];

		if (entry->device_id == device_id)
			return entry;
	}

	/* Exhausted the table without a match. */
	pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
	return NULL;
}
672
/*
 * pm8001_free_dev - release a device slot back to the pool: wipe it, then
 * restore the sentinel values (SAS_PHY_UNUSED / PM8001_MAX_DEVICES) that
 * pm8001_alloc_dev() and DEV_IS_GONE() look for.
 */
void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}
680
681 /**
682 * pm8001_dev_found_notify - libsas notify a device is found.
683 * @dev: the device structure which sas layer used.
684 *
685 * when libsas find a sas domain device, it should tell the LLDD that
686 * device is found, and then LLDD register this device to HBA firmware
687 * by the command "OPC_INB_REG_DEV", after that the HBA will assign a
688 * device ID(according to device's sas address) and returned it to LLDD. From
689 * now on, we communicate with HBA FW with the device ID which HBA assigned
690 * rather than sas address. it is the necessary step for our HBA but it is
691 * the optional for other HBA driver.
692 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	/* cross-link the libsas device and our per-device state */
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (dev_parent_is_expander(dev)) {
		int phy_id;

		/* For expander-attached devices, record the expander phy
		 * the device hangs off of.
		 */
		phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
		if (phy_id < 0) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Error: no attached dev:%016llx at ex:%016llx.\n",
				   SAS_ADDR(dev->sas_addr),
				   SAS_ADDR(parent_dev->sas_addr));
			res = phy_id;
		} else {
			pm8001_device->attached_phy = phy_id;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly sata */
		}
	} /*register this device to HBA*/
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	/* the registration completion is signalled via dcompletion */
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}
746
int pm8001_dev_found(struct domain_device *dev)
{
	/* Thin wrapper around pm8001_dev_found_notify(). */
	return pm8001_dev_found_notify(dev);
}
751
752 #define PM8001_TASK_TIMEOUT 20
753
754 /**
755 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
756 * @dev: the device structure which sas layer used.
757 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			   pm8001_dev->device_id, pm8001_dev->dev_type);
		/* Drain in-flight commands first: abort them and poll the
		 * counter with the ha lock dropped so completions can run.
		 */
		if (atomic_read(&pm8001_dev->running_req)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			sas_execute_internal_abort_dev(dev, 0, NULL);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);

		/*
		 * The phy array only contains local phys. Thus, we cannot clear
		 * phy_attached for a device behind an expander.
		 */
		if (!dev_parent_is_expander(dev)) {
			u32 phy_id = pm80xx_get_local_phy_id(dev);

			pm8001_ha->phy[phy_id].phy_attached = 0;
		}
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
796
void pm8001_dev_gone(struct domain_device *dev)
{
	/* Thin wrapper around pm8001_dev_gone_notify(). */
	pm8001_dev_gone_notify(dev);
}
801
802 /* retry commands by ha, by task and/or by device */
/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		/* skip free CCB slots */
		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			/* No target device given: sanity-check that the CCB's
			 * device pointer really lies inside our device table
			 * (properly aligned and in range) before touching it.
			 */
			uintptr_t d = (uintptr_t)pm8001_dev
					- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			    || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			/* already aborted: free the CCB but skip task_done() */
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
			mb();/* in order to force CPU ordering */
			/* task_done() is called with the ha lock dropped */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
870
871 /**
872 * pm8001_I_T_nexus_reset() - reset the initiator/target connection
873 * @dev: the device structure for the device to reset.
874 *
875 * Standard mandates link reset for ATA (type 0) and hard reset for
876 * SSP (type 1), only for RECOVERY
877 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		/* A SATA device on a local phy needs no reset here. */
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				   "phy reset failed for device %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		/* clean out anything still queued for the device in FW */
		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		/* SSP: a hard reset of the phy is sufficient */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);
	return rc;
}
922
923 /*
924 * This function handle the IT_NEXUS_XXX event or completion
925 * status code for SSP/SATA/SMP I/O request.
926 */
pm8001_I_T_nexus_event_handler(struct domain_device * dev)927 int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
928 {
929 int rc = TMF_RESP_FUNC_FAILED;
930 struct pm8001_device *pm8001_dev;
931 struct pm8001_hba_info *pm8001_ha;
932 struct sas_phy *phy;
933
934 if (!dev || !dev->lldd_dev)
935 return -1;
936
937 pm8001_dev = dev->lldd_dev;
938 pm8001_ha = pm8001_find_ha_by_dev(dev);
939
940 pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
941
942 phy = sas_get_local_phy(dev);
943
944 if (dev_is_sata(dev)) {
945 DECLARE_COMPLETION_ONSTACK(completion_setstate);
946 if (scsi_is_sas_phy_local(phy)) {
947 rc = 0;
948 goto out;
949 }
950 /* send internal ssp/sata/smp abort command to FW */
951 sas_execute_internal_abort_dev(dev, 0, NULL);
952 msleep(100);
953
954 /* deregister the target device */
955 pm8001_dev_gone_notify(dev);
956 msleep(200);
957
958 /*send phy reset to hard reset target */
959 rc = sas_phy_reset(phy, 1);
960 msleep(2000);
961 pm8001_dev->setds_completion = &completion_setstate;
962
963 wait_for_completion(&completion_setstate);
964 } else {
965 /* send internal ssp/sata/smp abort command to FW */
966 sas_execute_internal_abort_dev(dev, 0, NULL);
967 msleep(100);
968
969 /* deregister the target device */
970 pm8001_dev_gone_notify(dev);
971 msleep(200);
972
973 /*send phy reset to hard reset target */
974 rc = sas_phy_reset(phy, 1);
975 msleep(2000);
976 }
977 pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
978 pm8001_dev->device_id, rc);
979 out:
980 sas_put_local_phy(phy);
981
982 return rc;
983 }
/* mandatory SAM-3, the task resets the specified LUN */
/*
 * pm8001_lu_reset() - LU reset TMF entry point.
 * @dev: target domain device.
 * @lun: LUN to reset.
 *
 * SSP devices take the regular libsas LU reset; SATA devices are instead
 * recovered with a device-wide internal abort plus a phy hard reset, then
 * returned to the OPERATIONAL firmware device state.
 *
 * Returns a TMF_RESP_FUNC_* code; TMF_RESP_FUNC_FAILED when the controller
 * is in a fatal error state.
 */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/*
		 * If the controller is in fatal error state,
		 * we will not get a response from the controller
		 */
		pm8001_dbg(pm8001_ha, FAIL,
			   "LUN reset failed due to fatal errors\n");
		return rc;
	}

	if (dev_is_sata(dev)) {
		struct sas_phy *phy = sas_get_local_phy(dev);
		/* Abort all outstanding I/O at the FW, then hard reset. */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		/* Wait for firmware to confirm the device is operational. */
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	} else {
		rc = sas_lu_reset(dev, lun);
	}
	/* If failed, fall-through I_T_Nexus reset */
	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
	return rc;
}
1019
1020 /* optional SAM-3 */
pm8001_query_task(struct sas_task * task)1021 int pm8001_query_task(struct sas_task *task)
1022 {
1023 u32 tag = 0xdeadbeef;
1024 int rc = TMF_RESP_FUNC_FAILED;
1025 if (unlikely(!task || !task->lldd_task || !task->dev))
1026 return rc;
1027
1028 if (task->task_proto & SAS_PROTOCOL_SSP) {
1029 struct scsi_cmnd *cmnd = task->uldd_task;
1030 struct domain_device *dev = task->dev;
1031 struct pm8001_hba_info *pm8001_ha =
1032 pm8001_find_ha_by_dev(dev);
1033
1034 rc = pm8001_find_tag(task, &tag);
1035 if (rc == 0) {
1036 rc = TMF_RESP_FUNC_FAILED;
1037 return rc;
1038 }
1039 pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
1040
1041 rc = sas_query_task(task, tag);
1042 switch (rc) {
1043 /* The task is still in Lun, release it then */
1044 case TMF_RESP_FUNC_SUCC:
1045 pm8001_dbg(pm8001_ha, EH,
1046 "The task is still in Lun\n");
1047 break;
1048 /* The task is not in Lun or failed, reset the phy */
1049 case TMF_RESP_FUNC_FAILED:
1050 case TMF_RESP_FUNC_COMPLETE:
1051 pm8001_dbg(pm8001_ha, EH,
1052 "The task is not in Lun or failed, reset the phy\n");
1053 break;
1054 }
1055 }
1056 pr_err("pm80xx: rc= %d\n", rc);
1057 return rc;
1058 }
1059
/* mandatory SAM-3, still needs to free task/ccb info; abort the specified task */
/*
 * pm8001_abort_task() - abort a single outstanding sas_task.
 * @task: the task to abort; task->lldd_task must reference a live ccb.
 *
 * SSP tasks get an ABORT TASK TMF plus an internal firmware abort of the
 * single tag.  SATA/STP tasks on the 8006 controller go through a full
 * recovery sequence (device state -> recovery, phy hard reset, abort-all,
 * device state -> operational); other chips abort just the one command at
 * the firmware.  SMP tasks only need the internal single-tag abort.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success, TMF_RESP_FUNC_FAILED otherwise.
 */
int pm8001_abort_task(struct sas_task *task)
{
	struct pm8001_ccb_info *ccb = task->lldd_task;
	unsigned long flags;
	u32 tag;
	struct domain_device *dev ;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 port_id;
	struct sas_task_slow slow_task;

	if (!task->lldd_task || !task->dev)
		return TMF_RESP_FUNC_FAILED;

	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		// If the controller is seeing fatal errors
		// abort task will not get a response from the controller
		return TMF_RESP_FUNC_FAILED;
	}

	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		/* Task already completed; nothing left to abort. */
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	/*
	 * Lend the task a stack-local slow_task so the SATA path below can
	 * wait on its completion; detached again at "out:" before return.
	 */
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		rc = sas_abort_task(task, tag);
		sas_execute_internal_abort_single(dev, tag, 0, NULL);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			u32 phy_id = pm80xx_get_local_phy_id(dev);
			struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
			port_id = phy->port->port_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_IN_RECOVERY);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				/* Request failed; drop the completion
				 * pointers so a late event cannot signal
				 * stack memory after we return.
				 */
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			pm8001_dbg(pm8001_ha, MSG,
				   "Waiting for local phy ctl\n");
			ret = wait_for_completion_timeout(&completion,
					PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				pm8001_dbg(pm8001_ha, MSG,
					   "Waiting for Port reset\n");
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
						PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					/* Port reset timed out: treat the
					 * device as gone and ack the phy-down
					 * event to the firmware.
					 */
					pm8001_dev_gone_notify(dev);
					PM8001_CHIP_DISP->hw_event_ack_req(
						pm8001_ha, 0,
						0x07, /*HW_EVENT_PHY_DOWN ack*/
						port_id, phy_id, 0, 0);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = sas_execute_internal_abort_dev(dev, 0, NULL);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_OPERATIONAL);
			wait_for_completion(&completion);
		} else {
			/*
			 * Ensure that if we see a completion for the ccb
			 * associated with the task which we are trying to
			 * abort then we should not touch the sas_task as it
			 * may race with libsas freeing it when return here.
			 */
			ccb->task = NULL;
			ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);

	}
out:
	/* Detach the borrowed stack slow_task before it goes out of scope. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_info(pm8001_ha, "rc= %d\n", rc);
	return rc;
}
1218
/* Issue a CLEAR TASK SET TMF (I_T_L_Q nexus) for @lun on @dev. */
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_hba_info *hba = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *target = dev->lldd_dev;

	pm8001_dbg(hba, EH, "I_T_L_Q clear task set[%x]\n",
		   target->device_id);
	return sas_clear_task_set(dev, lun);
}
1228
pm8001_port_formed(struct asd_sas_phy * sas_phy)1229 void pm8001_port_formed(struct asd_sas_phy *sas_phy)
1230 {
1231 struct sas_ha_struct *sas_ha = sas_phy->ha;
1232 struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
1233 struct pm8001_phy *phy = sas_phy->lldd_phy;
1234 struct asd_sas_port *sas_port = sas_phy->port;
1235 struct pm8001_port *port = phy->port;
1236
1237 if (!sas_port) {
1238 pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
1239 return;
1240 }
1241 sas_port->lldd_port = port;
1242 }
1243
pm8001_setds_completion(struct domain_device * dev)1244 void pm8001_setds_completion(struct domain_device *dev)
1245 {
1246 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1247 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1248 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1249
1250 if (pm8001_ha->chip_id != chip_8001) {
1251 pm8001_dev->setds_completion = &completion_setstate;
1252 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1253 pm8001_dev, DS_OPERATIONAL);
1254 wait_for_completion(&completion_setstate);
1255 }
1256 }
1257
pm8001_tmf_aborted(struct sas_task * task)1258 void pm8001_tmf_aborted(struct sas_task *task)
1259 {
1260 struct pm8001_ccb_info *ccb = task->lldd_task;
1261
1262 if (ccb)
1263 ccb->task = NULL;
1264 }
1265