1 /*
2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43 #include "pm80xx_tracepoints.h"
44
45 /**
46 * pm8001_find_tag - from sas task to find out tag that belongs to this task
47 * @task: the task sent to the LLDD
48 * @tag: the found tag associated with the task
49 */
pm8001_find_tag(struct sas_task * task,u32 * tag)50 static int pm8001_find_tag(struct sas_task *task, u32 *tag)
51 {
52 if (task->lldd_task) {
53 struct pm8001_ccb_info *ccb;
54 ccb = task->lldd_task;
55 *tag = ccb->ccb_tag;
56 return 1;
57 }
58 return 0;
59 }
60
61 /**
62 * pm8001_tag_free - free the no more needed tag
63 * @pm8001_ha: our hba struct
64 * @tag: the found tag associated with the task
65 */
pm8001_tag_free(struct pm8001_hba_info * pm8001_ha,u32 tag)66 void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
67 {
68 void *bitmap = pm8001_ha->rsvd_tags;
69 unsigned long flags;
70
71 if (tag >= PM8001_RESERVE_SLOT)
72 return;
73
74 spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
75 __clear_bit(tag, bitmap);
76 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
77 }
78
79 /**
80 * pm8001_tag_alloc - allocate a empty tag for task used.
81 * @pm8001_ha: our hba struct
82 * @tag_out: the found empty tag .
83 */
pm8001_tag_alloc(struct pm8001_hba_info * pm8001_ha,u32 * tag_out)84 int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
85 {
86 void *bitmap = pm8001_ha->rsvd_tags;
87 unsigned long flags;
88 unsigned int tag;
89
90 spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
91 tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT);
92 if (tag >= PM8001_RESERVE_SLOT) {
93 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
94 return -SAS_QUEUE_FULL;
95 }
96 __set_bit(tag, bitmap);
97 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
98
99 /* reserved tags are in the lower region of the tagset */
100 *tag_out = tag;
101 return 0;
102 }
103
/*
 * pm80xx_get_tag_opcodes - gather debug details for one pending task:
 * whether it is marked aborted, and (for STP tasks issued through the
 * libata path) its ATA command opcode and queued-command tag.
 */
static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
				   int *ata_tag, bool *task_aborted)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	*ata_op = 0;
	*ata_tag = -1;
	*task_aborted = false;

	if (!task)
		return;

	spin_lock_irqsave(&task->task_state_lock, flags);
	*task_aborted = !!(task->task_state_flags & SAS_TASK_STATE_ABORTED);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->task_proto != SAS_PROTOCOL_STP)
		return;

	/*
	 * sas_ata_qc_issue path uses SAS_PROTOCOL_STP.
	 * This only works for scsi + libsas + libata users.
	 */
	qc = task->uldd_task;
	if (qc) {
		*ata_op = qc->tf.command;
		*ata_tag = qc->tag;
	}
}
132
/**
 * pm80xx_show_pending_commands - log every outstanding command for debug
 * @pm8001_ha: our hba struct
 * @target_pm8001_dev: optional filter; when non-NULL, only commands that
 * belong to this device are reported
 *
 * Walks the whole ccb table and prints, at FAIL level, each ccb that still
 * holds a valid tag: tag, device id, task pointer, aborted state and (for
 * STP tasks) the ATA opcode and tag.
 */
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_device *target_pm8001_dev)
{
	int i = 0, ata_op = 0, ata_tag = -1;
	struct pm8001_ccb_info *ccb = NULL;
	struct sas_task *task = NULL;
	struct pm8001_device *pm8001_dev = NULL;
	bool task_aborted;

	for (i = 0; i < pm8001_ha->ccb_count; i++) {
		ccb = &pm8001_ha->ccb_info[i];
		/* skip ccbs that are not currently in use */
		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;
		pm8001_dev = ccb->device;
		/* apply the optional per-device filter */
		if (target_pm8001_dev && pm8001_dev &&
		    target_pm8001_dev != pm8001_dev)
			continue;
		task = ccb->task;
		pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted);
		pm8001_dbg(pm8001_ha, FAIL,
			"tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n",
			ccb->ccb_tag,
			(pm8001_dev ? pm8001_dev->device_id : 0),
			task, task_aborted,
			ata_op, ata_tag);
	}
}
160
/**
 * pm8001_mem_alloc - allocate memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: the allocated virtual address
 * @pphys_addr: DMA address for this device
 * @pphys_addr_hi: the physical address high byte address.
 * @pphys_addr_lo: the physical address low byte address.
 * @mem_size: memory size.
 * @align: requested byte alignment
 *
 * Over-allocates by @align bytes so the returned addresses can be rounded
 * up to the requested boundary.
 * NOTE(review): the rounding uses (align - 1) as a bit mask, which assumes
 * @align is zero or a power of two -- confirm all callers honour this.
 *
 * Return: 0 on success, -ENOMEM if the coherent allocation fails.
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
			&mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc)
		return -ENOMEM;
	*pphys_addr = mem_dma_handle;
	/* round the DMA address up to the alignment boundary ... */
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	/* ... and offset the virtual address by the same amount */
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}
192
193 /**
194 * pm8001_find_ha_by_dev - from domain device which come from sas layer to
195 * find out our hba struct.
196 * @dev: the domain device which from sas layer.
197 */
198 static
pm8001_find_ha_by_dev(struct domain_device * dev)199 struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
200 {
201 struct sas_ha_struct *sha = dev->port->ha;
202 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
203 return pm8001_ha;
204 }
205
/**
 * pm8001_phy_control - this function should be registered to
 * sas_domain_function_template to provide libsas used, note: this is just
 * control the HBA phy rather than other expander phy if you want control
 * other phy, you should use SMP command.
 * @sas_phy: which phy in HBA phys.
 * @func: the operation.
 * @funcdata: always NULL.
 *
 * Return: 0 on success, -EFAULT when the controller is in fatal error
 * state, -EINVAL on a failed BAR shift, -EOPNOTSUPP for unknown @func.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/*
		 * If the controller is in fatal error state,
		 * we will not get a response from the controller
		 */
		pm8001_dbg(pm8001_ha, FAIL,
			   "Phy control failed due to fatal errors\n");
		return -EFAULT;
	}

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		/* record the requested (non-zero) rate limits ... */
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		/* ... starting the phy first if it is currently disabled */
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		/* a disabled phy must be started before it can be reset */
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		/* spinup hold is released via a plain link reset */
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		/*
		 * If the phy is currently up, tell libsas it lost signal
		 * before stopping it (state constant differs per chip family).
		 */
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPCV) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		/* the 8001 needs a BAR4 window shift to reach the counters */
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			/* per-phy error counter registers, 4 phys per bank */
			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
				+ 0x1034 + (0x4000 * (phy_id & 3));

			phy->invalid_dword_count = readl(qp);
			phy->running_disparity_error_count = readl(&qp[1]);
			phy->loss_of_dword_sync_count = readl(&qp[3]);
			phy->phy_reset_problem_count = readl(&qp[4]);
		}
		/* restore the BAR4 window before releasing the lock */
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	/* give the reset/phy operation time to take effect */
	msleep(300);
	return rc;
}
328
/**
 * pm8001_scan_start - we should enable all HBA phys by sending the phy_start
 * command to HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	DECLARE_COMPLETION_ONSTACK(completion);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	/*
	 * Start phys one at a time; the same on-stack completion is reused
	 * for each phy (presumably signalled by the phy-start response
	 * handler -- it is not visible in this file).
	 */
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
		pm8001_ha->phy[i].enable_completion = &completion;
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
		wait_for_completion(&completion);
		msleep(300);
	}
}
351
pm8001_scan_finished(struct Scsi_Host * shost,unsigned long time)352 int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
353 {
354 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
355
356 /* give the phy enabling interrupt event time to come in (1s
357 * is empirically about all it takes) */
358 if (time < HZ)
359 return 0;
360 /* Wait for discovery to finish */
361 sas_drain_work(ha);
362 return 1;
363 }
364
/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to smp task
 *
 * Return: the chip dispatcher's smp_req() result.
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}
375
/**
 * pm8001_get_ncq_tag - fetch the NCQ tag of an ATA task, if it has one
 * @task: the sas task whose uldd_task may be an ata_queued_cmd
 * @tag: output; receives the queued-command tag
 *
 * Return: 1 when the task is an NCQ command (tag written), otherwise 0.
 */
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (!qc || !ata_is_ncq(qc->tf.protocol))
		return 0;

	*tag = qc->tag;
	return 1;
}
387
/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to sata task
 *
 * Return: the chip dispatcher's sata_req() result.
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}
398
/**
 * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
 * for internal abort task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to the internal abort task
 *
 * Return: the chip dispatcher's task_abort() result.
 */
static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
}
410
/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to TM
 * @tmf: the task management IU
 *
 * Return: the chip dispatcher's ssp_tm_req() result.
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}
422
/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to ssp task
 *
 * Return: the chip dispatcher's ssp_io_req() result.
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}
433
/*
 * A device slot is "gone" when the pointer is NULL or the slot has been
 * returned to the free pool (dev_type reset to SAS_PHY_UNUSED by
 * pm8001_free_dev()).
 */
#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
436
437
pm8001_deliver_command(struct pm8001_hba_info * pm8001_ha,struct pm8001_ccb_info * ccb)438 static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
439 struct pm8001_ccb_info *ccb)
440 {
441 struct sas_task *task = ccb->task;
442 enum sas_protocol task_proto = task->task_proto;
443 struct sas_tmf_task *tmf = task->tmf;
444 int is_tmf = !!tmf;
445
446 switch (task_proto) {
447 case SAS_PROTOCOL_SMP:
448 return pm8001_task_prep_smp(pm8001_ha, ccb);
449 case SAS_PROTOCOL_SSP:
450 if (is_tmf)
451 return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
452 return pm8001_task_prep_ssp(pm8001_ha, ccb);
453 case SAS_PROTOCOL_SATA:
454 case SAS_PROTOCOL_STP:
455 return pm8001_task_prep_ata(pm8001_ha, ccb);
456 case SAS_PROTOCOL_INTERNAL_ABORT:
457 return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
458 default:
459 dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
460 task_proto);
461 }
462
463 return -EINVAL;
464 }
465
466 /**
467 * pm8001_queue_command - register for upper layer used, all IO commands sent
468 * to HBA are from this interface.
469 * @task: the task to be execute.
470 * @gfp_flags: gfp_flags
471 */
pm8001_queue_command(struct sas_task * task,gfp_t gfp_flags)472 int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
473 {
474 struct task_status_struct *ts = &task->task_status;
475 enum sas_protocol task_proto = task->task_proto;
476 struct domain_device *dev = task->dev;
477 struct pm8001_device *pm8001_dev = dev->lldd_dev;
478 bool internal_abort = sas_is_internal_abort(task);
479 struct pm8001_hba_info *pm8001_ha;
480 struct pm8001_port *port = NULL;
481 struct pm8001_ccb_info *ccb;
482 unsigned long flags;
483 u32 n_elem = 0;
484 int rc = 0;
485
486 if (!internal_abort && !dev->port) {
487 ts->resp = SAS_TASK_UNDELIVERED;
488 ts->stat = SAS_PHY_DOWN;
489 if (dev->dev_type != SAS_SATA_DEV)
490 task->task_done(task);
491 return 0;
492 }
493
494 pm8001_ha = pm8001_find_ha_by_dev(dev);
495 if (pm8001_ha->controller_fatal_error) {
496 ts->resp = SAS_TASK_UNDELIVERED;
497 task->task_done(task);
498 return 0;
499 }
500
501 pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
502
503 spin_lock_irqsave(&pm8001_ha->lock, flags);
504
505 pm8001_dev = dev->lldd_dev;
506 port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
507
508 if (!internal_abort &&
509 (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
510 ts->resp = SAS_TASK_UNDELIVERED;
511 ts->stat = SAS_PHY_DOWN;
512 if (sas_protocol_ata(task_proto)) {
513 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
514 task->task_done(task);
515 spin_lock_irqsave(&pm8001_ha->lock, flags);
516 } else {
517 task->task_done(task);
518 }
519 rc = -ENODEV;
520 goto err_out;
521 }
522
523 ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
524 if (!ccb) {
525 rc = -SAS_QUEUE_FULL;
526 goto err_out;
527 }
528
529 if (!sas_protocol_ata(task_proto)) {
530 if (task->num_scatter) {
531 n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
532 task->num_scatter, task->data_dir);
533 if (!n_elem) {
534 rc = -ENOMEM;
535 goto err_out_ccb;
536 }
537 }
538 } else {
539 n_elem = task->num_scatter;
540 }
541
542 task->lldd_task = ccb;
543 ccb->n_elem = n_elem;
544
545 atomic_inc(&pm8001_dev->running_req);
546
547 rc = pm8001_deliver_command(pm8001_ha, ccb);
548 if (rc) {
549 atomic_dec(&pm8001_dev->running_req);
550 if (!sas_protocol_ata(task_proto) && n_elem)
551 dma_unmap_sg(pm8001_ha->dev, task->scatter,
552 task->num_scatter, task->data_dir);
553 err_out_ccb:
554 pm8001_ccb_free(pm8001_ha, ccb);
555
556 err_out:
557 pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
558 }
559
560 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
561
562 return rc;
563 }
564
/**
 * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to ssp task to free
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct ata_queued_cmd *qc;
	struct pm8001_device *pm8001_dev;

	/* nothing to release if no task is attached to this ccb */
	if (!task)
		return;

	/* undo the dma_map_sg() done in pm8001_queue_command() */
	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
		dma_unmap_sg(pm8001_ha->dev, task->scatter,
			     task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		/* SMP request/response buffers are mapped separately */
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (sas_protocol_ata(task->task_proto)) {
		/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
		qc = task->uldd_task;
		pm8001_dev = ccb->device;
		trace_pm80xx_request_complete(pm8001_ha->id,
			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
			ccb->ccb_tag, 0 /* ctlr_opcode not known */,
			qc ? qc->tf.command : 0, // ata opcode
			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
	}

	/* detach the ccb from the task before returning it to the pool */
	task->lldd_task = NULL;
	pm8001_ccb_free(pm8001_ha, ccb);
}
614
pm8001_init_dev(struct pm8001_device * pm8001_dev,int id)615 static void pm8001_init_dev(struct pm8001_device *pm8001_dev, int id)
616 {
617 pm8001_dev->id = id;
618 pm8001_dev->device_id = PM8001_MAX_DEVICES;
619 atomic_set(&pm8001_dev->running_req, 0);
620 }
621
622 /**
623 * pm8001_alloc_dev - find a empty pm8001_device
624 * @pm8001_ha: our hba card information
625 */
pm8001_alloc_dev(struct pm8001_hba_info * pm8001_ha)626 static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
627 {
628 u32 dev;
629 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
630 struct pm8001_device *pm8001_dev = &pm8001_ha->devices[dev];
631
632 if (pm8001_dev->dev_type == SAS_PHY_UNUSED) {
633 pm8001_init_dev(pm8001_dev, dev);
634 return pm8001_dev;
635 }
636 }
637 if (dev == PM8001_MAX_DEVICES) {
638 pm8001_dbg(pm8001_ha, FAIL,
639 "max support %d devices, ignore ..\n",
640 PM8001_MAX_DEVICES);
641 }
642 return NULL;
643 }
644 /**
645 * pm8001_find_dev - find a matching pm8001_device
646 * @pm8001_ha: our hba card information
647 * @device_id: device ID to match against
648 */
pm8001_find_dev(struct pm8001_hba_info * pm8001_ha,u32 device_id)649 struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
650 u32 device_id)
651 {
652 u32 dev;
653 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
654 if (pm8001_ha->devices[dev].device_id == device_id)
655 return &pm8001_ha->devices[dev];
656 }
657 if (dev == PM8001_MAX_DEVICES) {
658 pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
659 }
660 return NULL;
661 }
662
pm8001_free_dev(struct pm8001_device * pm8001_dev)663 void pm8001_free_dev(struct pm8001_device *pm8001_dev)
664 {
665 memset(pm8001_dev, 0, sizeof(*pm8001_dev));
666 pm8001_dev->dev_type = SAS_PHY_UNUSED;
667 pm8001_dev->device_id = PM8001_MAX_DEVICES;
668 pm8001_dev->sas_device = NULL;
669 }
670
/**
 * pm8001_dev_found_notify - libsas notify a device is found.
 * @dev: the device structure which sas layer used.
 *
 * when libsas find a sas domain device, it should tell the LLDD that
 * device is found, and then LLDD register this device to HBA firmware
 * by the command "OPC_INB_REG_DEV", after that the HBA will assign a
 * device ID(according to device's sas address) and returned it to LLDD. From
 * now on, we communicate with HBA FW with the device ID which HBA assigned
 * rather than sas address. it is the necessary step for our HBA but it is
 * the optional for other HBA driver.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	/* claim a free slot in the device table */
	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;

		/* behind an expander: find the attached expander phy */
		phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
		if (phy_id < 0) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Error: no attached dev:%016llx at ex:%016llx.\n",
				   SAS_ADDR(dev->sas_addr),
				   SAS_ADDR(parent_dev->sas_addr));
			/*
			 * NOTE(review): res records the error but the flow
			 * still falls through to register the device below
			 * -- confirm this is intended.
			 */
			res = phy_id;
		} else {
			pm8001_device->attached_phy = phy_id;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly sata */
		}
	} /*register this device to HBA*/
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	/* drop the lock before waiting for the registration response */
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}
736
/**
 * pm8001_dev_found - libsas "device found" entry point; thin wrapper
 * around pm8001_dev_found_notify().
 * @dev: the domain device reported by the sas layer.
 */
int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}
741
/* Task timeout value, presumably in seconds -- not used in this chunk;
 * confirm unit against its users. */
#define PM8001_TASK_TIMEOUT 20
743
/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure which sas layer used.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			   pm8001_dev->device_id, pm8001_dev->dev_type);
		if (atomic_read(&pm8001_dev->running_req)) {
			/*
			 * Abort everything still in flight; the ha lock is
			 * dropped while issuing the abort and polling the
			 * in-flight count down to zero.
			 */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			sas_execute_internal_abort_dev(dev, 0, NULL);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		/* unregister from the firmware and free the table slot */
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
776
/**
 * pm8001_dev_gone - libsas "device gone" entry point; thin wrapper around
 * pm8001_dev_gone_notify().
 * @dev: the domain device reported gone by the sas layer.
 */
void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}
781
/**
 * pm8001_open_reject_retry - complete matching in-flight commands with
 * OPEN_REJECT/RSVD_RETRY so the midlayer requeues them
 * @pm8001_ha: our hba struct (may be NULL; then this is a no-op)
 * @task_to_close: if non-NULL, only this task is retried
 * @device_to_close: if non-NULL, only tasks for this device are retried;
 * when NULL, every device in this hba's device table is eligible
 *
 * retry commands by ha, by task and/or by device
 */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		/* skip ccbs that are not currently in use */
		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			/*
			 * No device filter: sanity-check that the ccb's
			 * device pointer really points into this hba's
			 * devices[] array (aligned and in range).
			 */
			uintptr_t d = (uintptr_t)pm8001_dev
					- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			/* aborted tasks: free the ccb but skip task_done() */
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
			mb();/* in order to force CPU ordering */
			/* drop the ha lock around the upper-layer callback */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
850
/**
 * pm8001_I_T_nexus_reset() - reset the initiator/target connection
 * @dev: the device structure for the device to reset.
 *
 * Standard mandates link reset for ATA (type 0) and hard reset for
 * SSP (type 1), only for RECOVERY
 *
 * Return: 0 or TMF_RESP_FUNC_FAILED style result; -ENODEV when the
 * device is unknown to the LLDD.
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		/* directly attached SATA on a local phy: nothing to do */
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				   "phy reset failed for device %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		/* clean out anything the reset left in flight */
		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
 out:
	sas_put_local_phy(phy);
	return rc;
}
902
903 /*
904 * This function handle the IT_NEXUS_XXX event or completion
905 * status code for SSP/SATA/SMP I/O request.
906 */
pm8001_I_T_nexus_event_handler(struct domain_device * dev)907 int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
908 {
909 int rc = TMF_RESP_FUNC_FAILED;
910 struct pm8001_device *pm8001_dev;
911 struct pm8001_hba_info *pm8001_ha;
912 struct sas_phy *phy;
913
914 if (!dev || !dev->lldd_dev)
915 return -1;
916
917 pm8001_dev = dev->lldd_dev;
918 pm8001_ha = pm8001_find_ha_by_dev(dev);
919
920 pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
921
922 phy = sas_get_local_phy(dev);
923
924 if (dev_is_sata(dev)) {
925 DECLARE_COMPLETION_ONSTACK(completion_setstate);
926 if (scsi_is_sas_phy_local(phy)) {
927 rc = 0;
928 goto out;
929 }
930 /* send internal ssp/sata/smp abort command to FW */
931 sas_execute_internal_abort_dev(dev, 0, NULL);
932 msleep(100);
933
934 /* deregister the target device */
935 pm8001_dev_gone_notify(dev);
936 msleep(200);
937
938 /*send phy reset to hard reset target */
939 rc = sas_phy_reset(phy, 1);
940 msleep(2000);
941 pm8001_dev->setds_completion = &completion_setstate;
942
943 wait_for_completion(&completion_setstate);
944 } else {
945 /* send internal ssp/sata/smp abort command to FW */
946 sas_execute_internal_abort_dev(dev, 0, NULL);
947 msleep(100);
948
949 /* deregister the target device */
950 pm8001_dev_gone_notify(dev);
951 msleep(200);
952
953 /*send phy reset to hard reset target */
954 rc = sas_phy_reset(phy, 1);
955 msleep(2000);
956 }
957 pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
958 pm8001_dev->device_id, rc);
959 out:
960 sas_put_local_phy(phy);
961
962 return rc;
963 }
964 /* mandatory SAM-3, the task reset the specified LUN*/
pm8001_lu_reset(struct domain_device * dev,u8 * lun)965 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
966 {
967 int rc = TMF_RESP_FUNC_FAILED;
968 struct pm8001_device *pm8001_dev = dev->lldd_dev;
969 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
970 DECLARE_COMPLETION_ONSTACK(completion_setstate);
971
972 if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
973 /*
974 * If the controller is in fatal error state,
975 * we will not get a response from the controller
976 */
977 pm8001_dbg(pm8001_ha, FAIL,
978 "LUN reset failed due to fatal errors\n");
979 return rc;
980 }
981
982 if (dev_is_sata(dev)) {
983 struct sas_phy *phy = sas_get_local_phy(dev);
984 sas_execute_internal_abort_dev(dev, 0, NULL);
985 rc = sas_phy_reset(phy, 1);
986 sas_put_local_phy(phy);
987 pm8001_dev->setds_completion = &completion_setstate;
988 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
989 pm8001_dev, DS_OPERATIONAL);
990 wait_for_completion(&completion_setstate);
991 } else {
992 rc = sas_lu_reset(dev, lun);
993 }
994 /* If failed, fall-through I_T_Nexus reset */
995 pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
996 pm8001_dev->device_id, rc);
997 return rc;
998 }
999
1000 /* optional SAM-3 */
pm8001_query_task(struct sas_task * task)1001 int pm8001_query_task(struct sas_task *task)
1002 {
1003 u32 tag = 0xdeadbeef;
1004 int rc = TMF_RESP_FUNC_FAILED;
1005 if (unlikely(!task || !task->lldd_task || !task->dev))
1006 return rc;
1007
1008 if (task->task_proto & SAS_PROTOCOL_SSP) {
1009 struct scsi_cmnd *cmnd = task->uldd_task;
1010 struct domain_device *dev = task->dev;
1011 struct pm8001_hba_info *pm8001_ha =
1012 pm8001_find_ha_by_dev(dev);
1013
1014 rc = pm8001_find_tag(task, &tag);
1015 if (rc == 0) {
1016 rc = TMF_RESP_FUNC_FAILED;
1017 return rc;
1018 }
1019 pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
1020
1021 rc = sas_query_task(task, tag);
1022 switch (rc) {
1023 /* The task is still in Lun, release it then */
1024 case TMF_RESP_FUNC_SUCC:
1025 pm8001_dbg(pm8001_ha, EH,
1026 "The task is still in Lun\n");
1027 break;
1028 /* The task is not in Lun or failed, reset the phy */
1029 case TMF_RESP_FUNC_FAILED:
1030 case TMF_RESP_FUNC_COMPLETE:
1031 pm8001_dbg(pm8001_ha, EH,
1032 "The task is not in Lun or failed, reset the phy\n");
1033 break;
1034 }
1035 }
1036 pr_err("pm80xx: rc= %d\n", rc);
1037 return rc;
1038 }
1039
/* mandatory SAM-3, still need free task/ccb info, abort the specified task */
int pm8001_abort_task(struct sas_task *task)
{
	struct pm8001_ccb_info *ccb = task->lldd_task;
	unsigned long flags;
	u32 tag;
	struct domain_device *dev ;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id, port_id;
	struct sas_task_slow slow_task;

	if (!task->lldd_task || !task->dev)
		return TMF_RESP_FUNC_FAILED;

	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		// If the controller is seeing fatal errors
		// abort task will not get a response from the controller
		return TMF_RESP_FUNC_FAILED;
	}

	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		/* Task already completed; nothing to abort. */
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	/*
	 * Splice in an on-stack slow_task so the SATA path below can
	 * wait on the task's completion even if libsas did not attach
	 * one; it is unspliced again under the lock before returning.
	 */
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		/* SSP: abort via TMF, then abort the tag at the firmware. */
		rc = sas_abort_task(task, tag);
		sas_execute_internal_abort_single(dev, tag, 0, NULL);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			/* chip_8006 needs a full recovery sequence rather
			 * than a plain single-tag abort.
			 */
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
			port_id = phy->port->port_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_IN_RECOVERY);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				/* Request not accepted: clear the completion
				 * pointers so the IRQ path cannot signal
				 * stack memory after we return.
				 */
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			pm8001_dbg(pm8001_ha, MSG,
				   "Waiting for local phy ctl\n");
			ret = wait_for_completion_timeout(&completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				pm8001_dbg(pm8001_ha, MSG,
					   "Waiting for Port reset\n");
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
						PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					/* Port reset timed out: the device is
					 * gone, so deregister it and ack the
					 * phy-down event to the firmware.
					 */
					pm8001_dev_gone_notify(dev);
					PM8001_CHIP_DISP->hw_event_ack_req(
						pm8001_ha, 0,
						0x07, /*HW_EVENT_PHY_DOWN ack*/
						port_id, phy_id, 0, 0);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = sas_execute_internal_abort_dev(dev, 0, NULL);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_OPERATIONAL);
			wait_for_completion(&completion);
		} else {
			/*
			 * Ensure that if we see a completion for the ccb
			 * associated with the task which we are trying to
			 * abort then we should not touch the sas_task as it
			 * may race with libsas freeing it when return here.
			 */
			ccb->task = NULL;
			ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);

	}
out:
	/* Unsplice the on-stack slow_task before it goes out of scope. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_info(pm8001_ha, "rc= %d\n", rc);
	return rc;
}
1198
/* Clear the task set (I_T_L_Q) on the given device/LUN via libsas. */
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
		pm8001_dev->device_id);
	return sas_clear_task_set(dev, lun);
}
1208
pm8001_port_formed(struct asd_sas_phy * sas_phy)1209 void pm8001_port_formed(struct asd_sas_phy *sas_phy)
1210 {
1211 struct sas_ha_struct *sas_ha = sas_phy->ha;
1212 struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
1213 struct pm8001_phy *phy = sas_phy->lldd_phy;
1214 struct asd_sas_port *sas_port = sas_phy->port;
1215 struct pm8001_port *port = phy->port;
1216
1217 if (!sas_port) {
1218 pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
1219 return;
1220 }
1221 sas_port->lldd_port = port;
1222 }
1223
pm8001_setds_completion(struct domain_device * dev)1224 void pm8001_setds_completion(struct domain_device *dev)
1225 {
1226 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1227 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1228 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1229
1230 if (pm8001_ha->chip_id != chip_8001) {
1231 pm8001_dev->setds_completion = &completion_setstate;
1232 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1233 pm8001_dev, DS_OPERATIONAL);
1234 wait_for_completion(&completion_setstate);
1235 }
1236 }
1237
pm8001_tmf_aborted(struct sas_task * task)1238 void pm8001_tmf_aborted(struct sas_task *task)
1239 {
1240 struct pm8001_ccb_info *ccb = task->lldd_task;
1241
1242 if (ccb)
1243 ccb->task = NULL;
1244 }
1245