1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * BSD LICENSE 25 * 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 * All rights reserved. 28 * 29 * Redistribution and use in source and binary forms, with or without 30 * modification, are permitted provided that the following conditions 31 * are met: 32 * 33 * * Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * * Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in 37 * the documentation and/or other materials provided with the 38 * distribution. 39 * * Neither the name of Intel Corporation nor the names of its 40 * contributors may be used to endorse or promote products derived 41 * from this software without specific prior written permission. 
42 * 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56 #include <scsi/scsi_cmnd.h> 57 #include "isci.h" 58 #include "task.h" 59 #include "request.h" 60 #include "scu_completion_codes.h" 61 #include "scu_event_codes.h" 62 #include "sas.h" 63 64 #undef C 65 #define C(a) (#a) 66 const char *req_state_name(enum sci_base_request_states state) 67 { 68 static const char * const strings[] = REQUEST_STATES; 69 70 return strings[state]; 71 } 72 #undef C 73 74 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, 75 int idx) 76 { 77 if (idx == 0) 78 return &ireq->tc->sgl_pair_ab; 79 else if (idx == 1) 80 return &ireq->tc->sgl_pair_cd; 81 else if (idx < 0) 82 return NULL; 83 else 84 return &ireq->sg_table[idx - 2]; 85 } 86 87 static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, 88 struct isci_request *ireq, u32 idx) 89 { 90 u32 offset; 91 92 if (idx == 0) { 93 offset = (void *) &ireq->tc->sgl_pair_ab - 94 (void *) &ihost->task_context_table[0]; 95 return ihost->tc_dma + offset; 96 } else if (idx == 1) { 97 offset = (void *) &ireq->tc->sgl_pair_cd - 98 (void *) &ihost->task_context_table[0]; 99 return ihost->tc_dma + offset; 100 } 101 102 return sci_io_request_get_dma_addr(ireq, 
&ireq->sg_table[idx - 2]); 103 } 104 105 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) 106 { 107 e->length = sg_dma_len(sg); 108 e->address_upper = upper_32_bits(sg_dma_address(sg)); 109 e->address_lower = lower_32_bits(sg_dma_address(sg)); 110 e->address_modifier = 0; 111 } 112 113 static void sci_request_build_sgl(struct isci_request *ireq) 114 { 115 struct isci_host *ihost = ireq->isci_host; 116 struct sas_task *task = isci_request_access_task(ireq); 117 struct scatterlist *sg = NULL; 118 dma_addr_t dma_addr; 119 u32 sg_idx = 0; 120 struct scu_sgl_element_pair *scu_sg = NULL; 121 struct scu_sgl_element_pair *prev_sg = NULL; 122 123 if (task->num_scatter > 0) { 124 sg = task->scatter; 125 126 while (sg) { 127 scu_sg = to_sgl_element_pair(ireq, sg_idx); 128 init_sgl_element(&scu_sg->A, sg); 129 sg = sg_next(sg); 130 if (sg) { 131 init_sgl_element(&scu_sg->B, sg); 132 sg = sg_next(sg); 133 } else 134 memset(&scu_sg->B, 0, sizeof(scu_sg->B)); 135 136 if (prev_sg) { 137 dma_addr = to_sgl_element_pair_dma(ihost, 138 ireq, 139 sg_idx); 140 141 prev_sg->next_pair_upper = 142 upper_32_bits(dma_addr); 143 prev_sg->next_pair_lower = 144 lower_32_bits(dma_addr); 145 } 146 147 prev_sg = scu_sg; 148 sg_idx++; 149 } 150 } else { /* handle when no sg */ 151 scu_sg = to_sgl_element_pair(ireq, sg_idx); 152 153 dma_addr = dma_map_single(&ihost->pdev->dev, 154 task->scatter, 155 task->total_xfer_len, 156 task->data_dir); 157 158 ireq->zero_scatter_daddr = dma_addr; 159 160 scu_sg->A.length = task->total_xfer_len; 161 scu_sg->A.address_upper = upper_32_bits(dma_addr); 162 scu_sg->A.address_lower = lower_32_bits(dma_addr); 163 } 164 165 if (scu_sg) { 166 scu_sg->next_pair_upper = 0; 167 scu_sg->next_pair_lower = 0; 168 } 169 } 170 171 static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) 172 { 173 struct ssp_cmd_iu *cmd_iu; 174 struct sas_task *task = isci_request_access_task(ireq); 175 176 cmd_iu = &ireq->ssp.cmd; 177 178 
memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); 179 cmd_iu->add_cdb_len = 0; 180 cmd_iu->_r_a = 0; 181 cmd_iu->_r_b = 0; 182 cmd_iu->en_fburst = 0; /* unsupported */ 183 cmd_iu->task_prio = task->ssp_task.task_prio; 184 cmd_iu->task_attr = task->ssp_task.task_attr; 185 cmd_iu->_r_c = 0; 186 187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, 188 (task->ssp_task.cmd->cmd_len+3) / sizeof(u32)); 189 } 190 191 static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) 192 { 193 struct ssp_task_iu *task_iu; 194 struct sas_task *task = isci_request_access_task(ireq); 195 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 196 197 task_iu = &ireq->ssp.tmf; 198 199 memset(task_iu, 0, sizeof(struct ssp_task_iu)); 200 201 memcpy(task_iu->LUN, task->ssp_task.LUN, 8); 202 203 task_iu->task_func = isci_tmf->tmf_code; 204 task_iu->task_tag = 205 (test_bit(IREQ_TMF, &ireq->flags)) ? 206 isci_tmf->io_tag : 207 SCI_CONTROLLER_INVALID_IO_TAG; 208 } 209 210 /** 211 * This method is will fill in the SCU Task Context for any type of SSP request. 
212 * @sci_req: 213 * @task_context: 214 * 215 */ 216 static void scu_ssp_request_construct_task_context( 217 struct isci_request *ireq, 218 struct scu_task_context *task_context) 219 { 220 dma_addr_t dma_addr; 221 struct isci_remote_device *idev; 222 struct isci_port *iport; 223 224 idev = ireq->target_device; 225 iport = idev->owning_port; 226 227 /* Fill in the TC with its required data */ 228 task_context->abort = 0; 229 task_context->priority = 0; 230 task_context->initiator_request = 1; 231 task_context->connection_rate = idev->connection_rate; 232 task_context->protocol_engine_index = ISCI_PEG; 233 task_context->logical_port_index = iport->physical_port_index; 234 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 235 task_context->valid = SCU_TASK_CONTEXT_VALID; 236 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 237 238 task_context->remote_node_index = idev->rnc.remote_node_index; 239 task_context->command_code = 0; 240 241 task_context->link_layer_control = 0; 242 task_context->do_not_dma_ssp_good_response = 1; 243 task_context->strict_ordering = 0; 244 task_context->control_frame = 0; 245 task_context->timeout_enable = 0; 246 task_context->block_guard_enable = 0; 247 248 task_context->address_modifier = 0; 249 250 /* task_context->type.ssp.tag = ireq->io_tag; */ 251 task_context->task_phase = 0x01; 252 253 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 254 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 255 (iport->physical_port_index << 256 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 257 ISCI_TAG_TCI(ireq->io_tag)); 258 259 /* 260 * Copy the physical address for the command buffer to the 261 * SCU Task Context 262 */ 263 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); 264 265 task_context->command_iu_upper = upper_32_bits(dma_addr); 266 task_context->command_iu_lower = lower_32_bits(dma_addr); 267 268 /* 269 * Copy the physical address for the response buffer to the 270 * SCU Task Context 271 
*/ 272 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); 273 274 task_context->response_iu_upper = upper_32_bits(dma_addr); 275 task_context->response_iu_lower = lower_32_bits(dma_addr); 276 } 277 278 static u8 scu_bg_blk_size(struct scsi_device *sdp) 279 { 280 switch (sdp->sector_size) { 281 case 512: 282 return 0; 283 case 1024: 284 return 1; 285 case 4096: 286 return 3; 287 default: 288 return 0xff; 289 } 290 } 291 292 static u32 scu_dif_bytes(u32 len, u32 sector_size) 293 { 294 return (len >> ilog2(sector_size)) * 8; 295 } 296 297 static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) 298 { 299 struct scu_task_context *tc = ireq->tc; 300 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; 301 u8 blk_sz = scu_bg_blk_size(scmd->device); 302 303 tc->block_guard_enable = 1; 304 tc->blk_prot_en = 1; 305 tc->blk_sz = blk_sz; 306 /* DIF write insert */ 307 tc->blk_prot_func = 0x2; 308 309 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, 310 scmd->device->sector_size); 311 312 /* always init to 0, used by hw */ 313 tc->interm_crc_val = 0; 314 315 tc->init_crc_seed = 0; 316 tc->app_tag_verify = 0; 317 tc->app_tag_gen = 0; 318 tc->ref_tag_seed_verify = 0; 319 320 /* always init to same as bg_blk_sz */ 321 tc->UD_bytes_immed_val = scmd->device->sector_size; 322 323 tc->reserved_DC_0 = 0; 324 325 /* always init to 8 */ 326 tc->DIF_bytes_immed_val = 8; 327 328 tc->reserved_DC_1 = 0; 329 tc->bgc_blk_sz = scmd->device->sector_size; 330 tc->reserved_E0_0 = 0; 331 tc->app_tag_gen_mask = 0; 332 333 /** setup block guard control **/ 334 tc->bgctl = 0; 335 336 /* DIF write insert */ 337 tc->bgctl_f.op = 0x2; 338 339 tc->app_tag_verify_mask = 0; 340 341 /* must init to 0 for hw */ 342 tc->blk_guard_err = 0; 343 344 tc->reserved_E8_0 = 0; 345 346 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) 347 tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; 348 else if (type & SCSI_PROT_DIF_TYPE3) 349 
tc->ref_tag_seed_gen = 0; 350 } 351 352 static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) 353 { 354 struct scu_task_context *tc = ireq->tc; 355 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; 356 u8 blk_sz = scu_bg_blk_size(scmd->device); 357 358 tc->block_guard_enable = 1; 359 tc->blk_prot_en = 1; 360 tc->blk_sz = blk_sz; 361 /* DIF read strip */ 362 tc->blk_prot_func = 0x1; 363 364 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, 365 scmd->device->sector_size); 366 367 /* always init to 0, used by hw */ 368 tc->interm_crc_val = 0; 369 370 tc->init_crc_seed = 0; 371 tc->app_tag_verify = 0; 372 tc->app_tag_gen = 0; 373 374 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) 375 tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff; 376 else if (type & SCSI_PROT_DIF_TYPE3) 377 tc->ref_tag_seed_verify = 0; 378 379 /* always init to same as bg_blk_sz */ 380 tc->UD_bytes_immed_val = scmd->device->sector_size; 381 382 tc->reserved_DC_0 = 0; 383 384 /* always init to 8 */ 385 tc->DIF_bytes_immed_val = 8; 386 387 tc->reserved_DC_1 = 0; 388 tc->bgc_blk_sz = scmd->device->sector_size; 389 tc->reserved_E0_0 = 0; 390 tc->app_tag_gen_mask = 0; 391 392 /** setup block guard control **/ 393 tc->bgctl = 0; 394 395 /* DIF read strip */ 396 tc->bgctl_f.crc_verify = 1; 397 tc->bgctl_f.op = 0x1; 398 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { 399 tc->bgctl_f.ref_tag_chk = 1; 400 tc->bgctl_f.app_f_detect = 1; 401 } else if (type & SCSI_PROT_DIF_TYPE3) 402 tc->bgctl_f.app_ref_f_detect = 1; 403 404 tc->app_tag_verify_mask = 0; 405 406 /* must init to 0 for hw */ 407 tc->blk_guard_err = 0; 408 409 tc->reserved_E8_0 = 0; 410 tc->ref_tag_seed_gen = 0; 411 } 412 413 /** 414 * This method is will fill in the SCU Task Context for a SSP IO request. 
415 * @sci_req: 416 * 417 */ 418 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, 419 enum dma_data_direction dir, 420 u32 len) 421 { 422 struct scu_task_context *task_context = ireq->tc; 423 struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr; 424 struct scsi_cmnd *scmd = sas_task->uldd_task; 425 u8 prot_type = scsi_get_prot_type(scmd); 426 u8 prot_op = scsi_get_prot_op(scmd); 427 428 scu_ssp_request_construct_task_context(ireq, task_context); 429 430 task_context->ssp_command_iu_length = 431 sizeof(struct ssp_cmd_iu) / sizeof(u32); 432 task_context->type.ssp.frame_type = SSP_COMMAND; 433 434 switch (dir) { 435 case DMA_FROM_DEVICE: 436 case DMA_NONE: 437 default: 438 task_context->task_type = SCU_TASK_TYPE_IOREAD; 439 break; 440 case DMA_TO_DEVICE: 441 task_context->task_type = SCU_TASK_TYPE_IOWRITE; 442 break; 443 } 444 445 task_context->transfer_length_bytes = len; 446 447 if (task_context->transfer_length_bytes > 0) 448 sci_request_build_sgl(ireq); 449 450 if (prot_type != SCSI_PROT_DIF_TYPE0) { 451 if (prot_op == SCSI_PROT_READ_STRIP) 452 scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op); 453 else if (prot_op == SCSI_PROT_WRITE_INSERT) 454 scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op); 455 } 456 } 457 458 /** 459 * This method will fill in the SCU Task Context for a SSP Task request. The 460 * following important settings are utilized: -# priority == 461 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued 462 * ahead of other task destined for the same Remote Node. -# task_type == 463 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type 464 * (i.e. non-raw frame) is being utilized to perform task management. -# 465 * control_frame == 1. This ensures that the proper endianess is set so 466 * that the bytes are transmitted in the right order for a task frame. 467 * @sci_req: This parameter specifies the task request object being 468 * constructed. 
469 * 470 */ 471 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) 472 { 473 struct scu_task_context *task_context = ireq->tc; 474 475 scu_ssp_request_construct_task_context(ireq, task_context); 476 477 task_context->control_frame = 1; 478 task_context->priority = SCU_TASK_PRIORITY_HIGH; 479 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; 480 task_context->transfer_length_bytes = 0; 481 task_context->type.ssp.frame_type = SSP_TASK; 482 task_context->ssp_command_iu_length = 483 sizeof(struct ssp_task_iu) / sizeof(u32); 484 } 485 486 /** 487 * This method is will fill in the SCU Task Context for any type of SATA 488 * request. This is called from the various SATA constructors. 489 * @sci_req: The general IO request object which is to be used in 490 * constructing the SCU task context. 491 * @task_context: The buffer pointer for the SCU task context which is being 492 * constructed. 493 * 494 * The general io request construction is complete. The buffer assignment for 495 * the command buffer is complete. none Revisit task context construction to 496 * determine what is common for SSP/SMP/STP task context structures. 
497 */ 498 static void scu_sata_request_construct_task_context( 499 struct isci_request *ireq, 500 struct scu_task_context *task_context) 501 { 502 dma_addr_t dma_addr; 503 struct isci_remote_device *idev; 504 struct isci_port *iport; 505 506 idev = ireq->target_device; 507 iport = idev->owning_port; 508 509 /* Fill in the TC with its required data */ 510 task_context->abort = 0; 511 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 512 task_context->initiator_request = 1; 513 task_context->connection_rate = idev->connection_rate; 514 task_context->protocol_engine_index = ISCI_PEG; 515 task_context->logical_port_index = iport->physical_port_index; 516 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; 517 task_context->valid = SCU_TASK_CONTEXT_VALID; 518 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 519 520 task_context->remote_node_index = idev->rnc.remote_node_index; 521 task_context->command_code = 0; 522 523 task_context->link_layer_control = 0; 524 task_context->do_not_dma_ssp_good_response = 1; 525 task_context->strict_ordering = 0; 526 task_context->control_frame = 0; 527 task_context->timeout_enable = 0; 528 task_context->block_guard_enable = 0; 529 530 task_context->address_modifier = 0; 531 task_context->task_phase = 0x01; 532 533 task_context->ssp_command_iu_length = 534 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); 535 536 /* Set the first word of the H2D REG FIS */ 537 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; 538 539 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 540 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 541 (iport->physical_port_index << 542 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 543 ISCI_TAG_TCI(ireq->io_tag)); 544 /* 545 * Copy the physical address for the command buffer to the SCU Task 546 * Context. We must offset the command buffer by 4 bytes because the 547 * first 4 bytes are transfered in the body of the TC. 
548 */ 549 dma_addr = sci_io_request_get_dma_addr(ireq, 550 ((char *) &ireq->stp.cmd) + 551 sizeof(u32)); 552 553 task_context->command_iu_upper = upper_32_bits(dma_addr); 554 task_context->command_iu_lower = lower_32_bits(dma_addr); 555 556 /* SATA Requests do not have a response buffer */ 557 task_context->response_iu_upper = 0; 558 task_context->response_iu_lower = 0; 559 } 560 561 static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) 562 { 563 struct scu_task_context *task_context = ireq->tc; 564 565 scu_sata_request_construct_task_context(ireq, task_context); 566 567 task_context->control_frame = 0; 568 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 569 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; 570 task_context->type.stp.fis_type = FIS_REGH2D; 571 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); 572 } 573 574 static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, 575 bool copy_rx_frame) 576 { 577 struct isci_stp_request *stp_req = &ireq->stp.req; 578 579 scu_stp_raw_request_construct_task_context(ireq); 580 581 stp_req->status = 0; 582 stp_req->sgl.offset = 0; 583 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; 584 585 if (copy_rx_frame) { 586 sci_request_build_sgl(ireq); 587 stp_req->sgl.index = 0; 588 } else { 589 /* The user does not want the data copied to the SGL buffer location */ 590 stp_req->sgl.index = -1; 591 } 592 593 return SCI_SUCCESS; 594 } 595 596 /** 597 * 598 * @sci_req: This parameter specifies the request to be constructed as an 599 * optimized request. 600 * @optimized_task_type: This parameter specifies whether the request is to be 601 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A 602 * value of 1 indicates NCQ. 603 * 604 * This method will perform request construction common to all types of STP 605 * requests that are optimized by the silicon (i.e. UDMA, NCQ). 
This method 606 * returns an indication as to whether the construction was successful. 607 */ 608 static void sci_stp_optimized_request_construct(struct isci_request *ireq, 609 u8 optimized_task_type, 610 u32 len, 611 enum dma_data_direction dir) 612 { 613 struct scu_task_context *task_context = ireq->tc; 614 615 /* Build the STP task context structure */ 616 scu_sata_request_construct_task_context(ireq, task_context); 617 618 /* Copy over the SGL elements */ 619 sci_request_build_sgl(ireq); 620 621 /* Copy over the number of bytes to be transfered */ 622 task_context->transfer_length_bytes = len; 623 624 if (dir == DMA_TO_DEVICE) { 625 /* 626 * The difference between the DMA IN and DMA OUT request task type 627 * values are consistent with the difference between FPDMA READ 628 * and FPDMA WRITE values. Add the supplied task type parameter 629 * to this difference to set the task type properly for this 630 * DATA OUT (WRITE) case. */ 631 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT 632 - SCU_TASK_TYPE_DMA_IN); 633 } else { 634 /* 635 * For the DATA IN (READ) case, simply save the supplied 636 * optimized task type. */ 637 task_context->task_type = optimized_task_type; 638 } 639 } 640 641 static void sci_atapi_construct(struct isci_request *ireq) 642 { 643 struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd; 644 struct sas_task *task; 645 646 /* To simplify the implementation we take advantage of the 647 * silicon's partial acceleration of atapi protocol (dma data 648 * transfers), so we promote all commands to dma protocol. This 649 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. 
650 */ 651 h2d_fis->features |= ATAPI_PKT_DMA; 652 653 scu_stp_raw_request_construct_task_context(ireq); 654 655 task = isci_request_access_task(ireq); 656 if (task->data_dir == DMA_NONE) 657 task->total_xfer_len = 0; 658 659 /* clear the response so we can detect arrivial of an 660 * unsolicited h2d fis 661 */ 662 ireq->stp.rsp.fis_type = 0; 663 } 664 665 static enum sci_status 666 sci_io_request_construct_sata(struct isci_request *ireq, 667 u32 len, 668 enum dma_data_direction dir, 669 bool copy) 670 { 671 enum sci_status status = SCI_SUCCESS; 672 struct sas_task *task = isci_request_access_task(ireq); 673 struct domain_device *dev = ireq->target_device->domain_dev; 674 675 /* check for management protocols */ 676 if (test_bit(IREQ_TMF, &ireq->flags)) { 677 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 678 679 dev_err(&ireq->owning_controller->pdev->dev, 680 "%s: Request 0x%p received un-handled SAT " 681 "management protocol 0x%x.\n", 682 __func__, ireq, tmf->tmf_code); 683 684 return SCI_FAILURE; 685 } 686 687 if (!sas_protocol_ata(task->task_proto)) { 688 dev_err(&ireq->owning_controller->pdev->dev, 689 "%s: Non-ATA protocol in SATA path: 0x%x\n", 690 __func__, 691 task->task_proto); 692 return SCI_FAILURE; 693 694 } 695 696 /* ATAPI */ 697 if (dev->sata_dev.class == ATA_DEV_ATAPI && 698 task->ata_task.fis.command == ATA_CMD_PACKET) { 699 sci_atapi_construct(ireq); 700 return SCI_SUCCESS; 701 } 702 703 /* non data */ 704 if (task->data_dir == DMA_NONE) { 705 scu_stp_raw_request_construct_task_context(ireq); 706 return SCI_SUCCESS; 707 } 708 709 /* NCQ */ 710 if (task->ata_task.use_ncq) { 711 sci_stp_optimized_request_construct(ireq, 712 SCU_TASK_TYPE_FPDMAQ_READ, 713 len, dir); 714 return SCI_SUCCESS; 715 } 716 717 /* DMA */ 718 if (task->ata_task.dma_xfer) { 719 sci_stp_optimized_request_construct(ireq, 720 SCU_TASK_TYPE_DMA_IN, 721 len, dir); 722 return SCI_SUCCESS; 723 } else /* PIO */ 724 return sci_stp_pio_request_construct(ireq, copy); 725 726 
return status; 727 } 728 729 static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) 730 { 731 struct sas_task *task = isci_request_access_task(ireq); 732 733 ireq->protocol = SAS_PROTOCOL_SSP; 734 735 scu_ssp_io_request_construct_task_context(ireq, 736 task->data_dir, 737 task->total_xfer_len); 738 739 sci_io_request_build_ssp_command_iu(ireq); 740 741 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 742 743 return SCI_SUCCESS; 744 } 745 746 enum sci_status sci_task_request_construct_ssp( 747 struct isci_request *ireq) 748 { 749 /* Construct the SSP Task SCU Task Context */ 750 scu_ssp_task_request_construct_task_context(ireq); 751 752 /* Fill in the SSP Task IU */ 753 sci_task_request_build_ssp_task_iu(ireq); 754 755 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 756 757 return SCI_SUCCESS; 758 } 759 760 static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) 761 { 762 enum sci_status status; 763 bool copy = false; 764 struct sas_task *task = isci_request_access_task(ireq); 765 766 ireq->protocol = SAS_PROTOCOL_STP; 767 768 copy = (task->data_dir == DMA_NONE) ? 
false : true; 769 770 status = sci_io_request_construct_sata(ireq, 771 task->total_xfer_len, 772 task->data_dir, 773 copy); 774 775 if (status == SCI_SUCCESS) 776 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 777 778 return status; 779 } 780 781 /** 782 * sci_req_tx_bytes - bytes transferred when reply underruns request 783 * @ireq: request that was terminated early 784 */ 785 #define SCU_TASK_CONTEXT_SRAM 0x200000 786 static u32 sci_req_tx_bytes(struct isci_request *ireq) 787 { 788 struct isci_host *ihost = ireq->owning_controller; 789 u32 ret_val = 0; 790 791 if (readl(&ihost->smu_registers->address_modifier) == 0) { 792 void __iomem *scu_reg_base = ihost->scu_registers; 793 794 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where 795 * BAR1 is the scu_registers 796 * 0x20002C = 0x200000 + 0x2c 797 * = start of task context SRAM + offset of (type.ssp.data_offset) 798 * TCi is the io_tag of struct sci_request 799 */ 800 ret_val = readl(scu_reg_base + 801 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 802 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); 803 } 804 805 return ret_val; 806 } 807 808 enum sci_status sci_request_start(struct isci_request *ireq) 809 { 810 enum sci_base_request_states state; 811 struct scu_task_context *tc = ireq->tc; 812 struct isci_host *ihost = ireq->owning_controller; 813 814 state = ireq->sm.current_state_id; 815 if (state != SCI_REQ_CONSTRUCTED) { 816 dev_warn(&ihost->pdev->dev, 817 "%s: SCIC IO Request requested to start while in wrong " 818 "state %d\n", __func__, state); 819 return SCI_FAILURE_INVALID_STATE; 820 } 821 822 tc->task_index = ISCI_TAG_TCI(ireq->io_tag); 823 824 switch (tc->protocol_type) { 825 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 826 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 827 /* SSP/SMP Frame */ 828 tc->type.ssp.tag = ireq->io_tag; 829 tc->type.ssp.target_port_transfer_tag = 0xFFFF; 830 break; 831 832 case SCU_TASK_CONTEXT_PROTOCOL_STP: 
833 /* STP/SATA Frame 834 * tc->type.stp.ncq_tag = ireq->ncq_tag; 835 */ 836 break; 837 838 case SCU_TASK_CONTEXT_PROTOCOL_NONE: 839 /* / @todo When do we set no protocol type? */ 840 break; 841 842 default: 843 /* This should never happen since we build the IO 844 * requests */ 845 break; 846 } 847 848 /* Add to the post_context the io tag value */ 849 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); 850 851 /* Everything is good go ahead and change state */ 852 sci_change_state(&ireq->sm, SCI_REQ_STARTED); 853 854 return SCI_SUCCESS; 855 } 856 857 enum sci_status 858 sci_io_request_terminate(struct isci_request *ireq) 859 { 860 enum sci_base_request_states state; 861 862 state = ireq->sm.current_state_id; 863 864 switch (state) { 865 case SCI_REQ_CONSTRUCTED: 866 /* Set to make sure no HW terminate posting is done: */ 867 set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags); 868 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 869 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 870 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 871 return SCI_SUCCESS; 872 case SCI_REQ_STARTED: 873 case SCI_REQ_TASK_WAIT_TC_COMP: 874 case SCI_REQ_SMP_WAIT_RESP: 875 case SCI_REQ_SMP_WAIT_TC_COMP: 876 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 877 case SCI_REQ_STP_UDMA_WAIT_D2H: 878 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 879 case SCI_REQ_STP_NON_DATA_WAIT_D2H: 880 case SCI_REQ_STP_PIO_WAIT_H2D: 881 case SCI_REQ_STP_PIO_WAIT_FRAME: 882 case SCI_REQ_STP_PIO_DATA_IN: 883 case SCI_REQ_STP_PIO_DATA_OUT: 884 case SCI_REQ_ATAPI_WAIT_H2D: 885 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: 886 case SCI_REQ_ATAPI_WAIT_D2H: 887 case SCI_REQ_ATAPI_WAIT_TC_COMP: 888 /* Fall through and change state to ABORTING... */ 889 case SCI_REQ_TASK_WAIT_TC_RESP: 890 /* The task frame was already confirmed to have been 891 * sent by the SCU HW. Since the state machine is 892 * now only waiting for the task response itself, 893 * abort the request and complete it immediately 894 * and don't wait for the task response. 
895 */ 896 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 897 fallthrough; /* and handle like ABORTING */ 898 case SCI_REQ_ABORTING: 899 if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) 900 set_bit(IREQ_PENDING_ABORT, &ireq->flags); 901 else 902 clear_bit(IREQ_PENDING_ABORT, &ireq->flags); 903 /* If the request is only waiting on the remote device 904 * suspension, return SUCCESS so the caller will wait too. 905 */ 906 return SCI_SUCCESS; 907 case SCI_REQ_COMPLETED: 908 default: 909 dev_warn(&ireq->owning_controller->pdev->dev, 910 "%s: SCIC IO Request requested to abort while in wrong " 911 "state %d\n", __func__, ireq->sm.current_state_id); 912 break; 913 } 914 915 return SCI_FAILURE_INVALID_STATE; 916 } 917 918 enum sci_status sci_request_complete(struct isci_request *ireq) 919 { 920 enum sci_base_request_states state; 921 struct isci_host *ihost = ireq->owning_controller; 922 923 state = ireq->sm.current_state_id; 924 if (WARN_ONCE(state != SCI_REQ_COMPLETED, 925 "isci: request completion from wrong state (%s)\n", 926 req_state_name(state))) 927 return SCI_FAILURE_INVALID_STATE; 928 929 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 930 sci_controller_release_frame(ihost, 931 ireq->saved_rx_frame_index); 932 933 /* XXX can we just stop the machine and remove the 'final' state? 
*/ 934 sci_change_state(&ireq->sm, SCI_REQ_FINAL); 935 return SCI_SUCCESS; 936 } 937 938 enum sci_status sci_io_request_event_handler(struct isci_request *ireq, 939 u32 event_code) 940 { 941 enum sci_base_request_states state; 942 struct isci_host *ihost = ireq->owning_controller; 943 944 state = ireq->sm.current_state_id; 945 946 if (state != SCI_REQ_STP_PIO_DATA_IN) { 947 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n", 948 __func__, event_code, req_state_name(state)); 949 950 return SCI_FAILURE_INVALID_STATE; 951 } 952 953 switch (scu_get_event_specifier(event_code)) { 954 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: 955 /* We are waiting for data and the SCU has R_ERR the data frame. 956 * Go back to waiting for the D2H Register FIS 957 */ 958 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 959 return SCI_SUCCESS; 960 default: 961 dev_err(&ihost->pdev->dev, 962 "%s: pio request unexpected event %#x\n", 963 __func__, event_code); 964 965 /* TODO Should we fail the PIO request when we get an 966 * unexpected event? 967 */ 968 return SCI_FAILURE; 969 } 970 } 971 972 /* 973 * This function copies response data for requests returning response data 974 * instead of sense data. 975 * @sci_req: This parameter specifies the request object for which to copy 976 * the response data. 
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	/* never copy more than the destination response IU can hold */
	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

/* Task-context completion while the request is STARTED: decode the
 * transport-layer status, record scu_status/sci_status on the request and
 * move it to SCI_REQ_COMPLETED.
 */
static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
	 * to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.). We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		/* response IU arrives big-endian; byte-swap it in place */
		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		/* byte-swap the response IU in place before callers read it */
		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		/* datapres 1/2 == response/sense data present -> real error */
		if (datapres == 1 || datapres == 2) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SAS_PROTOCOL_STP) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}

/* Task-context completion while the request is being aborted: only a GOOD
 * or TASK_ABORT status terminates the request; anything else keeps waiting.
 */
static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error wait for the task abort to complete
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

/* Task-context completion for an SSP task-management request waiting on its
 * TC: on success (or ACK/NAK timeout) wait for the response frame, otherwise
 * fail the request.
 */
static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if
		 * we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/* Task-context completion for an SMP request still waiting for its response
 * frame; known expander timeout statuses are mapped to a retryable failure.
 */
static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected. but if the TC has success status, we
		 * complete the IO anyway.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These status has been seen in a specific LSI
		 * expander, which sometimes is not able to send smp
		 * response within 2 ms. This causes our hardware break
		 * the connection and set TC completion with one of
		 * these SMP_XXX_XX_ERR status. For these type of error,
		 * we ask ihost user to retry the request.
		 */
		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/* Final task-context completion for an SMP request: any status completes the
 * request; only GOOD is reported as success.
 */
static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be
		 * complete. If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/* Advance the PIO sgl cursor to the next scatter-gather element, walking
 * A -> B within a pair and then following the pair's next-pair link.
 * Returns NULL when the list is exhausted.
 */
static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
	struct scu_sgl_element *sgl;
	struct scu_sgl_element_pair *sgl_pair;
	struct isci_request *ireq = to_ireq(stp_req);
	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
	if (!sgl_pair)
		sgl = NULL;
	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
		/* a zero address in B marks the end of the pair */
		if (sgl_pair->B.address_lower == 0 &&
		    sgl_pair->B.address_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
			sgl = &sgl_pair->B;
		}
	} else {
		/* a zero next-pair link marks the end of the list */
		if (sgl_pair->next_pair_lower == 0 &&
		    sgl_pair->next_pair_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->index++;
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
			sgl = &sgl_pair->A;
		}
	}

	return sgl;
}

/* Task-context completion for a non-data STP request waiting on its H2D FIS:
 * success moves the request on to wait for the D2H register FIS.
 */
static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete. If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length.
current sgl and offset is alreay stored in the IO request 1321 */ 1322 static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( 1323 struct isci_request *ireq, 1324 u32 length) 1325 { 1326 struct isci_stp_request *stp_req = &ireq->stp.req; 1327 struct scu_task_context *task_context = ireq->tc; 1328 struct scu_sgl_element_pair *sgl_pair; 1329 struct scu_sgl_element *current_sgl; 1330 1331 /* Recycle the TC and reconstruct it for sending out DATA FIS containing 1332 * for the data from current_sgl+offset for the input length 1333 */ 1334 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1335 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) 1336 current_sgl = &sgl_pair->A; 1337 else 1338 current_sgl = &sgl_pair->B; 1339 1340 /* update the TC */ 1341 task_context->command_iu_upper = current_sgl->address_upper; 1342 task_context->command_iu_lower = current_sgl->address_lower; 1343 task_context->transfer_length_bytes = length; 1344 task_context->type.stp.fis_type = FIS_DATA; 1345 1346 /* send the new TC out. 
*/ 1347 return sci_controller_continue_io(ireq); 1348 } 1349 1350 static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) 1351 { 1352 struct isci_stp_request *stp_req = &ireq->stp.req; 1353 struct scu_sgl_element_pair *sgl_pair; 1354 enum sci_status status = SCI_SUCCESS; 1355 struct scu_sgl_element *sgl; 1356 u32 offset; 1357 u32 len = 0; 1358 1359 offset = stp_req->sgl.offset; 1360 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1361 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) 1362 return SCI_FAILURE; 1363 1364 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { 1365 sgl = &sgl_pair->A; 1366 len = sgl_pair->A.length - offset; 1367 } else { 1368 sgl = &sgl_pair->B; 1369 len = sgl_pair->B.length - offset; 1370 } 1371 1372 if (stp_req->pio_len == 0) 1373 return SCI_SUCCESS; 1374 1375 if (stp_req->pio_len >= len) { 1376 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); 1377 if (status != SCI_SUCCESS) 1378 return status; 1379 stp_req->pio_len -= len; 1380 1381 /* update the current sgl, offset and save for future */ 1382 sgl = pio_sgl_next(stp_req); 1383 offset = 0; 1384 } else if (stp_req->pio_len < len) { 1385 sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); 1386 1387 /* Sgl offset will be adjusted and saved for future */ 1388 offset += stp_req->pio_len; 1389 sgl->address_lower += stp_req->pio_len; 1390 stp_req->pio_len = 0; 1391 } 1392 1393 stp_req->sgl.offset = offset; 1394 1395 return status; 1396 } 1397 1398 /** 1399 * 1400 * @stp_request: The request that is used for the SGL processing. 1401 * @data_buffer: The buffer of data to be copied. 1402 * @length: The length of the data transfer. 1403 * 1404 * Copy the data from the buffer for the length specified to the IO request SGL 1405 * specified data region. 
enum sci_status 1406 */ 1407 static enum sci_status 1408 sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, 1409 u8 *data_buf, u32 len) 1410 { 1411 struct isci_request *ireq; 1412 u8 *src_addr; 1413 int copy_len; 1414 struct sas_task *task; 1415 struct scatterlist *sg; 1416 void *kaddr; 1417 int total_len = len; 1418 1419 ireq = to_ireq(stp_req); 1420 task = isci_request_access_task(ireq); 1421 src_addr = data_buf; 1422 1423 if (task->num_scatter > 0) { 1424 sg = task->scatter; 1425 1426 while (total_len > 0) { 1427 struct page *page = sg_page(sg); 1428 1429 copy_len = min_t(int, total_len, sg_dma_len(sg)); 1430 kaddr = kmap_atomic(page); 1431 memcpy(kaddr + sg->offset, src_addr, copy_len); 1432 kunmap_atomic(kaddr); 1433 total_len -= copy_len; 1434 src_addr += copy_len; 1435 sg = sg_next(sg); 1436 } 1437 } else { 1438 BUG_ON(task->total_xfer_len < total_len); 1439 memcpy(task->scatter, src_addr, total_len); 1440 } 1441 1442 return SCI_SUCCESS; 1443 } 1444 1445 /** 1446 * 1447 * @sci_req: The PIO DATA IN request that is to receive the data. 1448 * @data_buffer: The buffer to copy from. 1449 * 1450 * Copy the data buffer to the io request data region. 
 * enum sci_status
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
	struct isci_stp_request *stp_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer */
	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, stp_req->pio_len);

		if (status == SCI_SUCCESS)
			stp_req->pio_len = 0;
	} else {
		/* We are transferring a whole frame so copy a full frame */
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}

/* Task-context completion for a PIO request waiting on its H2D FIS: success
 * moves the request on to wait for the PIO setup frame.
 */
static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
					      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete. If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

/* Task-context completion after a PIO data-out frame was transmitted: send
 * the next chunk, or go back to waiting for a PIO setup / D2H frame once all
 * data has been sent.
 */
static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request *ireq,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct isci_stp_request *stp_req = &ireq->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->pio_len != 0) {
			status = sci_stp_request_pio_data_out_transmit_data(ireq);
			if (status == SCI_SUCCESS) {
				if (stp_req->pio_len == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->pio_len == 0) {
			/*
			 * this will happen if the all data is written at the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_DATA_IN
			 * and wait for PIO_SETUP fis / or D2H REg fis. */
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

/* Copy an unsolicited D2H register FIS into the request's response area (if
 * one arrived) and release the frame back to the controller.
 */
static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
								  u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							 frame_index,
							 (void **)&frame_buffer);

		sci_controller_copy_sata_response(&ireq->stp.rsp,
						  frame_header,
						  frame_buffer);
	}

	/* frame is consumed regardless of status */
	sci_controller_release_frame(ihost, frame_index);

	return status;
}

/* Decode an unsolicited FIS that must be a D2H register FIS; copy it into
 * the request's response area and release the frame.
 */
static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
					       u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if (status != SCI_SUCCESS)
		return status;

	if (frame_header->fis_type != FIS_REGD2H) {
		dev_err(&ireq->isci_host->pdev->dev,
			"%s ERROR: invalid fis type 0x%X\n",
			__func__, frame_header->fis_type);
		return SCI_FAILURE;
	}

	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_buffer);

	sci_controller_copy_sata_response(&ireq->stp.rsp,
					  (u32 *)frame_header,
					  frame_buffer);

	/* Frame has been decoded return it to the controller */
	sci_controller_release_frame(ihost, frame_index);

	return status;
}

/* Handle a D2H register FIS for an ATAPI request: record success/failure on
 * the request, and for non-data commands treat the FIS as end of command.
 */
static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
						   u32 frame_index)
{
	struct sas_task *task = isci_request_access_task(ireq);
	enum sci_status status;

	status = process_unsolicited_fis(ireq, frame_index);

	if (status == SCI_SUCCESS) {
		/* device reported an error in the FIS status field */
		if (ireq->stp.rsp.status & ATA_ERR)
			status = SCI_FAILURE_IO_RESPONSE_VALID;
	} else {
		status = SCI_FAILURE_IO_RESPONSE_VALID;
	}

	if (status != SCI_SUCCESS) {
		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = status;
	} else {
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
	}

	/* the d2h ufi is the end of non-data commands */
	if (task->data_dir == DMA_NONE)
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

	return status;
}

/* Rebuild the task context so the ATAPI CDB is sent as a raw-frame DATA FIS
 * (used for non-data ATAPI commands).
 */
static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
	struct scu_task_context *task_context = ireq->tc;

	/* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame
	 * type. The TC for previous Packet fis was already there, we only need to
	 * change the H2D fis content.
	 */
	memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
	/* CDB is placed after the first dword of the FIS */
	memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
	memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
	task_context->type.stp.fis_type = FIS_DATA;
	task_context->transfer_length_bytes = dev->cdb_len;
}

/* Construct the task context for an ATAPI packet command with a data phase
 * (DMA in or out).
 */
static void scu_atapi_construct_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	struct sas_task *task = isci_request_access_task(ireq);
	struct scu_task_context *task_context = ireq->tc;
	int cdb_len = dev->cdb_len;

	/* reference: SSTL 1.13.4.2
	 * task_type, sata_direction
	 */
	if (task->data_dir == DMA_TO_DEVICE) {
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
		task_context->sata_direction = 0;
	} else {
		/* todo: for NO_DATA command, we need to send out raw frame. */
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
		task_context->sata_direction = 1;
	}

	memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
	task_context->type.stp.fis_type = FIS_DATA;

	memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	/* CDB payload starts at the lbal field of the H2D FIS */
	memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
	task_context->ssp_command_iu_length = cdb_len / sizeof(u32);

	/* task phase is set to TX_CMD */
	task_context->task_phase = 0x1;

	/* retry counter */
	task_context->stp_retry_count = 0;

	/* data transfer size.
*/ 1699 task_context->transfer_length_bytes = task->total_xfer_len; 1700 1701 /* setup sgl */ 1702 sci_request_build_sgl(ireq); 1703 } 1704 1705 enum sci_status 1706 sci_io_request_frame_handler(struct isci_request *ireq, 1707 u32 frame_index) 1708 { 1709 struct isci_host *ihost = ireq->owning_controller; 1710 struct isci_stp_request *stp_req = &ireq->stp.req; 1711 enum sci_base_request_states state; 1712 enum sci_status status; 1713 ssize_t word_cnt; 1714 1715 state = ireq->sm.current_state_id; 1716 switch (state) { 1717 case SCI_REQ_STARTED: { 1718 struct ssp_frame_hdr ssp_hdr; 1719 void *frame_header; 1720 1721 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1722 frame_index, 1723 &frame_header); 1724 1725 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); 1726 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); 1727 1728 if (ssp_hdr.frame_type == SSP_RESPONSE) { 1729 struct ssp_response_iu *resp_iu; 1730 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1731 1732 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1733 frame_index, 1734 (void **)&resp_iu); 1735 1736 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); 1737 1738 resp_iu = &ireq->ssp.rsp; 1739 1740 if (resp_iu->datapres == 0x01 || 1741 resp_iu->datapres == 0x02) { 1742 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1743 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1744 } else { 1745 ireq->scu_status = SCU_TASK_DONE_GOOD; 1746 ireq->sci_status = SCI_SUCCESS; 1747 } 1748 } else { 1749 /* not a response frame, why did it get forwarded? 
*/ 1750 dev_err(&ihost->pdev->dev, 1751 "%s: SCIC IO Request 0x%p received unexpected " 1752 "frame %d type 0x%02x\n", __func__, ireq, 1753 frame_index, ssp_hdr.frame_type); 1754 } 1755 1756 /* 1757 * In any case we are done with this frame buffer return it to 1758 * the controller 1759 */ 1760 sci_controller_release_frame(ihost, frame_index); 1761 1762 return SCI_SUCCESS; 1763 } 1764 1765 case SCI_REQ_TASK_WAIT_TC_RESP: 1766 sci_io_request_copy_response(ireq); 1767 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1768 sci_controller_release_frame(ihost, frame_index); 1769 return SCI_SUCCESS; 1770 1771 case SCI_REQ_SMP_WAIT_RESP: { 1772 struct sas_task *task = isci_request_access_task(ireq); 1773 struct scatterlist *sg = &task->smp_task.smp_resp; 1774 void *frame_header, *kaddr; 1775 u8 *rsp; 1776 1777 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1778 frame_index, 1779 &frame_header); 1780 kaddr = kmap_atomic(sg_page(sg)); 1781 rsp = kaddr + sg->offset; 1782 sci_swab32_cpy(rsp, frame_header, 1); 1783 1784 if (rsp[0] == SMP_RESPONSE) { 1785 void *smp_resp; 1786 1787 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1788 frame_index, 1789 &smp_resp); 1790 1791 word_cnt = (sg->length/4)-1; 1792 if (word_cnt > 0) 1793 word_cnt = min_t(unsigned int, word_cnt, 1794 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); 1795 sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); 1796 1797 ireq->scu_status = SCU_TASK_DONE_GOOD; 1798 ireq->sci_status = SCI_SUCCESS; 1799 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); 1800 } else { 1801 /* 1802 * This was not a response frame why did it get 1803 * forwarded? 
1804 */ 1805 dev_err(&ihost->pdev->dev, 1806 "%s: SCIC SMP Request 0x%p received unexpected " 1807 "frame %d type 0x%02x\n", 1808 __func__, 1809 ireq, 1810 frame_index, 1811 rsp[0]); 1812 1813 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; 1814 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1815 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1816 } 1817 kunmap_atomic(kaddr); 1818 1819 sci_controller_release_frame(ihost, frame_index); 1820 1821 return SCI_SUCCESS; 1822 } 1823 1824 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1825 return sci_stp_request_udma_general_frame_handler(ireq, 1826 frame_index); 1827 1828 case SCI_REQ_STP_UDMA_WAIT_D2H: 1829 /* Use the general frame handler to copy the resposne data */ 1830 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); 1831 1832 if (status != SCI_SUCCESS) 1833 return status; 1834 1835 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1836 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1837 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1838 return SCI_SUCCESS; 1839 1840 case SCI_REQ_STP_NON_DATA_WAIT_D2H: { 1841 struct dev_to_host_fis *frame_header; 1842 u32 *frame_buffer; 1843 1844 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1845 frame_index, 1846 (void **)&frame_header); 1847 1848 if (status != SCI_SUCCESS) { 1849 dev_err(&ihost->pdev->dev, 1850 "%s: SCIC IO Request 0x%p could not get frame " 1851 "header for frame index %d, status %x\n", 1852 __func__, 1853 stp_req, 1854 frame_index, 1855 status); 1856 1857 return status; 1858 } 1859 1860 switch (frame_header->fis_type) { 1861 case FIS_REGD2H: 1862 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1863 frame_index, 1864 (void **)&frame_buffer); 1865 1866 sci_controller_copy_sata_response(&ireq->stp.rsp, 1867 frame_header, 1868 frame_buffer); 1869 1870 /* The command has completed with error */ 1871 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1872 ireq->sci_status = 
SCI_FAILURE_IO_RESPONSE_VALID; 1873 break; 1874 1875 default: 1876 dev_warn(&ihost->pdev->dev, 1877 "%s: IO Request:0x%p Frame Id:%d protocol " 1878 "violation occurred\n", __func__, stp_req, 1879 frame_index); 1880 1881 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; 1882 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; 1883 break; 1884 } 1885 1886 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1887 1888 /* Frame has been decoded return it to the controller */ 1889 sci_controller_release_frame(ihost, frame_index); 1890 1891 return status; 1892 } 1893 1894 case SCI_REQ_STP_PIO_WAIT_FRAME: { 1895 struct sas_task *task = isci_request_access_task(ireq); 1896 struct dev_to_host_fis *frame_header; 1897 u32 *frame_buffer; 1898 1899 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1900 frame_index, 1901 (void **)&frame_header); 1902 1903 if (status != SCI_SUCCESS) { 1904 dev_err(&ihost->pdev->dev, 1905 "%s: SCIC IO Request 0x%p could not get frame " 1906 "header for frame index %d, status %x\n", 1907 __func__, stp_req, frame_index, status); 1908 return status; 1909 } 1910 1911 switch (frame_header->fis_type) { 1912 case FIS_PIO_SETUP: 1913 /* Get from the frame buffer the PIO Setup Data */ 1914 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1915 frame_index, 1916 (void **)&frame_buffer); 1917 1918 /* Get the data from the PIO Setup The SCU Hardware 1919 * returns first word in the frame_header and the rest 1920 * of the data is in the frame buffer so we need to 1921 * back up one dword 1922 */ 1923 1924 /* transfer_count: first 16bits in the 4th dword */ 1925 stp_req->pio_len = frame_buffer[3] & 0xffff; 1926 1927 /* status: 4th byte in the 3rd dword */ 1928 stp_req->status = (frame_buffer[2] >> 24) & 0xff; 1929 1930 sci_controller_copy_sata_response(&ireq->stp.rsp, 1931 frame_header, 1932 frame_buffer); 1933 1934 ireq->stp.rsp.status = stp_req->status; 1935 1936 /* The next state is dependent on whether the 1937 * request was PIO Data-in 
or Data out 1938 */ 1939 if (task->data_dir == DMA_FROM_DEVICE) { 1940 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); 1941 } else if (task->data_dir == DMA_TO_DEVICE) { 1942 /* Transmit data */ 1943 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1944 if (status != SCI_SUCCESS) 1945 break; 1946 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); 1947 } 1948 break; 1949 1950 case FIS_SETDEVBITS: 1951 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1952 break; 1953 1954 case FIS_REGD2H: 1955 if (frame_header->status & ATA_BUSY) { 1956 /* 1957 * Now why is the drive sending a D2H Register 1958 * FIS when it is still busy? Do nothing since 1959 * we are still in the right state. 1960 */ 1961 dev_dbg(&ihost->pdev->dev, 1962 "%s: SCIC PIO Request 0x%p received " 1963 "D2H Register FIS with BSY status " 1964 "0x%x\n", 1965 __func__, 1966 stp_req, 1967 frame_header->status); 1968 break; 1969 } 1970 1971 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1972 frame_index, 1973 (void **)&frame_buffer); 1974 1975 sci_controller_copy_sata_response(&ireq->stp.rsp, 1976 frame_header, 1977 frame_buffer); 1978 1979 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1980 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1981 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1982 break; 1983 1984 default: 1985 /* FIXME: what do we do here? 
*/ 1986 break; 1987 } 1988 1989 /* Frame is decoded return it to the controller */ 1990 sci_controller_release_frame(ihost, frame_index); 1991 1992 return status; 1993 } 1994 1995 case SCI_REQ_STP_PIO_DATA_IN: { 1996 struct dev_to_host_fis *frame_header; 1997 struct sata_fis_data *frame_buffer; 1998 1999 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 2000 frame_index, 2001 (void **)&frame_header); 2002 2003 if (status != SCI_SUCCESS) { 2004 dev_err(&ihost->pdev->dev, 2005 "%s: SCIC IO Request 0x%p could not get frame " 2006 "header for frame index %d, status %x\n", 2007 __func__, 2008 stp_req, 2009 frame_index, 2010 status); 2011 return status; 2012 } 2013 2014 if (frame_header->fis_type != FIS_DATA) { 2015 dev_err(&ihost->pdev->dev, 2016 "%s: SCIC PIO Request 0x%p received frame %d " 2017 "with fis type 0x%02x when expecting a data " 2018 "fis.\n", 2019 __func__, 2020 stp_req, 2021 frame_index, 2022 frame_header->fis_type); 2023 2024 ireq->scu_status = SCU_TASK_DONE_GOOD; 2025 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; 2026 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2027 2028 /* Frame is decoded return it to the controller */ 2029 sci_controller_release_frame(ihost, frame_index); 2030 return status; 2031 } 2032 2033 if (stp_req->sgl.index < 0) { 2034 ireq->saved_rx_frame_index = frame_index; 2035 stp_req->pio_len = 0; 2036 } else { 2037 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 2038 frame_index, 2039 (void **)&frame_buffer); 2040 2041 status = sci_stp_request_pio_data_in_copy_data(stp_req, 2042 (u8 *)frame_buffer); 2043 2044 /* Frame is decoded return it to the controller */ 2045 sci_controller_release_frame(ihost, frame_index); 2046 } 2047 2048 /* Check for the end of the transfer, are there more 2049 * bytes remaining for this data transfer 2050 */ 2051 if (status != SCI_SUCCESS || stp_req->pio_len != 0) 2052 return status; 2053 2054 if ((stp_req->status & ATA_BUSY) == 0) { 2055 ireq->scu_status = 
SCU_TASK_DONE_CHECK_RESPONSE; 2056 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2057 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2058 } else { 2059 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 2060 } 2061 return status; 2062 } 2063 2064 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { 2065 struct sas_task *task = isci_request_access_task(ireq); 2066 2067 sci_controller_release_frame(ihost, frame_index); 2068 ireq->target_device->working_request = ireq; 2069 if (task->data_dir == DMA_NONE) { 2070 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); 2071 scu_atapi_reconstruct_raw_frame_task_context(ireq); 2072 } else { 2073 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); 2074 scu_atapi_construct_task_context(ireq); 2075 } 2076 2077 sci_controller_continue_io(ireq); 2078 return SCI_SUCCESS; 2079 } 2080 case SCI_REQ_ATAPI_WAIT_D2H: 2081 return atapi_d2h_reg_frame_handler(ireq, frame_index); 2082 case SCI_REQ_ABORTING: 2083 /* 2084 * TODO: Is it even possible to get an unsolicited frame in the 2085 * aborting state? 
		 */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_SUCCESS;

	default:
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC IO Request given unexpected frame %x while "
			 "in state %d\n",
			 __func__,
			 frame_index,
			 state);

		/* Unknown state: hand the frame back so the controller can
		 * reuse the unsolicited-frame slot.
		 */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	}
}

/* Task-context completion handler for SCI_REQ_STP_UDMA_WAIT_TC_COMP.
 * Decides whether the D2H register FIS has already been captured in the
 * response buffer or must still be waited for.  Always returns SCI_SUCCESS;
 * the IO outcome is reported through ireq->scu_status/sci_status.
 */
static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/* We must check the response buffer to see if the D2H
		 * Register FIS was received before we got the TC
		 * completion.
		 */
		if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
			sci_remote_device_suspend(ireq->target_device,
						  SCI_SW_SUSPEND_NORMAL);

			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		} else {
			/* If we have an error completion status for the
			 * TC then we can expect a D2H register FIS from
			 * the device so we must change state to wait
			 * for it
			 */
			sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
		}
		break;

	/* TODO Check to see if any of these completion status need to
	 * wait for the device to host register fis.
	 */
	/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
	 * - this comes only for B0
	 */
	default:
		/* All other completion status cause the IO to be complete.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/* Shared TC completion handler for the raw ATAPI H2D phases: on
 * SCU_TASK_DONE_GOOD advance the request state machine to @next,
 * otherwise record a controller-specific error and complete the request.
 */
static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
					    enum sci_base_request_states next)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, next);
		break;
	default:
		/* All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

/* Task-context completion handler for the ATAPI data phase
 * (SCI_REQ_ATAPI_WAIT_D2H).  Works around the controller not delivering a
 * D2H status frame on a benign data underrun by synthesizing one.
 */
static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
							u32 completion_code)
{
	struct isci_remote_device *idev = ireq->target_device;
	struct dev_to_host_fis *d2h = &ireq->stp.rsp;
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
		u16 len = sci_req_tx_bytes(ireq);

		/* likely non-error data underrun, workaround missing
		 * d2h frame from the controller
		 */
		if (d2h->fis_type != FIS_REGD2H) {
			d2h->fis_type = FIS_REGD2H;
			d2h->flags = (1 << 6);
			d2h->status = 0x50;
			d2h->error = 0;
			d2h->lbal = 0;
			d2h->byte_count_low = len & 0xff;
			d2h->byte_count_high = len >> 8;
			d2h->device = 0xa0;
			d2h->lbal_exp = 0;
			d2h->lbam_exp = 0;
			d2h->lbah_exp = 0;
			d2h->_r_a = 0;
			d2h->sector_count = 0x3;
			d2h->sector_count_exp = 0;
			d2h->_r_b = 0;
			d2h->_r_c = 0;
			d2h->_r_d = 0;
		}

		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		status = ireq->sci_status;

		/* the hw will have suspended the rnc, so complete the
		 * request upon pending resume
		 */
		sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
		break;
	}
	case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
		/* In this case, there is no UF coming after.
		 * complete the IO now.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		if (d2h->fis_type == FIS_REGD2H) {
			/* UF received change the device state to ATAPI_ERROR */
			status = ireq->sci_status;
			sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
		} else {
			/* If receiving any non-success TC status, no UF
			 * received yet, then an UF for the status fis
			 * is coming after (XXX: suspect this is
			 * actually a protocol error or a bug like the
			 * DONE_UNEXP_FIS case)
			 */
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;

			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
		}
		break;
	}

	return status;
}

/* Return 1 when an SMP completion status implies the transport layer has
 * suspended the TX side of the remote node context.
 */
static int sci_request_smp_completion_status_is_tx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
		return 1;
	}
	return 0;
}

static int sci_request_smp_completion_status_is_tx_rx_suspend(
	unsigned int completion_status)
{
	return 0; /* There are no Tx/Rx SMP suspend conditions. */
}

/* Return 1 when an SSP completion status implies a TX-side suspension. */
static int sci_request_ssp_completion_status_is_tx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
		return 1;
	}
	return 0;
}

static int sci_request_ssp_completion_status_is_tx_rx_suspend(
	unsigned int completion_status)
{
	return 0; /* There are no Tx/Rx SSP suspend conditions.
		   */
}

/* Return 1 when an STP/SATA completion status implies a TX-side suspension. */
static int sci_request_stpsata_completion_status_is_tx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
	case SCU_TASK_DONE_LL_R_ERR:
	case SCU_TASK_DONE_LL_PERR:
	case SCU_TASK_DONE_REG_ERR:
	case SCU_TASK_DONE_SDB_ERR:
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
		return 1;
	}
	return 0;
}


/* Return 1 when an STP/SATA completion status implies both TX and RX
 * suspension.
 */
static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_DONE_LL_SY_TERM:
	case SCU_TASK_DONE_LL_LF_TERM:
	case SCU_TASK_DONE_BREAK_RCVD:
	case SCU_TASK_DONE_INV_FIS_LEN:
	case SCU_TASK_DONE_UNEXP_FIS:
	case SCU_TASK_DONE_UNEXP_SDBFIS:
	case SCU_TASK_DONE_MAX_PLD_ERR:
		return 1;
	}
	return 0;
}

/* Inspect a TC completion status and, when it implies the hardware has
 * suspended the remote node, mirror that suspension in the RNC software
 * state.  A status may be TX-only or TX/RX, never both (BUG_ON below).
 */
static void sci_request_handle_suspending_completions(
	struct isci_request *ireq,
	u32 completion_code)
{
	int is_tx = 0;
	int is_tx_rx = 0;

	switch (ireq->protocol) {
	case SAS_PROTOCOL_SMP:
		is_tx = sci_request_smp_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
			completion_code);
		break;
	case SAS_PROTOCOL_SSP:
		is_tx = sci_request_ssp_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
			completion_code);
		break;
	case
	SAS_PROTOCOL_STP:
		is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx =
			sci_request_stpsata_completion_status_is_tx_rx_suspend(
				completion_code);
		break;
	default:
		dev_warn(&ireq->isci_host->pdev->dev,
			 "%s: request %p has no valid protocol\n",
			 __func__, ireq);
		break;
	}
	if (is_tx || is_tx_rx) {
		BUG_ON(is_tx && is_tx_rx);

		sci_remote_node_context_suspend(
			&ireq->target_device->rnc,
			SCI_HW_SUSPEND,
			(is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
				   : SCU_EVENT_TL_RNC_SUSPEND_TX);
	}
}

/**
 * sci_io_request_tc_completion() - This function dispatches a task-context
 *    completion to the handler for the request's current state.
 * @ireq: This parameter is the request that completed.
 * @completion_code: This parameter is the raw SCU completion code.
 */
enum sci_status
sci_io_request_tc_completion(struct isci_request *ireq,
			     u32 completion_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	/* Decode those completions that signal upcoming suspension events. */
	sci_request_handle_suspending_completions(
		ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));

	switch (state) {
	case SCI_REQ_STARTED:
		return request_started_state_tc_event(ireq, completion_code);

	case SCI_REQ_TASK_WAIT_TC_COMP:
		return ssp_task_request_await_tc_event(ireq,
						       completion_code);

	case SCI_REQ_SMP_WAIT_RESP:
		return smp_request_await_response_tc_event(ireq,
							   completion_code);

	case SCI_REQ_SMP_WAIT_TC_COMP:
		return smp_request_await_tc_event(ireq, completion_code);

	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
		return stp_request_udma_await_tc_event(ireq,
						       completion_code);

	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
		return stp_request_non_data_await_h2d_tc_event(ireq,
							       completion_code);

	case SCI_REQ_STP_PIO_WAIT_H2D:
		return stp_request_pio_await_h2d_completion_tc_event(ireq,
								     completion_code);

	case SCI_REQ_STP_PIO_DATA_OUT:
		return pio_data_out_tx_done_tc_event(ireq,
						     completion_code);

	case SCI_REQ_ABORTING:
		return request_aborting_state_tc_event(ireq,
						       completion_code);

	case SCI_REQ_ATAPI_WAIT_H2D:
		return atapi_raw_completion(ireq, completion_code,
					    SCI_REQ_ATAPI_WAIT_PIO_SETUP);

	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		return atapi_raw_completion(ireq, completion_code,
					    SCI_REQ_ATAPI_WAIT_D2H);

	case SCI_REQ_ATAPI_WAIT_D2H:
		return atapi_data_tc_completion_handler(ireq, completion_code);

	default:
		dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
			 __func__, completion_code, req_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

/**
 * isci_request_process_response_iu() - This function sets the status and
 *    response iu, in the task struct, from the request object for the upper
 *    layer driver.
 * @task: This parameter is the task struct from the upper layer driver.
 * @resp_iu: This parameter points to the response iu of the completed request.
 * @dev: This parameter specifies the linux device struct.
 *
 * none.
 */
static void isci_request_process_response_iu(
	struct sas_task *task,
	struct ssp_response_iu *resp_iu,
	struct device *dev)
{
	dev_dbg(dev,
		"%s: resp_iu = %p "
		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
		"resp_iu->response_data_len = %x, "
		"resp_iu->sense_data_len = %x\nresponse data: ",
		__func__,
		resp_iu,
		resp_iu->status,
		resp_iu->datapres,
		resp_iu->response_data_len,
		resp_iu->sense_data_len);

	task->task_status.stat = resp_iu->status;

	/* libsas updates the task status fields based on the response iu. */
	sas_ssp_task_response(dev, task, resp_iu);
}

/**
 * isci_request_set_open_reject_status() - This function prepares the I/O
 *    completion for OPEN_REJECT conditions.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task that owns the completed request.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @open_rej_reason: This parameter specifies the encoded reason for the
 *    abandon-class reject.
 *
 * none.
 */
static void isci_request_set_open_reject_status(
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum sas_open_rej_reason open_rej_reason)
{
	/* Task in the target is done. */
	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
	*response_ptr = SAS_TASK_UNDELIVERED;
	*status_ptr = SAS_OPEN_REJECT;
	task->task_status.open_rej_reason = open_rej_reason;
}

/**
 * isci_request_handle_controller_specific_errors() - This function decodes
 *    controller-specific I/O completion error conditions.
 * @idev: This parameter is the remote device the I/O targeted (may be NULL).
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task that owns the completed request.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 *
 * none.
 */
static void isci_request_handle_controller_specific_errors(
	struct isci_remote_device *idev,
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr)
{
	unsigned int cstatus;

	cstatus = request->scu_status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
		"- controller status = 0x%x\n",
		__func__, request, cstatus);

	/* Decode the controller-specific errors; most
	 * important is to recognize those conditions in which
	 * the target may still have a task outstanding that
	 * must be aborted.
	 *
	 * Note that there are SCU completion codes being
	 * named in the decode below for which SCIC has already
	 * done work to handle them in a way other than as
	 * a controller-specific completion code; these are left
	 * in the decode below for completeness sake.
	 */
	switch (cstatus) {
	case SCU_TASK_DONE_DMASETUP_DIRERR:
	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
	case SCU_TASK_DONE_XFERCNT_ERR:
	/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
		if (task->task_proto == SAS_PROTOCOL_SMP) {
			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
			*response_ptr = SAS_TASK_COMPLETE;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAS_ABORTED_TASK;

			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		} else {
			/* Task in the target is not done. */
			*response_ptr = SAS_TASK_UNDELIVERED;

			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAM_STAT_TASK_ABORTED;

			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		}

		break;

	case SCU_TASK_DONE_CRC_ERR:
	case SCU_TASK_DONE_NAK_CMD_ERR:
	case SCU_TASK_DONE_EXCESS_DATA:
	case SCU_TASK_DONE_UNEXP_FIS:
	/* Also SCU_TASK_DONE_UNEXP_RESP: */
	case SCU_TASK_DONE_VIIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_IIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_RNCNV_OUTBOUND:	/* TODO - conditions? */
		/* These are conditions in which the target
		 * has completed the task, so that no cleanup
		 * is necessary.
		 */
		*response_ptr = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			*status_ptr = SAS_DEVICE_UNKNOWN;
		else
			*status_ptr = SAS_ABORTED_TASK;

		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;


	/* Note that the only open reject completion codes seen here will be
	 * abandon-class codes; all others are automatically retried in the SCU.
	 */
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_WRONG_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:

		/* Note - the return of AB0 will change when
		 * libsas implements detection of zone violations.
		 */
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_RESV_AB0);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_RESV_AB1);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_RESV_AB2);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_RESV_AB3);
		break;

	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_BAD_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_STP_NORES);
		break;

	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_EPROTO);
		break;

	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_CONN_RATE);
		break;

	case SCU_TASK_DONE_LL_R_ERR:
	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
	case SCU_TASK_DONE_LL_PERR:
	case SCU_TASK_DONE_LL_SY_TERM:
	/* Also SCU_TASK_DONE_NAK_ERR:*/
	case SCU_TASK_DONE_LL_LF_TERM:
	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
	case SCU_TASK_DONE_LL_ABORT_ERR:
	case SCU_TASK_DONE_SEQ_INV_TYPE:
	/* Also SCU_TASK_DONE_UNEXP_XR: */
	case SCU_TASK_DONE_XR_IU_LEN_ERR:
	case SCU_TASK_DONE_INV_FIS_LEN:
	/* Also SCU_TASK_DONE_XR_WD_LEN: */
	case SCU_TASK_DONE_SDMA_ERR:
	case SCU_TASK_DONE_OFFSET_ERR:
	case SCU_TASK_DONE_MAX_PLD_ERR:
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_DONE_SMP_RESP_TO_ERR:	/* Escalate to dev reset? */
	case SCU_TASK_DONE_SMP_LL_RX_ERR:
	case SCU_TASK_DONE_UNEXP_DATA:
	case SCU_TASK_DONE_UNEXP_SDBFIS:
	case SCU_TASK_DONE_REG_ERR:
	case SCU_TASK_DONE_SDB_ERR:
	case SCU_TASK_DONE_TASK_ABORT:
	default:
		/* Task in the target is not done.
		 */
		*response_ptr = SAS_TASK_UNDELIVERED;
		*status_ptr = SAM_STAT_TASK_ABORTED;

		if (task->task_proto == SAS_PROTOCOL_SMP)
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		else
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;
	}
}

/* Copy the ending D2H FIS into the libsas task response buffer so libata
 * can decode it; an error bit in the FIS status selects protocol-response
 * handling instead of plain good status.
 */
static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (void *)&ts->buf[0];

	resp->frame_len = sizeof(*fis);
	memcpy(resp->ending_fis, fis, sizeof(*fis));
	ts->buf_valid_size = sizeof(*resp);

	/* If an error is flagged let libata decode the fis */
	if (ac_err_mask(fis->status))
		ts->stat = SAS_PROTO_RESPONSE;
	else
		ts->stat = SAM_STAT_GOOD;

	ts->resp = SAS_TASK_COMPLETE;
}

/* Translate a core completion status into libsas task status, unmap DMA
 * buffers, and hand the finished request back to libsas/the core.
 */
static void isci_request_io_request_complete(struct isci_host *ihost,
					     struct isci_request *request,
					     enum sci_io_status completion_status)
{
	struct sas_task *task = isci_request_access_task(request);
	struct ssp_response_iu *resp_iu;
	unsigned long task_flags;
	struct isci_remote_device *idev = request->target_device;
	enum service_response response = SAS_TASK_UNDELIVERED;
	enum exec_status status = SAS_ABORTED_TASK;

	dev_dbg(&ihost->pdev->dev,
		"%s: request = %p, task = %p, "
		"task->data_dir = %d completion_status = 0x%x\n",
		__func__, request, task, task->data_dir, completion_status);

	/* The request is done from an SCU HW perspective. */

	/* This is an active request being completed from the core.
	 */
	switch (completion_status) {

	case SCI_IO_FAILURE_RESPONSE_VALID:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
			__func__, request, task);

		if (sas_protocol_ata(task->task_proto)) {
			isci_process_stp_response(task, &request->stp.rsp);
		} else if (SAS_PROTOCOL_SSP == task->task_proto) {

			/* crack the iu response buffer. */
			resp_iu = &request->ssp.rsp;
			isci_request_process_response_iu(task, resp_iu,
							 &ihost->pdev->dev);

		} else if (SAS_PROTOCOL_SMP == task->task_proto) {

			dev_err(&ihost->pdev->dev,
				"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
				"SAS_PROTOCOL_SMP protocol\n",
				__func__);

		} else
			dev_err(&ihost->pdev->dev,
				"%s: unknown protocol\n", __func__);

		/* use the task status set in the task struct by the
		 * isci_request_process_response_iu call.
		 */
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		response = task->task_status.resp;
		status = task->task_status.stat;
		break;

	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_IO_DONE_EARLY:

		response = SAS_TASK_COMPLETE;
		status = SAM_STAT_GOOD;
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

		if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {

			/* This was an SSP / STP / SATA transfer.
			 * There is a possibility that less data than
			 * the maximum was transferred.
			 */
			u32 transferred_length = sci_req_tx_bytes(request);

			task->task_status.residual
				= task->total_xfer_len - transferred_length;

			/* If there were residual bytes, call this an
			 * underrun.
			 */
			if (task->task_status.residual != 0)
				status = SAS_DATA_UNDERRUN;

			dev_dbg(&ihost->pdev->dev,
				"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
				__func__, status);

		} else
			dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
				__func__);
		break;

	case SCI_IO_FAILURE_TERMINATED:

		dev_dbg(&ihost->pdev->dev,
			"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
			__func__, request, task);

		/* The request was terminated explicitly. */
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		response = SAS_TASK_UNDELIVERED;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;
		break;

	case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:

		isci_request_handle_controller_specific_errors(idev, request,
							       task, &response,
							       &status);
		break;

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		/* This is a special case, in that the I/O completion
		 * is telling us that the device needs a reset.
		 * In order for the device reset condition to be
		 * noticed, the I/O has to be handled in the error
		 * handler. Set the reset flag and cause the
		 * SCSI error thread to be scheduled.
		 */
		spin_lock_irqsave(&task->task_state_lock, task_flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, task_flags);

		/* Fail the I/O. */
		response = SAS_TASK_UNDELIVERED;
		status = SAM_STAT_TASK_ABORTED;

		clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;

	case SCI_FAILURE_RETRY_REQUIRED:

		/* Fail the I/O so it can be retried.
		 */
		response = SAS_TASK_UNDELIVERED;
		if (!idev)
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;


	default:
		/* Catch any otherwise unhandled error codes here. */
		dev_dbg(&ihost->pdev->dev,
			"%s: invalid completion code: 0x%x - "
			"isci_request = %p\n",
			__func__, completion_status, request);

		response = SAS_TASK_UNDELIVERED;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		if (SAS_PROTOCOL_SMP == task->task_proto)
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		else
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;
	}

	/* Undo the DMA mappings established when the request was built. */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (task->data_dir == DMA_NONE)
			break;
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(&ihost->pdev->dev,
					 request->zero_scatter_daddr,
					 task->total_xfer_len, task->data_dir);
		else /* unmap the sgl dma addresses */
			dma_unmap_sg(&ihost->pdev->dev, task->scatter,
				     request->num_sg_entries, task->data_dir);
		break;
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg = &task->smp_task.smp_req;
		struct smp_req *smp_req;
		void *kaddr;

		dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);

		/* need to swab it back in case the command buffer is re-used */
		kaddr = kmap_atomic(sg_page(sg));
		smp_req = kaddr + sg->offset;
		sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
		kunmap_atomic(kaddr);
		break;
	}
	default:
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, task_flags);

	task->task_status.resp = response;
	task->task_status.stat = status;

	if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
		/* Normal notification (task_done) */
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
	}
	spin_unlock_irqrestore(&task->task_state_lock, task_flags);

	/* complete the io request to the core. */
	sci_controller_complete_io(ihost, request->target_device, request);

	/* set terminated handle so it cannot be completed or
	 * terminated again, and to cause any calls into abort
	 * task to recognize the already completed case.
	 */
	set_bit(IREQ_TERMINATED, &request->flags);

	ireq_done(ihost, request, task);
}

/* On entering SCI_REQ_STARTED, route unaccelerated request types (non SSP,
 * non NCQ) to their protocol-specific substates.
 */
static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
	struct domain_device *dev = ireq->target_device->domain_dev;
	enum sci_base_request_states state;
	struct sas_task *task;

	/* XXX as hch said always creating an internal sas_task for tmf
	 * requests would simplify the driver
	 */
	task = (test_bit(IREQ_TMF, &ireq->flags)) ?
		NULL : isci_request_access_task(ireq);

	/* all unaccelerated request types (non ssp or ncq) handled with
	 * substates
	 */
	if (!task && dev->dev_type == SAS_END_DEVICE) {
		state = SCI_REQ_TASK_WAIT_TC_COMP;
	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
		state = SCI_REQ_SMP_WAIT_RESP;
	} else if (task && sas_protocol_ata(task->task_proto) &&
		   !task->ata_task.use_ncq) {
		if (dev->sata_dev.class == ATA_DEV_ATAPI &&
		    task->ata_task.fis.command == ATA_CMD_PACKET) {
			state = SCI_REQ_ATAPI_WAIT_H2D;
		} else if (task->data_dir == DMA_NONE) {
			state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
		} else if (task->ata_task.dma_xfer) {
			state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
		} else /* PIO */ {
			state = SCI_REQ_STP_PIO_WAIT_H2D;
		}
	} else {
		/* SSP or NCQ are fully accelerated, no substates */
		return;
	}
	sci_change_state(sm, state);
}

/* On entering SCI_REQ_COMPLETED, notify the upper layer: I/O requests and
 * task-management requests take different completion paths.
 */
static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
	struct isci_host *ihost = ireq->owning_controller;

	/* Tell the SCI_USER that the IO request is complete */
	if (!test_bit(IREQ_TMF, &ireq->flags))
		isci_request_io_request_complete(ihost, ireq,
						 ireq->sci_status);
	else
		isci_task_request_complete(ihost, ireq, ireq->sci_status);
}

static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);

	/* Setting the abort bit in the Task Context is required by the silicon.
	 */
	ireq->tc->abort = 1;
}

/* Record this request as the device's in-flight unaccelerated request. */
static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);

	ireq->target_device->working_request = ireq;
}

/* Record this request as the device's in-flight unaccelerated request. */
static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);

	ireq->target_device->working_request = ireq;
}

/* State-machine table for isci_request; states without an entry action are
 * intentionally empty.
 */
static const struct sci_base_state sci_request_state_table[] = {
	[SCI_REQ_INIT] = { },
	[SCI_REQ_CONSTRUCTED] = { },
	[SCI_REQ_STARTED] = {
		.enter_state = sci_request_started_state_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
		.enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
	[SCI_REQ_STP_PIO_WAIT_H2D] = {
		.enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
	[SCI_REQ_STP_PIO_DATA_IN] = { },
	[SCI_REQ_STP_PIO_DATA_OUT] = { },
	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
	[SCI_REQ_SMP_WAIT_RESP] = { },
	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
	[SCI_REQ_ATAPI_WAIT_H2D] = { },
	[SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
	[SCI_REQ_ATAPI_WAIT_D2H] = { },
	[SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
	[SCI_REQ_COMPLETED] = {
		.enter_state = sci_request_completed_state_enter,
	},
	[SCI_REQ_ABORTING] = {
		.enter_state = sci_request_aborting_state_enter,
	},
	[SCI_REQ_FINAL] = { },
};

/* Initialize the fields common to every request type. */
static void
sci_general_request_construct(struct isci_host *ihost,
			      struct isci_remote_device *idev,
			      struct isci_request *ireq)
{
	sci_init_sm(&ireq->sm,
		    sci_request_state_table, SCI_REQ_INIT);

	ireq->target_device = idev;
	/* Protocol is set later by the protocol-specific construct path */
	ireq->protocol = SAS_PROTOCOL_NONE;
	ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;

	ireq->sci_status = SCI_SUCCESS;
	ireq->scu_status = 0;
	ireq->post_context = 0xFFFFFFFF;
}

/*
 * sci_io_request_construct() - common construction for an I/O request.
 *
 * Validates the remote node, clears the STP command FIS for SATA devices,
 * and zeroes the header portion of the task context (everything before the
 * embedded SGL pair, which is set up separately).
 *
 * Return: SCI_SUCCESS, SCI_FAILURE_INVALID_REMOTE_DEVICE if the remote
 * node index is invalid, or SCI_FAILURE_UNSUPPORTED_PROTOCOL for device
 * types that are neither end devices, SATA, nor expanders.
 */
static enum sci_status
sci_io_request_construct(struct isci_host *ihost,
			 struct isci_remote_device *idev,
			 struct isci_request *ireq)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	sci_general_request_construct(ihost, idev, ireq);

	if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE_INVALID_REMOTE_DEVICE;

	if (dev->dev_type == SAS_END_DEVICE)
		/* pass */;
	else if (dev_is_sata(dev))
		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	else if (dev_is_expander(dev->dev_type))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	/* Zero only the TC header; the trailing sgl_pair_ab is managed
	 * by the SGL setup code.
	 */
	memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));

	return status;
}

/*
 * sci_task_request_construct() - construct a task management request.
 * @io_tag: not referenced here; the tag is carried in @ireq.
 *
 * Only SSP end devices and SATA devices can take a TMF; the IREQ_TMF flag
 * is set and the whole task context is cleared.
 *
 * Return: SCI_SUCCESS or SCI_FAILURE_UNSUPPORTED_PROTOCOL.
 */
enum sci_status sci_task_request_construct(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   u16 io_tag, struct isci_request *ireq)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	sci_general_request_construct(ihost, idev, ireq);

	if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
		set_bit(IREQ_TMF, &ireq->flags);
		memset(ireq->tc, 0, sizeof(struct scu_task_context));

		/* Set the protocol indicator.
		 */
		if (dev_is_sata(dev))
			ireq->protocol = SAS_PROTOCOL_STP;
		else
			ireq->protocol = SAS_PROTOCOL_SSP;
	} else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	return status;
}

/* Thin wrapper: delegate SSP construction to the core and log entry. */
static enum sci_status isci_request_ssp_request_construct(
	struct isci_request *request)
{
	enum sci_status status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);
	status = sci_io_request_construct_basic_ssp(request);
	return status;
}

/*
 * isci_request_stp_request_construct() - build an STP/SATA request from the
 * sas_task's host-to-device FIS, then patch in the NCQ tag for FPDMA and
 * NCQ-non-data commands.
 */
static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct host_to_dev_fis *fis = &ireq->stp.cmd;
	struct ata_queued_cmd *qc = task->uldd_task;
	enum sci_status status;

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: ireq = %p\n",
		__func__,
		ireq);

	memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	/* 0x80 appears to be the H2D FIS "C" (command register update)
	 * bit, set unless libata asked for a device-control update, and
	 * the low nibble of flags is cleared — NOTE(review): confirm
	 * against the host_to_dev_fis layout.
	 */
	if (!task->ata_task.device_control_reg_update)
		fis->flags |= 0x80;
	fis->flags &= 0xF0;

	status = sci_io_request_construct_basic_sata(ireq);

	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		   qc->tf.command == ATA_CMD_FPDMA_READ ||
		   qc->tf.command == ATA_CMD_FPDMA_RECV ||
		   qc->tf.command == ATA_CMD_FPDMA_SEND ||
		   qc->tf.command == ATA_CMD_NCQ_NON_DATA)) {
		/* NCQ tag is carried in sector_count bits 7:3 of the FIS */
		fis->sector_count = qc->tag << 3;
		ireq->tc->type.stp.ncq_tag = qc->tag;
	}

	return status;
}

/*
 * sci_io_request_construct_smp() - fill in the SCU task context for an SMP
 * request taken from @task, byte-swap the request payload in place, and DMA
 * map it for the hardware.
 */
static enum sci_status
sci_io_request_construct_smp(struct device *dev,
			     struct isci_request *ireq,
			     struct sas_task *task)
{
	struct scatterlist *sg = &task->smp_task.smp_req;
	struct isci_remote_device *idev;
	struct scu_task_context *task_context;
	struct isci_port *iport;
	struct smp_req *smp_req;
	void *kaddr;
	u8 req_len;
	u32 cmd;

	kaddr = kmap_atomic(sg_page(sg));
	smp_req
		= kaddr + sg->offset;
	/*
	 * Look at the SMP requests' header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
			/* Default - zero is a valid default for 2.0. */
		}
	}
	req_len = smp_req->req_len;
	/* byte swap the smp request in place; capture the first dword
	 * (header) before unmapping so it can be copied into the TC below.
	 */
	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
	cmd = *(u32 *) smp_req;
	kunmap_atomic(kaddr);

	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
		return SCI_FAILURE;

	ireq->protocol = SAS_PROTOCOL_SMP;

	/* byte swap the smp request. */

	task_context = ireq->tc;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/*
	 * Fill in the TC with its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since commandIU has been built by framework at this point, we just
	 * copy the first DWord from command IU to this location. */
	memcpy(&task_context->type.smp, &cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context command buffer should not contain command header.
	 */
	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

/*
 * isci_smp_request_build() - This function builds the smp request.
 * @ireq: This parameter points to the isci_request allocated in the
 *    request construct function.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
3311 */ 3312 static enum sci_status isci_smp_request_build(struct isci_request *ireq) 3313 { 3314 struct sas_task *task = isci_request_access_task(ireq); 3315 struct device *dev = &ireq->isci_host->pdev->dev; 3316 enum sci_status status = SCI_FAILURE; 3317 3318 status = sci_io_request_construct_smp(dev, ireq, task); 3319 if (status != SCI_SUCCESS) 3320 dev_dbg(&ireq->isci_host->pdev->dev, 3321 "%s: failed with status = %d\n", 3322 __func__, 3323 status); 3324 3325 return status; 3326 } 3327 3328 /** 3329 * isci_io_request_build() - This function builds the io request object. 3330 * @ihost: This parameter specifies the ISCI host object 3331 * @request: This parameter points to the isci_request object allocated in the 3332 * request construct function. 3333 * @sci_device: This parameter is the handle for the sci core's remote device 3334 * object that is the destination for this request. 3335 * 3336 * SCI_SUCCESS on successfull completion, or specific failure code. 3337 */ 3338 static enum sci_status isci_io_request_build(struct isci_host *ihost, 3339 struct isci_request *request, 3340 struct isci_remote_device *idev) 3341 { 3342 enum sci_status status = SCI_SUCCESS; 3343 struct sas_task *task = isci_request_access_task(request); 3344 3345 dev_dbg(&ihost->pdev->dev, 3346 "%s: idev = 0x%p; request = %p, " 3347 "num_scatter = %d\n", 3348 __func__, 3349 idev, 3350 request, 3351 task->num_scatter); 3352 3353 /* map the sgl addresses, if present. 3354 * libata does the mapping for sata devices 3355 * before we get the request. 
3356 */ 3357 if (task->num_scatter && 3358 !sas_protocol_ata(task->task_proto) && 3359 !(SAS_PROTOCOL_SMP & task->task_proto)) { 3360 3361 request->num_sg_entries = dma_map_sg( 3362 &ihost->pdev->dev, 3363 task->scatter, 3364 task->num_scatter, 3365 task->data_dir 3366 ); 3367 3368 if (request->num_sg_entries == 0) 3369 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 3370 } 3371 3372 status = sci_io_request_construct(ihost, idev, request); 3373 3374 if (status != SCI_SUCCESS) { 3375 dev_dbg(&ihost->pdev->dev, 3376 "%s: failed request construct\n", 3377 __func__); 3378 return SCI_FAILURE; 3379 } 3380 3381 switch (task->task_proto) { 3382 case SAS_PROTOCOL_SMP: 3383 status = isci_smp_request_build(request); 3384 break; 3385 case SAS_PROTOCOL_SSP: 3386 status = isci_request_ssp_request_construct(request); 3387 break; 3388 case SAS_PROTOCOL_SATA: 3389 case SAS_PROTOCOL_STP: 3390 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 3391 status = isci_request_stp_request_construct(request); 3392 break; 3393 default: 3394 dev_dbg(&ihost->pdev->dev, 3395 "%s: unknown protocol\n", __func__); 3396 return SCI_FAILURE; 3397 } 3398 3399 return SCI_SUCCESS; 3400 } 3401 3402 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) 3403 { 3404 struct isci_request *ireq; 3405 3406 ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; 3407 ireq->io_tag = tag; 3408 ireq->io_request_completion = NULL; 3409 ireq->flags = 0; 3410 ireq->num_sg_entries = 0; 3411 3412 return ireq; 3413 } 3414 3415 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, 3416 struct sas_task *task, 3417 u16 tag) 3418 { 3419 struct isci_request *ireq; 3420 3421 ireq = isci_request_from_tag(ihost, tag); 3422 ireq->ttype_ptr.io_task_ptr = task; 3423 clear_bit(IREQ_TMF, &ireq->flags); 3424 task->lldd_task = ireq; 3425 3426 return ireq; 3427 } 3428 3429 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, 3430 struct isci_tmf *isci_tmf, 3431 u16 tag) 3432 { 3433 struct 
	isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
	set_bit(IREQ_TMF, &ireq->flags);

	return ireq;
}

/*
 * isci_request_execute() - build and start an I/O request for @task on
 * @idev using the pre-allocated slot selected by @tag.
 *
 * If the device is in NCQ error recovery the request is issued on the task
 * path; otherwise it is started as a normal I/O.  If the core reports that
 * a device reset is required, the task is handed to the SCSI error handler
 * via sas_task_abort() and the request is held.
 *
 * Return: 0 on success (including the held device-reset case), or the sci
 * status from a failed build/start.
 */
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, u16 tag)
{
	enum sci_status status;
	struct isci_request *ireq;
	unsigned long flags;
	int ret = 0;

	/* do common allocation and init of request object. */
	ireq = isci_io_request_from_tag(ihost, task, tag);

	status = isci_io_request_build(ihost, ireq, idev);
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: request_construct failed - status = 0x%x\n",
			__func__,
			status);
		return status;
	}

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {

		if (isci_task_is_ncq_recovery(task)) {

			/* The device is in an NCQ recovery state.  Issue the
			 * request on the task side.  Note that it will
			 * complete on the I/O request side because the
			 * request was built that way (ie.
			 * ireq->is_task_management_request is false).
			 */
			status = sci_controller_start_task(ihost,
							   idev,
							   ireq);
		} else {
			/* Non-recovery I/O is rejected while in NCQ error */
			status = SCI_FAILURE;
		}
	} else {
		/* send the request, let the core assign the IO TAG.	*/
		status = sci_controller_start_io(ihost, idev,
						 ireq);
	}

	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_dbg(&ihost->pdev->dev,
			"%s: failed request start (0x%x)\n",
			__func__, status);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return status;
	}
	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 */
	if (status != SCI_SUCCESS) {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		set_bit(IREQ_TERMINATED, &ireq->flags);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status ==
	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		sas_task_abort(task);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}

	/* ret is never set after init: both the started-OK and device-reset
	 * paths report 0; build/start failures returned early above.
	 */
	return ret;
}