/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "sas.h"

/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
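 *
 * For example, pair indices 0 and 1 come from the task context itself
 * (&task_context->sgl_pair_ab and &task_context->sgl_pair_cd), while any
 * index N >= 2 resolves to &sci_req->sg_table[N - 2].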
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index
	) {
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;

	if (sgl_pair_index == 0) {
		return &task_context->sgl_pair_ab;
	} else if (sgl_pair_index == 1) {
		return &task_context->sgl_pair_cd;
	}

	return &sci_req->sg_table[sgl_pair_index - 2];
}

/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 */
void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}

static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_req)
{
	if (sci_req->was_tag_assigned_by_user == false)
		sci_req->task_context_buffer = &sci_req->tc;
}

static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}
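
/*
 * A minimal sketch of what the sci_swab32_cpy() call above accomplishes
 * for the CDB (an assumption drawn from its use here, not a quote of the
 * helper's implementation): each 32-bit word is byte-swapped as it is
 * copied, so the CDB bytes go out in SAS wire (big-endian) order.
 * Hypothetical standalone equivalent:
 *
 *	for (i = 0; i < word_cnt; i++)
 *		((u32 *)dest)[i] = swab32(((u32 *)src)[i]);
 */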

static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request:
 * @task_context:
 *
 */
static void scu_ssp_request_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sds_request);
	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sci_req->io_tag; */
	task_context->task_phase = 0x01;

	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data
		 *
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
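
/*
 * A worked example of the post_context packing above, with illustrative
 * values only: for protocol engine group 0, logical port 2 and an io_tag
 * whose index is 0x1f, the user-assigned-tag branch produces
 *
 *	SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
 *	(0 << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
 *	(2 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
 *	0x1f;
 *
 * the other branch defers OR-ing in the tag index until a TCi is
 * allocated in the constructed-state start handler later in this file.
 */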

/**
 * This method will fill in the SCU Task Context for a SSP IO request.
 * @sci_req:
 * @dir:
 * @len:
 *
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}

static void scic_sds_ssp_task_request_assign_buffers(struct scic_sds_request *sci_req)
{
	if (sci_req->was_tag_assigned_by_user == false)
		sci_req->task_context_buffer = &sci_req->tc;
}

/**
 * This method will fill in the SCU Task Context for a SSP Task request.
 * The following important settings are utilized:
 *
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same
 *      Remote Node.
 *   -# task_type == SCU_TASK_TYPE_IOREAD.  This simply indicates that a
 *      normal request type (i.e. non-raw frame) is being utilized to
 *      perform task management.
 *   -# control_frame == 1.  This ensures that the proper endianness is
 *      set so that the bytes are transmitted in the right order for a
 *      task frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}


/**
 * This method constructs the SSP Command IU data for this SSP passthrough
 *    command request object.
 * @sci_req: This parameter specifies the request object for which the SSP
 *    command information unit is being built.
 *
 * enum sci_status, returns invalid parameter if cdb > 16
 */


/**
 * This method constructs the SATA request object.
 * @sci_req:
 * @len:
 * @dir:
 * @copy:
 *
 * enum sci_status
 */
static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	enum sci_status status = SCI_SUCCESS;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low)
			return scic_sds_stp_soft_reset_request_construct(sci_req);
		else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;

	}

	/* non data */
	if (task->data_dir == DMA_NONE)
		return scic_sds_stp_non_data_request_construct(sci_req);

	/* NCQ */
	if (task->ata_task.use_ncq)
		return scic_sds_stp_ncq_request_construct(sci_req, len, dir);

	/* DMA */
	if (task->ata_task.dma_xfer)
		return scic_sds_stp_udma_request_construct(sci_req, len, dir);
	else /* PIO */
		return scic_sds_stp_pio_request_construct(sci_req, copy);

	return status;
}

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}
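
/*
 * A hedged usage sketch (the caller wiring is hypothetical, but the two
 * calls are the real entry points in this file): a task management
 * request is first constructed and then started, at which point a TCi is
 * allocated if the user did not assign a tag:
 *
 *	status = scic_task_request_construct_ssp(sci_req);
 *	if (status == SCI_SUCCESS)
 *		status = scic_sds_request_start(sci_req);
 */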


static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	struct scic_sds_stp_request *stp_req;
	bool copy = false;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	stp_req = &sci_req->stp.req;
	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}


enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status = SCI_SUCCESS;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			status = scic_sds_stp_soft_reset_request_construct(sci_req);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(
				&sci_req->state_machine,
				SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct scic_sds_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
	}

	return ret_val;
}
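
/*
 * Worked example for the SRAM read above (the TCi value is illustrative):
 * with SCU_TASK_CONTEXT_SRAM == 0x200000, a data_offset field offset of
 * 0x2c and a 256-byte struct scu_task_context, TCi 3 reads from
 * scu_reg_base + 0x200000 + 0x2c + (3 * 256) == scu_reg_base + 0x20032c.
 */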
"%s: SCIC IO Request given event code notification %x while " 617 "in wrong state %d\n", 618 __func__, 619 event_code, 620 sci_base_state_machine_get_state(&request->state_machine)); 621 622 return SCI_FAILURE_INVALID_STATE; 623 } 624 625 /** 626 * 627 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start 628 * operation is to be executed. 629 * @frame_index: The frame index returned by the hardware for the reqeust 630 * object. 631 * 632 * This method invokes the core state frame handler for the 633 * SCIC_SDS_IO_REQUEST_T object. enum sci_status 634 */ 635 enum sci_status scic_sds_io_request_frame_handler( 636 struct scic_sds_request *request, 637 u32 frame_index) 638 { 639 if (request->state_handlers->frame_handler) 640 return request->state_handlers->frame_handler(request, frame_index); 641 642 dev_warn(scic_to_dev(request->owning_controller), 643 "%s: SCIC IO Request given unexpected frame %x while in " 644 "state %d\n", 645 __func__, 646 frame_index, 647 sci_base_state_machine_get_state(&request->state_machine)); 648 649 scic_sds_controller_release_frame(request->owning_controller, frame_index); 650 return SCI_FAILURE_INVALID_STATE; 651 } 652 653 /* 654 * This function copies response data for requests returning response data 655 * instead of sense data. 656 * @sci_req: This parameter specifies the request object for which to copy 657 * the response data. 658 */ 659 void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) 660 { 661 void *resp_buf; 662 u32 len; 663 struct ssp_response_iu *ssp_response; 664 struct isci_request *ireq = sci_req_to_ireq(sci_req); 665 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 666 667 ssp_response = &sci_req->ssp.rsp; 668 669 resp_buf = &isci_tmf->resp.resp_iu; 670 671 len = min_t(u32, 672 SSP_RESP_IU_MAX_SIZE, 673 be32_to_cpu(ssp_response->response_data_len)); 674 675 memcpy(resp_buf, ssp_response->resp_data, len); 676 } 677 678 /* 679 * This method implements the action taken when a constructed 680 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request. 681 * This method will, if necessary, allocate a TCi for the io request object and 682 * then will, if necessary, copy the constructed TC data into the actual TC 683 * buffer. If everything is successful the post context field is updated with 684 * the TCi so the controller can post the request to the hardware. enum sci_status 685 * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES 686 */ 687 static enum sci_status scic_sds_request_constructed_state_start_handler( 688 struct scic_sds_request *request) 689 { 690 struct scu_task_context *task_context; 691 692 if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { 693 request->io_tag = 694 scic_controller_allocate_io_tag(request->owning_controller); 695 } 696 697 /* Record the IO Tag in the request */ 698 if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { 699 task_context = request->task_context_buffer; 700 701 task_context->task_index = scic_sds_io_tag_get_index(request->io_tag); 702 703 switch (task_context->protocol_type) { 704 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 705 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 706 /* SSP/SMP Frame */ 707 task_context->type.ssp.tag = request->io_tag; 708 task_context->type.ssp.target_port_transfer_tag = 0xFFFF; 709 break; 710 711 case SCU_TASK_CONTEXT_PROTOCOL_STP: 712 /* 713 * STP/SATA Frame 714 * task_context->type.stp.ncq_tag = request->ncq_tag; */ 715 break; 716 717 case SCU_TASK_CONTEXT_PROTOCOL_NONE: 718 /* / @todo When do we set no protocol type? 

/*
 * This method implements the action taken when a constructed
 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
 * This method will, if necessary, allocate a TCi for the io request object and
 * then will, if necessary, copy the constructed TC data into the actual TC
 * buffer.  If everything is successful the post context field is updated with
 * the TCi so the controller can post the request to the hardware. enum sci_status
 * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
 */
static enum sci_status scic_sds_request_constructed_state_start_handler(
	struct scic_sds_request *request)
{
	struct scu_task_context *task_context;

	if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		request->io_tag =
			scic_controller_allocate_io_tag(request->owning_controller);
	}

	/* Record the IO Tag in the request */
	if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = request->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = request->io_tag;
			task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/*
			 * STP/SATA Frame
			 * task_context->type.stp.ncq_tag = request->ncq_tag; */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* @todo When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer */
		if (request->was_tag_assigned_by_user == false) {
			scic_sds_controller_copy_task_context(
				request->owning_controller, request);
		}

		/* Add the io tag value to the post_context */
		request->post_context |= scic_sds_io_tag_get_index(request->io_tag);

		/* Everything is good, go ahead and change state */
		sci_base_state_machine_change_state(&request->state_machine,
						    SCI_BASE_REQUEST_STATE_STARTED);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  Since the request
 * has not yet been posted to the hardware the request transitions to the
 * completed state. enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_constructed_state_abort_handler(
	struct scic_sds_request *request)
{
	/*
	 * This request has been terminated by the user; make sure that the
	 * correct status code is returned */
	scic_sds_request_set_status(request,
				    SCU_TASK_DONE_TASK_ABORT,
				    SCI_FAILURE_IO_TERMINATED);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * *  STARTED STATE HANDLERS
 * ***************************************************************************** */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  Since the request
 * has been posted to the hardware the io request state is changed to the
 * aborting state. enum sci_status SCI_SUCCESS
 */
enum sci_status scic_sds_request_started_state_abort_handler(
	struct scic_sds_request *request)
{
	if (request->has_started_substate_machine)
		sci_base_state_machine_stop(&request->started_substate_machine);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_ABORTING);
	return SCI_SUCCESS;
}
799 * 800 */ 801 static enum sci_status 802 scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req, 803 u32 completion_code) 804 { 805 u8 datapres; 806 struct ssp_response_iu *resp_iu; 807 808 /* 809 * TODO: Any SDMA return code of other than 0 is bad 810 * decode 0x003C0000 to determine SDMA status 811 */ 812 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 813 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 814 scic_sds_request_set_status(sci_req, 815 SCU_TASK_DONE_GOOD, 816 SCI_SUCCESS); 817 break; 818 819 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): 820 { 821 /* 822 * There are times when the SCU hardware will return an early 823 * response because the io request specified more data than is 824 * returned by the target device (mode pages, inquiry data, 825 * etc.). We must check the response stats to see if this is 826 * truly a failed request or a good request that just got 827 * completed early. 828 */ 829 struct ssp_response_iu *resp = &sci_req->ssp.rsp; 830 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 831 832 sci_swab32_cpy(&sci_req->ssp.rsp, 833 &sci_req->ssp.rsp, 834 word_cnt); 835 836 if (resp->status == 0) { 837 scic_sds_request_set_status( 838 sci_req, 839 SCU_TASK_DONE_GOOD, 840 SCI_SUCCESS_IO_DONE_EARLY); 841 } else { 842 scic_sds_request_set_status( 843 sci_req, 844 SCU_TASK_DONE_CHECK_RESPONSE, 845 SCI_FAILURE_IO_RESPONSE_VALID); 846 } 847 } 848 break; 849 850 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): 851 { 852 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 853 854 sci_swab32_cpy(&sci_req->ssp.rsp, 855 &sci_req->ssp.rsp, 856 word_cnt); 857 858 scic_sds_request_set_status(sci_req, 859 SCU_TASK_DONE_CHECK_RESPONSE, 860 SCI_FAILURE_IO_RESPONSE_VALID); 861 break; 862 } 863 864 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): 865 /* 866 * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame 867 * guaranteed to be received before this completion status is 868 * posted? 869 */ 870 resp_iu = &sci_req->ssp.rsp; 871 datapres = resp_iu->datapres; 872 873 if ((datapres == 0x01) || (datapres == 0x02)) { 874 scic_sds_request_set_status( 875 sci_req, 876 SCU_TASK_DONE_CHECK_RESPONSE, 877 SCI_FAILURE_IO_RESPONSE_VALID); 878 } else 879 scic_sds_request_set_status( 880 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); 881 break; 882 883 /* only stp device gets suspended. 
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
{
	if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED &&
	    request->has_started_substate_machine == false)
		return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
	else if (request->state_handlers->tc_completion_handler)
		return request->state_handlers->tc_completion_handler(request, completion_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given task completion notification %x "
		 "while in wrong state %d\n",
		 __func__,
		 completion_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;

}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request.  This method
 * first determines the frame type received.  If this is a response frame then
 * the response data is copied to the io request response buffer for processing
 * at completion time.  If the frame type is not a response buffer an error is
 * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
 */
static enum sci_status
scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
					     u32 frame_index)
{
	enum sci_status status;
	u32 *frame_header;
	struct ssp_frame_hdr ssp_hdr;
	ssize_t word_cnt;

	status = scic_sds_unsolicited_frame_control_get_header(
		&(scic_sds_request_get_controller(sci_req)->uf_control),
		frame_index,
		(void **)&frame_header);

	word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
	sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

	if (ssp_hdr.frame_type == SSP_RESPONSE) {
		struct ssp_response_iu *resp_iu;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		status = scic_sds_unsolicited_frame_control_get_buffer(
			&(scic_sds_request_get_controller(sci_req)->uf_control),
			frame_index,
			(void **)&resp_iu);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       resp_iu, word_cnt);

		resp_iu = &sci_req->ssp.rsp;

		if ((resp_iu->datapres == 0x01) ||
		    (resp_iu->datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
	} else {
		/* This was not a response frame; why did it get forwarded? */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p received unexpected "
			"frame %d type 0x%02x\n",
			__func__,
			sci_req,
			frame_index,
			ssp_hdr.frame_type);
	}

	/*
	 * In any case we are done with this frame buffer; return it to the
	 * controller
	 */
	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}
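
/*
 * Note on the datapres checks above and in the RESP_LEN_ERR case earlier:
 * per the SSP response IU definition in the SAS specification, a DATAPRES
 * value of 0x01 means RESPONSE_DATA is present and 0x02 means SENSE_DATA
 * is present; any other value is treated here as a good completion.
 */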

/*
 * *****************************************************************************
 * *  COMPLETED STATE HANDLERS
 * ***************************************************************************** */


/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_complete() request.  This method frees up
 * any io request resources that have been allocated and transitions the
 * request to its final state.  Consider stopping the state machine instead of
 * transitioning to the final state? enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_completed_state_complete_handler(
	struct scic_sds_request *request)
{
	if (request->was_tag_assigned_by_user != true) {
		scic_controller_free_io_tag(
			request->owning_controller, request->io_tag);
	}

	if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
		scic_sds_controller_release_frame(
			request->owning_controller, request->saved_rx_frame_index);
	}

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_FINAL);
	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * *  ABORTING STATE HANDLERS
 * ***************************************************************************** */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  This method is the
 * io request aborting state abort handler.  On receipt of multiple terminate
 * requests the io request will transition to the completed state.  This
 * should not happen in normal operation. enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_abort_handler(
	struct scic_sds_request *request)
{
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_task_completion() request.  This method
 * decodes the completion type waiting for the abort task complete
 * notification.  When the abort task complete is received the io request
 * transitions to the completed state. enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * Unless we get some strange error, wait for the task abort to
		 * complete.
		 * TODO: Should there be a state change for this completion? */
		break;
	}

	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request.  This method
 * discards the unsolicited frame since we are waiting for the abort task
 * completion. enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	/* TODO: Is it even possible to get an unsolicited frame in the aborting state? */

	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = {
	},
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.start_handler = scic_sds_request_constructed_state_start_handler,
		.abort_handler = scic_sds_request_constructed_state_abort_handler,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
		.frame_handler = scic_sds_request_started_state_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.complete_handler = scic_sds_request_completed_state_complete_handler,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.abort_handler = scic_sds_request_aborting_state_abort_handler,
		.tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
		.frame_handler = scic_sds_request_aborting_state_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = {
	},
};
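
/*
 * A minimal sketch of how the table above is consumed, mirroring the
 * dispatch already done by scic_sds_io_request_terminate() and friends
 * earlier in this file (nothing new is introduced here):
 *
 *	if (request->state_handlers->abort_handler)
 *		return request->state_handlers->abort_handler(request);
 *	return SCI_FAILURE_INVALID_STATE;
 */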
1188 */ 1189 static void isci_request_process_response_iu( 1190 struct sas_task *task, 1191 struct ssp_response_iu *resp_iu, 1192 struct device *dev) 1193 { 1194 dev_dbg(dev, 1195 "%s: resp_iu = %p " 1196 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 1197 "resp_iu->response_data_len = %x, " 1198 "resp_iu->sense_data_len = %x\nrepsonse data: ", 1199 __func__, 1200 resp_iu, 1201 resp_iu->status, 1202 resp_iu->datapres, 1203 resp_iu->response_data_len, 1204 resp_iu->sense_data_len); 1205 1206 task->task_status.stat = resp_iu->status; 1207 1208 /* libsas updates the task status fields based on the response iu. */ 1209 sas_ssp_task_response(dev, task, resp_iu); 1210 } 1211 1212 /** 1213 * isci_request_set_open_reject_status() - This function prepares the I/O 1214 * completion for OPEN_REJECT conditions. 1215 * @request: This parameter is the completed isci_request object. 1216 * @response_ptr: This parameter specifies the service response for the I/O. 1217 * @status_ptr: This parameter specifies the exec status for the I/O. 1218 * @complete_to_host_ptr: This parameter specifies the action to be taken by 1219 * the LLDD with respect to completing this request or forcing an abort 1220 * condition on the I/O. 1221 * @open_rej_reason: This parameter specifies the encoded reason for the 1222 * abandon-class reject. 1223 * 1224 * none. 1225 */ 1226 static void isci_request_set_open_reject_status( 1227 struct isci_request *request, 1228 struct sas_task *task, 1229 enum service_response *response_ptr, 1230 enum exec_status *status_ptr, 1231 enum isci_completion_selection *complete_to_host_ptr, 1232 enum sas_open_rej_reason open_rej_reason) 1233 { 1234 /* Task in the target is done. */ 1235 request->complete_in_target = true; 1236 *response_ptr = SAS_TASK_UNDELIVERED; 1237 *status_ptr = SAS_OPEN_REJECT; 1238 *complete_to_host_ptr = isci_perform_normal_io_completion; 1239 task->task_status.open_rej_reason = open_rej_reason; 1240 } 1241 1242 /** 1243 * isci_request_handle_controller_specific_errors() - This function decodes 1244 * controller-specific I/O completion error conditions. 1245 * @request: This parameter is the completed isci_request object. 1246 * @response_ptr: This parameter specifies the service response for the I/O. 1247 * @status_ptr: This parameter specifies the exec status for the I/O. 1248 * @complete_to_host_ptr: This parameter specifies the action to be taken by 1249 * the LLDD with respect to completing this request or forcing an abort 1250 * condition on the I/O. 1251 * 1252 * none. 1253 */ 1254 static void isci_request_handle_controller_specific_errors( 1255 struct isci_remote_device *isci_device, 1256 struct isci_request *request, 1257 struct sas_task *task, 1258 enum service_response *response_ptr, 1259 enum exec_status *status_ptr, 1260 enum isci_completion_selection *complete_to_host_ptr) 1261 { 1262 unsigned int cstatus; 1263 1264 cstatus = request->sci.scu_status; 1265 1266 dev_dbg(&request->isci_host->pdev->dev, 1267 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 1268 "- controller status = 0x%x\n", 1269 __func__, request, cstatus); 1270 1271 /* Decode the controller-specific errors; most 1272 * important is to recognize those conditions in which 1273 * the target may still have a task outstanding that 1274 * must be aborted. 
1275 * 1276 * Note that there are SCU completion codes being 1277 * named in the decode below for which SCIC has already 1278 * done work to handle them in a way other than as 1279 * a controller-specific completion code; these are left 1280 * in the decode below for completeness sake. 1281 */ 1282 switch (cstatus) { 1283 case SCU_TASK_DONE_DMASETUP_DIRERR: 1284 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ 1285 case SCU_TASK_DONE_XFERCNT_ERR: 1286 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ 1287 if (task->task_proto == SAS_PROTOCOL_SMP) { 1288 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ 1289 *response_ptr = SAS_TASK_COMPLETE; 1290 1291 /* See if the device has been/is being stopped. Note 1292 * that we ignore the quiesce state, since we are 1293 * concerned about the actual device state. 1294 */ 1295 if ((isci_device->status == isci_stopping) || 1296 (isci_device->status == isci_stopped)) 1297 *status_ptr = SAS_DEVICE_UNKNOWN; 1298 else 1299 *status_ptr = SAS_ABORTED_TASK; 1300 1301 request->complete_in_target = true; 1302 1303 *complete_to_host_ptr = 1304 isci_perform_normal_io_completion; 1305 } else { 1306 /* Task in the target is not done. */ 1307 *response_ptr = SAS_TASK_UNDELIVERED; 1308 1309 if ((isci_device->status == isci_stopping) || 1310 (isci_device->status == isci_stopped)) 1311 *status_ptr = SAS_DEVICE_UNKNOWN; 1312 else 1313 *status_ptr = SAM_STAT_TASK_ABORTED; 1314 1315 request->complete_in_target = false; 1316 1317 *complete_to_host_ptr = 1318 isci_perform_error_io_completion; 1319 } 1320 1321 break; 1322 1323 case SCU_TASK_DONE_CRC_ERR: 1324 case SCU_TASK_DONE_NAK_CMD_ERR: 1325 case SCU_TASK_DONE_EXCESS_DATA: 1326 case SCU_TASK_DONE_UNEXP_FIS: 1327 /* Also SCU_TASK_DONE_UNEXP_RESP: */ 1328 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ 1329 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ 1330 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ 1331 /* These are conditions in which the target 1332 * has completed the task, so that no cleanup 1333 * is necessary. 1334 */ 1335 *response_ptr = SAS_TASK_COMPLETE; 1336 1337 /* See if the device has been/is being stopped. Note 1338 * that we ignore the quiesce state, since we are 1339 * concerned about the actual device state. 1340 */ 1341 if ((isci_device->status == isci_stopping) || 1342 (isci_device->status == isci_stopped)) 1343 *status_ptr = SAS_DEVICE_UNKNOWN; 1344 else 1345 *status_ptr = SAS_ABORTED_TASK; 1346 1347 request->complete_in_target = true; 1348 1349 *complete_to_host_ptr = isci_perform_normal_io_completion; 1350 break; 1351 1352 1353 /* Note that the only open reject completion codes seen here will be 1354 * abandon-class codes; all others are automatically retried in the SCU. 1355 */ 1356 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 1357 1358 isci_request_set_open_reject_status( 1359 request, task, response_ptr, status_ptr, 1360 complete_to_host_ptr, SAS_OREJ_WRONG_DEST); 1361 break; 1362 1363 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 1364 1365 /* Note - the return of AB0 will change when 1366 * libsas implements detection of zone violations. 
1367 */ 1368 isci_request_set_open_reject_status( 1369 request, task, response_ptr, status_ptr, 1370 complete_to_host_ptr, SAS_OREJ_RESV_AB0); 1371 break; 1372 1373 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 1374 1375 isci_request_set_open_reject_status( 1376 request, task, response_ptr, status_ptr, 1377 complete_to_host_ptr, SAS_OREJ_RESV_AB1); 1378 break; 1379 1380 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 1381 1382 isci_request_set_open_reject_status( 1383 request, task, response_ptr, status_ptr, 1384 complete_to_host_ptr, SAS_OREJ_RESV_AB2); 1385 break; 1386 1387 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 1388 1389 isci_request_set_open_reject_status( 1390 request, task, response_ptr, status_ptr, 1391 complete_to_host_ptr, SAS_OREJ_RESV_AB3); 1392 break; 1393 1394 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 1395 1396 isci_request_set_open_reject_status( 1397 request, task, response_ptr, status_ptr, 1398 complete_to_host_ptr, SAS_OREJ_BAD_DEST); 1399 break; 1400 1401 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 1402 1403 isci_request_set_open_reject_status( 1404 request, task, response_ptr, status_ptr, 1405 complete_to_host_ptr, SAS_OREJ_STP_NORES); 1406 break; 1407 1408 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 1409 1410 isci_request_set_open_reject_status( 1411 request, task, response_ptr, status_ptr, 1412 complete_to_host_ptr, SAS_OREJ_EPROTO); 1413 break; 1414 1415 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 1416 1417 isci_request_set_open_reject_status( 1418 request, task, response_ptr, status_ptr, 1419 complete_to_host_ptr, SAS_OREJ_CONN_RATE); 1420 break; 1421 1422 case SCU_TASK_DONE_LL_R_ERR: 1423 /* Also SCU_TASK_DONE_ACK_NAK_TO: */ 1424 case SCU_TASK_DONE_LL_PERR: 1425 case SCU_TASK_DONE_LL_SY_TERM: 1426 /* Also SCU_TASK_DONE_NAK_ERR:*/ 1427 case SCU_TASK_DONE_LL_LF_TERM: 1428 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ 1429 case SCU_TASK_DONE_LL_ABORT_ERR: 1430 case SCU_TASK_DONE_SEQ_INV_TYPE: 1431 /* Also SCU_TASK_DONE_UNEXP_XR: */ 1432 case SCU_TASK_DONE_XR_IU_LEN_ERR: 1433 case SCU_TASK_DONE_INV_FIS_LEN: 1434 /* Also SCU_TASK_DONE_XR_WD_LEN: */ 1435 case SCU_TASK_DONE_SDMA_ERR: 1436 case SCU_TASK_DONE_OFFSET_ERR: 1437 case SCU_TASK_DONE_MAX_PLD_ERR: 1438 case SCU_TASK_DONE_LF_ERR: 1439 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ 1440 case SCU_TASK_DONE_SMP_LL_RX_ERR: 1441 case SCU_TASK_DONE_UNEXP_DATA: 1442 case SCU_TASK_DONE_UNEXP_SDBFIS: 1443 case SCU_TASK_DONE_REG_ERR: 1444 case SCU_TASK_DONE_SDB_ERR: 1445 case SCU_TASK_DONE_TASK_ABORT: 1446 default: 1447 /* Task in the target is not done. */ 1448 *response_ptr = SAS_TASK_UNDELIVERED; 1449 *status_ptr = SAM_STAT_TASK_ABORTED; 1450 request->complete_in_target = false; 1451 1452 *complete_to_host_ptr = isci_perform_error_io_completion; 1453 break; 1454 } 1455 } 1456 1457 /** 1458 * isci_task_save_for_upper_layer_completion() - This function saves the 1459 * request for later completion to the upper layer driver. 1460 * @host: This parameter is a pointer to the host on which the the request 1461 * should be queued (either as an error or success). 1462 * @request: This parameter is the completed request. 1463 * @response: This parameter is the response code for the completed task. 1464 * @status: This parameter is the status code for the completed task. 1465 * 1466 * none. 
1467 */ 1468 static void isci_task_save_for_upper_layer_completion( 1469 struct isci_host *host, 1470 struct isci_request *request, 1471 enum service_response response, 1472 enum exec_status status, 1473 enum isci_completion_selection task_notification_selection) 1474 { 1475 struct sas_task *task = isci_request_access_task(request); 1476 1477 task_notification_selection 1478 = isci_task_set_completion_status(task, response, status, 1479 task_notification_selection); 1480 1481 /* Tasks aborted specifically by a call to the lldd_abort_task 1482 * function should not be completed to the host in the regular path. 1483 */ 1484 switch (task_notification_selection) { 1485 1486 case isci_perform_normal_io_completion: 1487 1488 /* Normal notification (task_done) */ 1489 dev_dbg(&host->pdev->dev, 1490 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", 1491 __func__, 1492 task, 1493 task->task_status.resp, response, 1494 task->task_status.stat, status); 1495 /* Add to the completed list. */ 1496 list_add(&request->completed_node, 1497 &host->requests_to_complete); 1498 1499 /* Take the request off the device's pending request list. */ 1500 list_del_init(&request->dev_node); 1501 break; 1502 1503 case isci_perform_aborted_io_completion: 1504 /* No notification to libsas because this request is 1505 * already in the abort path. 1506 */ 1507 dev_warn(&host->pdev->dev, 1508 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", 1509 __func__, 1510 task, 1511 task->task_status.resp, response, 1512 task->task_status.stat, status); 1513 1514 /* Wake up whatever process was waiting for this 1515 * request to complete. 1516 */ 1517 WARN_ON(request->io_request_completion == NULL); 1518 1519 if (request->io_request_completion != NULL) { 1520 1521 /* Signal whoever is waiting that this 1522 * request is complete. 1523 */ 1524 complete(request->io_request_completion); 1525 } 1526 break; 1527 1528 case isci_perform_error_io_completion: 1529 /* Use sas_task_abort */ 1530 dev_warn(&host->pdev->dev, 1531 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", 1532 __func__, 1533 task, 1534 task->task_status.resp, response, 1535 task->task_status.stat, status); 1536 /* Add to the aborted list. */ 1537 list_add(&request->completed_node, 1538 &host->requests_to_errorback); 1539 break; 1540 1541 default: 1542 dev_warn(&host->pdev->dev, 1543 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", 1544 __func__, 1545 task, 1546 task->task_status.resp, response, 1547 task->task_status.stat, status); 1548 1549 /* Add to the error to libsas list. 
		list_add(&request->completed_node,
			 &host->requests_to_errorback);
		break;
	}
}

static void isci_request_io_request_complete(struct isci_host *isci_host,
					     struct isci_request *request,
					     enum sci_io_status completion_status)
{
	struct sas_task *task = isci_request_access_task(request);
	struct ssp_response_iu *resp_iu;
	void *resp_buf;
	unsigned long task_flags;
	struct isci_remote_device *isci_device = request->isci_device;
	enum service_response response = SAS_TASK_UNDELIVERED;
	enum exec_status status = SAS_ABORTED_TASK;
	enum isci_request_status request_status;
	enum isci_completion_selection complete_to_host
		= isci_perform_normal_io_completion;

	dev_dbg(&isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d completion_status = 0x%x\n",
		__func__,
		request,
		task,
		task->data_dir,
		completion_status);

	spin_lock(&request->state_lock);
	request_status = isci_request_get_state(request);

	/* Decode the request status.  Note that if the request has been
	 * aborted by a task management function, we don't care
	 * what the status is.
	 */
	switch (request_status) {

	case aborted:
		/* "aborted" indicates that the request was aborted by a task
		 * management function, since once a task management request is
		 * performed by the device, the request only completes because
		 * of the subsequent driver terminate.
		 *
		 * Aborted also means an external thread is explicitly managing
		 * this request, so that we do not complete it up the stack.
		 *
		 * The target is still there (since the TMF was successful).
		 */
		request->complete_in_target = true;
		response = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if ((isci_device->status == isci_stopping)
		    || (isci_device->status == isci_stopped))
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		complete_to_host = isci_perform_aborted_io_completion;
		/* This was an aborted request. */

		spin_unlock(&request->state_lock);
		break;

	case aborting:
		/* aborting means that the task management function tried and
		 * failed to abort the request.  We need to note the request
		 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
		 * target as down.
		 *
		 * Aborting also means an external thread is explicitly managing
		 * this request, so that we do not complete it up the stack.
		 */
		request->complete_in_target = true;
		response = SAS_TASK_UNDELIVERED;

		if ((isci_device->status == isci_stopping) ||
		    (isci_device->status == isci_stopped))
			/* The device has been/is being stopped.  Note that
			 * we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_PHY_DOWN;

		complete_to_host = isci_perform_aborted_io_completion;

		/* This was an aborted request. */

		spin_unlock(&request->state_lock);
		break;

	case terminating:

This happens when 1652 * the I/O is being terminated because of an action on 1653 * the device (reset, tear down, etc.), and the I/O needs 1654 * to be completed up the stack. 1655 */ 1656 request->complete_in_target = true; 1657 response = SAS_TASK_UNDELIVERED; 1658 1659 /* See if the device has been/is being stopped. Note 1660 * that we ignore the quiesce state, since we are 1661 * concerned about the actual device state. 1662 */ 1663 if ((isci_device->status == isci_stopping) || 1664 (isci_device->status == isci_stopped)) 1665 status = SAS_DEVICE_UNKNOWN; 1666 else 1667 status = SAS_ABORTED_TASK; 1668 1669 complete_to_host = isci_perform_aborted_io_completion; 1670 1671 /* This was a terminated request. */ 1672 1673 spin_unlock(&request->state_lock); 1674 break; 1675 1676 default: 1677 1678 /* The request is done from an SCU HW perspective. */ 1679 request->status = completed; 1680 1681 spin_unlock(&request->state_lock); 1682 1683 /* This is an active request being completed from the core. */ 1684 switch (completion_status) { 1685 1686 case SCI_IO_FAILURE_RESPONSE_VALID: 1687 dev_dbg(&isci_host->pdev->dev, 1688 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", 1689 __func__, 1690 request, 1691 task); 1692 1693 if (sas_protocol_ata(task->task_proto)) { 1694 resp_buf = &request->sci.stp.rsp; 1695 isci_request_process_stp_response(task, 1696 resp_buf); 1697 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 1698 1699 /* crack the iu response buffer. */ 1700 resp_iu = &request->sci.ssp.rsp; 1701 isci_request_process_response_iu(task, resp_iu, 1702 &isci_host->pdev->dev); 1703 1704 } else if (SAS_PROTOCOL_SMP == task->task_proto) { 1705 1706 dev_err(&isci_host->pdev->dev, 1707 "%s: SCI_IO_FAILURE_RESPONSE_VALID: " 1708 "SAS_PROTOCOL_SMP protocol\n", 1709 __func__); 1710 1711 } else 1712 dev_err(&isci_host->pdev->dev, 1713 "%s: unknown protocol\n", __func__); 1714 1715 /* use the task status set in the task struct by the 1716 * isci_request_process_response_iu call. 1717 */ 1718 request->complete_in_target = true; 1719 response = task->task_status.resp; 1720 status = task->task_status.stat; 1721 break; 1722 1723 case SCI_IO_SUCCESS: 1724 case SCI_IO_SUCCESS_IO_DONE_EARLY: 1725 1726 response = SAS_TASK_COMPLETE; 1727 status = SAM_STAT_GOOD; 1728 request->complete_in_target = true; 1729 1730 if (task->task_proto == SAS_PROTOCOL_SMP) { 1731 void *rsp = &request->sci.smp.rsp; 1732 1733 dev_dbg(&isci_host->pdev->dev, 1734 "%s: SMP protocol completion\n", 1735 __func__); 1736 1737 sg_copy_from_buffer( 1738 &task->smp_task.smp_resp, 1, 1739 rsp, sizeof(struct smp_resp)); 1740 } else if (completion_status 1741 == SCI_IO_SUCCESS_IO_DONE_EARLY) { 1742 1743 /* This was an SSP / STP / SATA transfer. 1744 * There is a possibility that less data than 1745 * the maximum was transferred. 1746 */ 1747 u32 transferred_length = sci_req_tx_bytes(&request->sci); 1748 1749 task->task_status.residual 1750 = task->total_xfer_len - transferred_length; 1751 1752 /* If there were residual bytes, call this an 1753 * underrun. 
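 * For example (illustrative numbers only): if task->total_xfer_len is
 * 8192 and the SCU reports 6144 bytes transferred, the residual is 2048
 * and the status below is downgraded from SAM_STAT_GOOD to
 * SAS_DATA_UNDERRUN.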
1754 */ 1755 if (task->task_status.residual != 0) 1756 status = SAS_DATA_UNDERRUN; 1757 1758 dev_dbg(&isci_host->pdev->dev, 1759 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 1760 __func__, 1761 status); 1762 1763 } else 1764 dev_dbg(&isci_host->pdev->dev, 1765 "%s: SCI_IO_SUCCESS\n", 1766 __func__); 1767 1768 break; 1769 1770 case SCI_IO_FAILURE_TERMINATED: 1771 dev_dbg(&isci_host->pdev->dev, 1772 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 1773 __func__, 1774 request, 1775 task); 1776 1777 /* The request was terminated explicitly. No handling 1778 * is needed in the SCSI error handler path. 1779 */ 1780 request->complete_in_target = true; 1781 response = SAS_TASK_UNDELIVERED; 1782 1783 /* See if the device has been/is being stopped. Note 1784 * that we ignore the quiesce state, since we are 1785 * concerned about the actual device state. 1786 */ 1787 if ((isci_device->status == isci_stopping) || 1788 (isci_device->status == isci_stopped)) 1789 status = SAS_DEVICE_UNKNOWN; 1790 else 1791 status = SAS_ABORTED_TASK; 1792 1793 complete_to_host = isci_perform_normal_io_completion; 1794 break; 1795 1796 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 1797 1798 isci_request_handle_controller_specific_errors( 1799 isci_device, request, task, &response, &status, 1800 &complete_to_host); 1801 1802 break; 1803 1804 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 1805 /* This is a special case, in that the I/O completion 1806 * is telling us that the device needs a reset. 1807 * In order for the device reset condition to be 1808 * noticed, the I/O has to be handled in the error 1809 * handler. Set the reset flag and cause the 1810 * SCSI error thread to be scheduled. 1811 */ 1812 spin_lock_irqsave(&task->task_state_lock, task_flags); 1813 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 1814 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 1815 1816 /* Fail the I/O. */ 1817 response = SAS_TASK_UNDELIVERED; 1818 status = SAM_STAT_TASK_ABORTED; 1819 1820 complete_to_host = isci_perform_error_io_completion; 1821 request->complete_in_target = false; 1822 break; 1823 1824 default: 1825 /* Catch any otherwise unhandled error codes here. */ 1826 dev_warn(&isci_host->pdev->dev, 1827 "%s: invalid completion code: 0x%x - " 1828 "isci_request = %p\n", 1829 __func__, completion_status, request); 1830 1831 response = SAS_TASK_UNDELIVERED; 1832 1833 /* See if the device has been/is being stopped. Note 1834 * that we ignore the quiesce state, since we are 1835 * concerned about the actual device state. 1836 */ 1837 if ((isci_device->status == isci_stopping) || 1838 (isci_device->status == isci_stopped)) 1839 status = SAS_DEVICE_UNKNOWN; 1840 else 1841 status = SAS_ABORTED_TASK; 1842 1843 complete_to_host = isci_perform_error_io_completion; 1844 request->complete_in_target = false; 1845 break; 1846 } 1847 break; 1848 } 1849 1850 isci_request_unmap_sgl(request, isci_host->pdev); 1851 1852 /* Put the completed request on the correct list */ 1853 isci_task_save_for_upper_layer_completion(isci_host, request, response, 1854 status, complete_to_host 1855 ); 1856 1857 /* complete the io request to the core. */ 1858 scic_controller_complete_io(&isci_host->sci, 1859 &isci_device->sci, 1860 &request->sci); 1861 /* Set the terminated flag so the request cannot be completed or 1862 * terminated again, and to cause any calls into abort 1863 * task to recognize the already completed case. 
1864 */ 1865 request->terminated = true; 1866 1867 isci_host_can_dequeue(isci_host, 1); 1868 } 1869 1870 /** 1871 * scic_sds_request_initial_state_enter() - 1872 * @object: This parameter specifies the base object for which the state 1873 * transition is occurring. 1874 * 1875 * This method implements the actions taken when entering the 1876 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial 1877 * base request is constructed. Entry into the initial state sets all handlers 1878 * for the io request object to their default handlers. 1879 */ 1880 static void scic_sds_request_initial_state_enter(void *object) 1881 { 1882 struct scic_sds_request *sci_req = object; 1883 1884 SET_STATE_HANDLER( 1885 sci_req, 1886 scic_sds_request_state_handler_table, 1887 SCI_BASE_REQUEST_STATE_INITIAL 1888 ); 1889 } 1890 1891 /** 1892 * scic_sds_request_constructed_state_enter() - 1893 * @object: The io request object that is to enter the constructed state. 1894 * 1895 * This method implements the actions taken when entering the 1896 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers 1897 * for the constructed state. 1898 */ 1899 static void scic_sds_request_constructed_state_enter(void *object) 1900 { 1901 struct scic_sds_request *sci_req = object; 1902 1903 SET_STATE_HANDLER( 1904 sci_req, 1905 scic_sds_request_state_handler_table, 1906 SCI_BASE_REQUEST_STATE_CONSTRUCTED 1907 ); 1908 } 1909 1910 /** 1911 * scic_sds_request_started_state_enter() - 1912 * @object: This parameter specifies the base object for which the state 1913 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. 1914 * 1915 * This method implements the actions taken when entering the 1916 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a 1917 * SCSI Task request we must enter the started substate machine. 1918 */ 1919 static void scic_sds_request_started_state_enter(void *object) 1920 { 1921 struct scic_sds_request *sci_req = object; 1922 1923 SET_STATE_HANDLER( 1924 sci_req, 1925 scic_sds_request_state_handler_table, 1926 SCI_BASE_REQUEST_STATE_STARTED 1927 ); 1928 1929 /* 1930 * Most of the request state machines have a started substate machine, so 1931 * start its execution on the entry to the started state. */ 1932 if (sci_req->has_started_substate_machine == true) 1933 sci_base_state_machine_start(&sci_req->started_substate_machine); 1934 } 1935 1936 /** 1937 * scic_sds_request_started_state_exit() - 1938 * @object: This parameter specifies the base object for which the state 1939 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST 1940 * object. 1941 * 1942 * This method implements the actions taken when exiting the 1943 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be 1944 * to stop the started substate machine. 1945 */ 1946 static void scic_sds_request_started_state_exit(void *object) 1947 { 1948 struct scic_sds_request *sci_req = object; 1949 1950 if (sci_req->has_started_substate_machine == true) 1951 sci_base_state_machine_stop(&sci_req->started_substate_machine); 1952 } 1953 1954 /** 1955 * scic_sds_request_completed_state_enter() - 1956 * @object: This parameter specifies the base object for which the state 1957 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST 1958 * object. 1959 * 1960 * This method implements the actions taken when entering the 1961 * SCI_BASE_REQUEST_STATE_COMPLETED state. 
This state is entered when the 1962 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request 1963 * completion status and convert it to an enum sci_status to return in the 1964 * completion callback function. 1965 */ 1966 static void scic_sds_request_completed_state_enter(void *object) 1967 { 1968 struct scic_sds_request *sci_req = object; 1969 struct scic_sds_controller *scic = 1970 scic_sds_request_get_controller(sci_req); 1971 struct isci_host *ihost = scic_to_ihost(scic); 1972 struct isci_request *ireq = sci_req_to_ireq(sci_req); 1973 1974 SET_STATE_HANDLER(sci_req, 1975 scic_sds_request_state_handler_table, 1976 SCI_BASE_REQUEST_STATE_COMPLETED); 1977 1978 /* Tell the SCI_USER that the IO request is complete */ 1979 if (sci_req->is_task_management_request == false) 1980 isci_request_io_request_complete(ihost, ireq, 1981 sci_req->sci_status); 1982 else 1983 isci_task_request_complete(ihost, ireq, sci_req->sci_status); 1984 } 1985 1986 /** 1987 * scic_sds_request_aborting_state_enter() - 1988 * @object: This parameter specifies the base object for which the state 1989 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST 1990 * object. 1991 * 1992 * This method implements the actions taken when entering the 1993 * SCI_BASE_REQUEST_STATE_ABORTING state. 1994 */ 1995 static void scic_sds_request_aborting_state_enter(void *object) 1996 { 1997 struct scic_sds_request *sci_req = object; 1998 1999 /* Setting the abort bit in the Task Context is required by the silicon. */ 2000 sci_req->task_context_buffer->abort = 1; 2001 2002 SET_STATE_HANDLER( 2003 sci_req, 2004 scic_sds_request_state_handler_table, 2005 SCI_BASE_REQUEST_STATE_ABORTING 2006 ); 2007 } 2008 2009 /** 2010 * scic_sds_request_final_state_enter() - 2011 * @object: This parameter specifies the base object for which the state 2012 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. 2013 * 2014 * This method implements the actions taken when entering the 2015 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the 2016 * state handlers in place. 
2017 */ 2018 static void scic_sds_request_final_state_enter(void *object) 2019 { 2020 struct scic_sds_request *sci_req = object; 2021 2022 SET_STATE_HANDLER( 2023 sci_req, 2024 scic_sds_request_state_handler_table, 2025 SCI_BASE_REQUEST_STATE_FINAL 2026 ); 2027 } 2028 2029 static const struct sci_base_state scic_sds_request_state_table[] = { 2030 [SCI_BASE_REQUEST_STATE_INITIAL] = { 2031 .enter_state = scic_sds_request_initial_state_enter, 2032 }, 2033 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { 2034 .enter_state = scic_sds_request_constructed_state_enter, 2035 }, 2036 [SCI_BASE_REQUEST_STATE_STARTED] = { 2037 .enter_state = scic_sds_request_started_state_enter, 2038 .exit_state = scic_sds_request_started_state_exit 2039 }, 2040 [SCI_BASE_REQUEST_STATE_COMPLETED] = { 2041 .enter_state = scic_sds_request_completed_state_enter, 2042 }, 2043 [SCI_BASE_REQUEST_STATE_ABORTING] = { 2044 .enter_state = scic_sds_request_aborting_state_enter, 2045 }, 2046 [SCI_BASE_REQUEST_STATE_FINAL] = { 2047 .enter_state = scic_sds_request_final_state_enter, 2048 }, 2049 }; 2050 2051 static void scic_sds_general_request_construct(struct scic_sds_controller *scic, 2052 struct scic_sds_remote_device *sci_dev, 2053 u16 io_tag, struct scic_sds_request *sci_req) 2054 { 2055 sci_base_state_machine_construct(&sci_req->state_machine, sci_req, 2056 scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL); 2057 sci_base_state_machine_start(&sci_req->state_machine); 2058 2059 sci_req->io_tag = io_tag; 2060 sci_req->owning_controller = scic; 2061 sci_req->target_device = sci_dev; 2062 sci_req->has_started_substate_machine = false; 2063 sci_req->protocol = SCIC_NO_PROTOCOL; 2064 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 2065 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); 2066 2067 sci_req->sci_status = SCI_SUCCESS; 2068 sci_req->scu_status = 0; 2069 sci_req->post_context = 0xFFFFFFFF; 2070 2071 sci_req->is_task_management_request = false; 2072 2073 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { 2074 sci_req->was_tag_assigned_by_user = false; 2075 sci_req->task_context_buffer = NULL; 2076 } else { 2077 sci_req->was_tag_assigned_by_user = true; 2078 2079 sci_req->task_context_buffer = 2080 scic_sds_controller_get_task_context_buffer(scic, io_tag); 2081 } 2082 } 2083 2084 static enum sci_status 2085 scic_io_request_construct(struct scic_sds_controller *scic, 2086 struct scic_sds_remote_device *sci_dev, 2087 u16 io_tag, struct scic_sds_request *sci_req) 2088 { 2089 struct domain_device *dev = sci_dev_to_domain(sci_dev); 2090 enum sci_status status = SCI_SUCCESS; 2091 2092 /* Build the common part of the request */ 2093 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); 2094 2095 if (sci_dev->rnc.remote_node_index == 2096 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 2097 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 2098 2099 if (dev->dev_type == SAS_END_DEV) 2100 scic_sds_ssp_io_request_assign_buffers(sci_req); 2101 else if ((dev->dev_type == SATA_DEV) || 2102 (dev->tproto & SAS_PROTOCOL_STP)) { 2103 scic_sds_stp_request_assign_buffers(sci_req); 2104 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); 2105 } else if (dev_is_expander(dev)) { 2106 scic_sds_smp_request_assign_buffers(sci_req); 2107 memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd)); 2108 } else 2109 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 2110 2111 if (status == SCI_SUCCESS) { 2112 memset(sci_req->task_context_buffer, 0, 2113 offsetof(struct scu_task_context, sgl_pair_ab)); 2114 } 2115 2116 
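/* Note that only the header portion of the task context is zeroed
 * above; the SGL area (sgl_pair_ab and beyond) is left to be written
 * later, when the request's scatter-gather list is built.
 */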
return status; 2117 } 2118 2119 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, 2120 struct scic_sds_remote_device *sci_dev, 2121 u16 io_tag, struct scic_sds_request *sci_req) 2122 { 2123 struct domain_device *dev = sci_dev_to_domain(sci_dev); 2124 enum sci_status status = SCI_SUCCESS; 2125 2126 /* Build the common part of the request */ 2127 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); 2128 2129 if (dev->dev_type == SAS_END_DEV) { 2130 scic_sds_ssp_task_request_assign_buffers(sci_req); 2131 2132 sci_req->has_started_substate_machine = true; 2133 2134 /* Construct the started sub-state machine. */ 2135 sci_base_state_machine_construct( 2136 &sci_req->started_substate_machine, 2137 sci_req, 2138 scic_sds_io_request_started_task_mgmt_substate_table, 2139 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION 2140 ); 2141 } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) 2142 scic_sds_stp_request_assign_buffers(sci_req); 2143 else 2144 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 2145 2146 if (status == SCI_SUCCESS) { 2147 sci_req->is_task_management_request = true; 2148 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); 2149 } 2150 2151 return status; 2152 } 2153 2154 static enum sci_status isci_request_ssp_request_construct( 2155 struct isci_request *request) 2156 { 2157 enum sci_status status; 2158 2159 dev_dbg(&request->isci_host->pdev->dev, 2160 "%s: request = %p\n", 2161 __func__, 2162 request); 2163 status = scic_io_request_construct_basic_ssp(&request->sci); 2164 return status; 2165 } 2166 2167 static enum sci_status isci_request_stp_request_construct( 2168 struct isci_request *request) 2169 { 2170 struct sas_task *task = isci_request_access_task(request); 2171 enum sci_status status; 2172 struct host_to_dev_fis *register_fis; 2173 2174 dev_dbg(&request->isci_host->pdev->dev, 2175 "%s: request = %p\n", 2176 __func__, 2177 request); 2178 2179 /* Get the host_to_dev_fis from the core and copy 2180 * the fis from the task into it. 2181 */ 2182 register_fis = isci_sata_task_to_fis_copy(task); 2183 2184 status = scic_io_request_construct_basic_sata(&request->sci); 2185 2186 /* Set the ncq tag in the fis, from the queue 2187 * command in the task. 2188 */ 2189 if (isci_sata_is_task_ncq(task)) { 2190 2191 isci_sata_set_ncq_tag( 2192 register_fis, 2193 task 2194 ); 2195 } 2196 2197 return status; 2198 } 2199 2200 /* 2201 * isci_smp_request_build() - This function builds the smp request. 2202 * @ireq: This parameter points to the isci_request allocated in the 2203 * request construct function. 2204 * 2205 * SCI_SUCCESS on successful completion, or specific failure code. 
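 * (The response travels the reverse path: on successful completion,
 * isci_request_io_request_complete() copies the core's smp.rsp buffer
 * back into task->smp_task.smp_resp, as seen earlier in this file.)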
2206 */ 2207 static enum sci_status isci_smp_request_build(struct isci_request *ireq) 2208 { 2209 enum sci_status status = SCI_FAILURE; 2210 struct sas_task *task = isci_request_access_task(ireq); 2211 struct scic_sds_request *sci_req = &ireq->sci; 2212 2213 dev_dbg(&ireq->isci_host->pdev->dev, 2214 "%s: request = %p\n", __func__, ireq); 2215 2216 dev_dbg(&ireq->isci_host->pdev->dev, 2217 "%s: smp_req len = %d\n", 2218 __func__, 2219 task->smp_task.smp_req.length); 2220 2221 /* copy the smp command into the request's command buffer */ 2222 sg_copy_to_buffer(&task->smp_task.smp_req, 1, 2223 &sci_req->smp.cmd, 2224 sizeof(struct smp_req)); 2225 2226 status = scic_io_request_construct_smp(sci_req); 2227 if (status != SCI_SUCCESS) 2228 dev_warn(&ireq->isci_host->pdev->dev, 2229 "%s: failed with status = %d\n", 2230 __func__, 2231 status); 2232 2233 return status; 2234 } 2235 2236 /** 2237 * isci_io_request_build() - This function builds the io request object. 2238 * @isci_host: This parameter specifies the ISCI host object 2239 * @request: This parameter points to the isci_request object allocated in the 2240 * request construct function. 2241 * @isci_device: This parameter is the handle for the isci remote device 2242 * object that is the destination for this request. 2243 * 2244 * SCI_SUCCESS on successful completion, or specific failure code. 2245 */ 2246 static enum sci_status isci_io_request_build( 2247 struct isci_host *isci_host, 2248 struct isci_request *request, 2249 struct isci_remote_device *isci_device) 2250 { 2251 enum sci_status status = SCI_SUCCESS; 2252 struct sas_task *task = isci_request_access_task(request); 2253 struct scic_sds_remote_device *sci_device = &isci_device->sci; 2254 2255 dev_dbg(&isci_host->pdev->dev, 2256 "%s: isci_device = 0x%p; request = %p, " 2257 "num_scatter = %d\n", 2258 __func__, 2259 isci_device, 2260 request, 2261 task->num_scatter); 2262 2263 /* map the sgl addresses, if present. 2264 * libata does the mapping for sata devices 2265 * before we get the request. 2266 */ 2267 if (task->num_scatter && 2268 !sas_protocol_ata(task->task_proto) && 2269 !(SAS_PROTOCOL_SMP & task->task_proto)) { 2270 2271 request->num_sg_entries = dma_map_sg( 2272 &isci_host->pdev->dev, 2273 task->scatter, 2274 task->num_scatter, 2275 task->data_dir 2276 ); 2277 2278 if (request->num_sg_entries == 0) 2279 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 2280 } 2281 2282 /* build the common request object. For now, 2283 * we will let the core allocate the IO tag. 2284 */ 2285 status = scic_io_request_construct(&isci_host->sci, sci_device, 2286 SCI_CONTROLLER_INVALID_IO_TAG, 2287 &request->sci); 2288 2289 if (status != SCI_SUCCESS) { 2290 dev_warn(&isci_host->pdev->dev, 2291 "%s: failed request construct\n", 2292 __func__); 2293 return SCI_FAILURE; 2294 } 2295 2296 switch (task->task_proto) { 2297 case SAS_PROTOCOL_SMP: 2298 status = isci_smp_request_build(request); 2299 break; 2300 case SAS_PROTOCOL_SSP: 2301 status = isci_request_ssp_request_construct(request); 2302 break; 2303 case SAS_PROTOCOL_SATA: 2304 case SAS_PROTOCOL_STP: 2305 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2306 status = isci_request_stp_request_construct(request); 2307 break; 2308 default: 2309 dev_warn(&isci_host->pdev->dev, 2310 "%s: unknown protocol\n", __func__); 2311 return SCI_FAILURE; 2312 } 2313 2314 return status; 2315 } 2316 2317 /** 2318 * isci_request_alloc_core() - This function gets the request object from the 2319 * isci_host dma cache. 
2320 * @isci_host: This parameter specifies the ISCI host object 2321 * @isci_request: This parameter will contain the pointer to the new 2322 * isci_request object. 2323 * @isci_device: This parameter is the pointer to the isci remote device object 2324 * that is the destination for this request. 2325 * @gfp_flags: This parameter specifies the os allocation flags. 2326 * 2327 * 0 on successful completion, or -ENOMEM if the dma pool allocation fails. 2328 */ 2329 static int isci_request_alloc_core( 2330 struct isci_host *isci_host, 2331 struct isci_request **isci_request, 2332 struct isci_remote_device *isci_device, 2333 gfp_t gfp_flags) 2334 { 2335 int ret = 0; 2336 dma_addr_t handle; 2337 struct isci_request *request; 2338 2339 2340 /* get pointer to dma memory. This actually points 2341 * to both the isci_request object and the 2342 * sci object. The isci object is at the beginning 2343 * of the memory allocated here. 2344 */ 2345 request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle); 2346 if (!request) { 2347 dev_warn(&isci_host->pdev->dev, 2348 "%s: dma_pool_alloc returned NULL\n", __func__); 2349 return -ENOMEM; 2350 } 2351 2352 /* initialize the request object. */ 2353 spin_lock_init(&request->state_lock); 2354 request->request_daddr = handle; 2355 request->isci_host = isci_host; 2356 request->isci_device = isci_device; 2357 request->io_request_completion = NULL; 2358 request->terminated = false; 2359 2360 request->num_sg_entries = 0; 2361 2362 request->complete_in_target = false; 2363 2364 INIT_LIST_HEAD(&request->completed_node); 2365 INIT_LIST_HEAD(&request->dev_node); 2366 2367 *isci_request = request; 2368 isci_request_change_state(request, allocated); 2369 2370 return ret; 2371 } 2372 2373 static int isci_request_alloc_io( 2374 struct isci_host *isci_host, 2375 struct sas_task *task, 2376 struct isci_request **isci_request, 2377 struct isci_remote_device *isci_device, 2378 gfp_t gfp_flags) 2379 { 2380 int retval = isci_request_alloc_core(isci_host, isci_request, 2381 isci_device, gfp_flags); 2382 2383 if (!retval) { 2384 (*isci_request)->ttype_ptr.io_task_ptr = task; 2385 (*isci_request)->ttype = io_task; 2386 2387 task->lldd_task = *isci_request; 2388 } 2389 return retval; 2390 } 2391 2392 /** 2393 * isci_request_alloc_tmf() - This function gets the request object from the 2394 * isci_host dma cache and initializes the relevant fields for a task management request. 2395 * @isci_host: This parameter specifies the ISCI host object 2396 * @isci_tmf: This parameter is the task management request from the upper layer driver. 2397 * @isci_request: This parameter will contain the pointer to the new 2398 * isci_request object. 2399 * @isci_device: This parameter is the pointer to the isci remote device object 2400 * that is the destination for this request. 2401 * @gfp_flags: This parameter specifies the os allocation flags. 2402 * 2403 * 0 on successful completion, or a negative error code. 
2404 */ 2405 int isci_request_alloc_tmf( 2406 struct isci_host *isci_host, 2407 struct isci_tmf *isci_tmf, 2408 struct isci_request **isci_request, 2409 struct isci_remote_device *isci_device, 2410 gfp_t gfp_flags) 2411 { 2412 int retval = isci_request_alloc_core(isci_host, isci_request, 2413 isci_device, gfp_flags); 2414 2415 if (!retval) { 2416 2417 (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf; 2418 (*isci_request)->ttype = tmf_task; 2419 } 2420 return retval; 2421 } 2422 2423 /** 2424 * isci_request_execute() - This function allocates the isci_request object, 2425 * and fills in some common fields. 2426 * @isci_host: This parameter specifies the ISCI host object 2427 * @task: This parameter is the task struct from the upper layer driver. 2428 * @isci_request: This parameter will contain the pointer to the new 2429 * isci_request object. 2430 * @gfp_flags: This parameter specifies the os allocation flags. 2431 * 2432 * 0 on successful completion, or a failure code. 2433 */ 2434 int isci_request_execute( 2435 struct isci_host *isci_host, 2436 struct sas_task *task, 2437 struct isci_request **isci_request, 2438 gfp_t gfp_flags) 2439 { 2440 int ret = 0; 2441 struct scic_sds_remote_device *sci_device; 2442 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 2443 struct isci_remote_device *isci_device; 2444 struct isci_request *request; 2445 unsigned long flags; 2446 2447 isci_device = task->dev->lldd_dev; 2448 sci_device = &isci_device->sci; 2449 2450 /* do common allocation and init of request object. */ 2451 ret = isci_request_alloc_io( 2452 isci_host, 2453 task, 2454 &request, 2455 isci_device, 2456 gfp_flags 2457 ); 2458 2459 if (ret) 2460 goto out; 2461 2462 status = isci_io_request_build(isci_host, request, isci_device); 2463 if (status != SCI_SUCCESS) { 2464 dev_warn(&isci_host->pdev->dev, 2465 "%s: request_construct failed - status = 0x%x\n", 2466 __func__, 2467 status); 2468 goto out; 2469 } 2470 2471 spin_lock_irqsave(&isci_host->scic_lock, flags); 2472 2473 /* send the request, let the core assign the IO TAG. */ 2474 status = scic_controller_start_io(&isci_host->sci, sci_device, 2475 &request->sci, 2476 SCI_CONTROLLER_INVALID_IO_TAG); 2477 if (status != SCI_SUCCESS && 2478 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 2479 dev_warn(&isci_host->pdev->dev, 2480 "%s: failed request start (0x%x)\n", 2481 __func__, status); 2482 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 2483 goto out; 2484 } 2485 2486 /* Either I/O started OK, or the core has signaled that 2487 * the device needs a target reset. 2488 * 2489 * In either case, hold onto the I/O for later. 2490 * 2491 * Update its status and add it to the list in the 2492 * remote device object. 2493 */ 2494 isci_request_change_state(request, started); 2495 list_add(&request->dev_node, &isci_device->reqs_in_process); 2496 2497 if (status == SCI_SUCCESS) { 2498 /* Save the tag for possible task mgmt later. */ 2499 request->io_tag = request->sci.io_tag; 2500 } else { 2501 /* The request did not really start in the 2502 * hardware, so set the terminated flag here 2503 * so no terminations will be done. 2504 */ 2505 request->terminated = true; 2506 } 2507 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 2508 2509 if (status == 2510 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 2511 /* Signal libsas that we need the SCSI error 2512 * handler thread to work on this I/O and that 2513 * we want a device reset. 
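 * (With SAS_TASK_NEED_DEV_RESET set and sas_task_abort() invoked below,
 * libsas is expected to escalate to a device reset from its error
 * handler.)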
2514 */ 2515 spin_lock_irqsave(&task->task_state_lock, flags); 2516 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 2517 spin_unlock_irqrestore(&task->task_state_lock, flags); 2518 2519 /* Cause this task to be scheduled in the SCSI error 2520 * handler thread. 2521 */ 2522 isci_execpath_callback(isci_host, task, 2523 sas_task_abort); 2524 2525 /* Change the status, since we are holding 2526 * the I/O until it is managed by the SCSI 2527 * error handler. 2528 */ 2529 status = SCI_SUCCESS; 2530 } 2531 2532 out: 2533 if (status != SCI_SUCCESS) { 2534 /* release dma memory on failure. */ 2535 isci_request_free(isci_host, request); 2536 request = NULL; 2537 ret = SCI_FAILURE; 2538 } 2539 2540 *isci_request = request; 2541 return ret; 2542 } 2543 2544 2545 2546
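/*
 * Illustrative sketch only (guarded out, not built): one way a libsas
 * execute-task entry point could drive isci_request_execute() above.
 * The host lookup and the single-task assumption are simplifications,
 * and example_lldd_execute_task() is a hypothetical name, not part of
 * this driver.
 */
#if 0
static int example_lldd_execute_task(struct sas_task *task, gfp_t gfp_flags)
{
	/* The LLDD's per-host object hangs off the libsas ha struct
	 * (assumed to have been stored in lldd_ha at registration).
	 */
	struct isci_host *ihost = task->dev->port->ha->lldd_ha;
	struct isci_request *ireq;
	int ret;

	/* Allocate, build and start the I/O; on success the request sits
	 * on the remote device's reqs_in_process list until
	 * isci_request_io_request_complete() posts it back to libsas.
	 */
	ret = isci_request_execute(ihost, task, &ireq, gfp_flags);
	if (ret)
		dev_warn(&ihost->pdev->dev, "%s: I/O submit failed (%d)\n",
			 __func__, ret);
	return ret;
}
#endif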