1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * BSD LICENSE 25 * 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 * All rights reserved. 28 * 29 * Redistribution and use in source and binary forms, with or without 30 * modification, are permitted provided that the following conditions 31 * are met: 32 * 33 * * Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * * Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in 37 * the documentation and/or other materials provided with the 38 * distribution. 39 * * Neither the name of Intel Corporation nor the names of its 40 * contributors may be used to endorse or promote products derived 41 * from this software without specific prior written permission. 42 * 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56 #include "isci.h" 57 #include "scic_port.h" 58 #include "task.h" 59 #include "request.h" 60 #include "sata.h" 61 #include "scu_completion_codes.h" 62 #include "sas.h" 63 64 /** 65 * This method returns the sgl element pair for the specificed sgl_pair index. 66 * @sci_req: This parameter specifies the IO request for which to retrieve 67 * the Scatter-Gather List element pair. 68 * @sgl_pair_index: This parameter specifies the index into the SGL element 69 * pair to be retrieved. 70 * 71 * This method returns a pointer to an struct scu_sgl_element_pair. 72 */ 73 static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( 74 struct scic_sds_request *sci_req, 75 u32 sgl_pair_index 76 ) { 77 struct scu_task_context *task_context; 78 79 task_context = (struct scu_task_context *)sci_req->task_context_buffer; 80 81 if (sgl_pair_index == 0) { 82 return &task_context->sgl_pair_ab; 83 } else if (sgl_pair_index == 1) { 84 return &task_context->sgl_pair_cd; 85 } 86 87 return &sci_req->sg_table[sgl_pair_index - 2]; 88 } 89 90 /** 91 * This function will build the SGL list for an IO request. 92 * @sci_req: This parameter specifies the IO request for which to build 93 * the Scatter-Gather List. 94 * 95 */ 96 void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) 97 { 98 struct isci_request *isci_request = sci_req_to_ireq(sds_request); 99 struct isci_host *isci_host = isci_request->isci_host; 100 struct sas_task *task = isci_request_access_task(isci_request); 101 struct scatterlist *sg = NULL; 102 dma_addr_t dma_addr; 103 u32 sg_idx = 0; 104 struct scu_sgl_element_pair *scu_sg = NULL; 105 struct scu_sgl_element_pair *prev_sg = NULL; 106 107 if (task->num_scatter > 0) { 108 sg = task->scatter; 109 110 while (sg) { 111 scu_sg = scic_sds_request_get_sgl_element_pair( 112 sds_request, 113 sg_idx); 114 115 SCU_SGL_COPY(scu_sg->A, sg); 116 117 sg = sg_next(sg); 118 119 if (sg) { 120 SCU_SGL_COPY(scu_sg->B, sg); 121 sg = sg_next(sg); 122 } else 123 SCU_SGL_ZERO(scu_sg->B); 124 125 if (prev_sg) { 126 dma_addr = 127 scic_io_request_get_dma_addr( 128 sds_request, 129 scu_sg); 130 131 prev_sg->next_pair_upper = 132 upper_32_bits(dma_addr); 133 prev_sg->next_pair_lower = 134 lower_32_bits(dma_addr); 135 } 136 137 prev_sg = scu_sg; 138 sg_idx++; 139 } 140 } else { /* handle when no sg */ 141 scu_sg = scic_sds_request_get_sgl_element_pair(sds_request, 142 sg_idx); 143 144 dma_addr = dma_map_single(&isci_host->pdev->dev, 145 task->scatter, 146 task->total_xfer_len, 147 task->data_dir); 148 149 isci_request->zero_scatter_daddr = dma_addr; 150 151 scu_sg->A.length = task->total_xfer_len; 152 scu_sg->A.address_upper = upper_32_bits(dma_addr); 153 scu_sg->A.address_lower = lower_32_bits(dma_addr); 154 } 155 156 if (scu_sg) { 157 scu_sg->next_pair_upper = 0; 158 scu_sg->next_pair_lower = 0; 159 } 160 } 161 162 static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_req) 163 { 164 if (sci_req->was_tag_assigned_by_user == false) 165 sci_req->task_context_buffer = &sci_req->tc; 166 } 167 168 static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) 169 { 170 struct ssp_cmd_iu *cmd_iu; 171 struct isci_request *ireq = sci_req_to_ireq(sci_req); 172 struct sas_task *task = isci_request_access_task(ireq); 173 174 cmd_iu = &sci_req->ssp.cmd; 175 176 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); 177 cmd_iu->add_cdb_len = 0; 178 cmd_iu->_r_a = 0; 179 cmd_iu->_r_b = 0; 180 cmd_iu->en_fburst = 0; /* unsupported */ 181 cmd_iu->task_prio = task->ssp_task.task_prio; 182 cmd_iu->task_attr = task->ssp_task.task_attr; 183 cmd_iu->_r_c = 0; 184 185 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, 186 sizeof(task->ssp_task.cdb) / sizeof(u32)); 187 } 188 189 static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req) 190 { 191 struct ssp_task_iu *task_iu; 192 struct isci_request *ireq = sci_req_to_ireq(sci_req); 193 struct sas_task *task = isci_request_access_task(ireq); 194 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 195 196 task_iu = &sci_req->ssp.tmf; 197 198 memset(task_iu, 0, sizeof(struct ssp_task_iu)); 199 200 memcpy(task_iu->LUN, task->ssp_task.LUN, 8); 201 202 task_iu->task_func = isci_tmf->tmf_code; 203 task_iu->task_tag = 204 (ireq->ttype == tmf_task) ? 205 isci_tmf->io_tag : 206 SCI_CONTROLLER_INVALID_IO_TAG; 207 } 208 209 /** 210 * This method is will fill in the SCU Task Context for any type of SSP request. 211 * @sci_req: 212 * @task_context: 213 * 214 */ 215 static void scu_ssp_reqeust_construct_task_context( 216 struct scic_sds_request *sds_request, 217 struct scu_task_context *task_context) 218 { 219 dma_addr_t dma_addr; 220 struct scic_sds_controller *controller; 221 struct scic_sds_remote_device *target_device; 222 struct scic_sds_port *target_port; 223 224 controller = scic_sds_request_get_controller(sds_request); 225 target_device = scic_sds_request_get_device(sds_request); 226 target_port = scic_sds_request_get_port(sds_request); 227 228 /* Fill in the TC with the its required data */ 229 task_context->abort = 0; 230 task_context->priority = 0; 231 task_context->initiator_request = 1; 232 task_context->connection_rate = target_device->connection_rate; 233 task_context->protocol_engine_index = 234 scic_sds_controller_get_protocol_engine_group(controller); 235 task_context->logical_port_index = 236 scic_sds_port_get_index(target_port); 237 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 238 task_context->valid = SCU_TASK_CONTEXT_VALID; 239 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 240 241 task_context->remote_node_index = 242 scic_sds_remote_device_get_index(sds_request->target_device); 243 task_context->command_code = 0; 244 245 task_context->link_layer_control = 0; 246 task_context->do_not_dma_ssp_good_response = 1; 247 task_context->strict_ordering = 0; 248 task_context->control_frame = 0; 249 task_context->timeout_enable = 0; 250 task_context->block_guard_enable = 0; 251 252 task_context->address_modifier = 0; 253 254 /* task_context->type.ssp.tag = sci_req->io_tag; */ 255 task_context->task_phase = 0x01; 256 257 if (sds_request->was_tag_assigned_by_user) { 258 /* 259 * Build the task context now since we have already read 260 * the data 261 */ 262 sds_request->post_context = 263 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 264 (scic_sds_controller_get_protocol_engine_group( 265 controller) << 266 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 267 (scic_sds_port_get_index(target_port) << 268 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 269 scic_sds_io_tag_get_index(sds_request->io_tag)); 270 } else { 271 /* 272 * Build the task context now since we have already read 273 * the data 274 * 275 * I/O tag index is not assigned because we have to wait 276 * until we get a TCi 277 */ 278 sds_request->post_context = 279 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 280 (scic_sds_controller_get_protocol_engine_group( 281 owning_controller) << 282 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 283 (scic_sds_port_get_index(target_port) << 284 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); 285 } 286 287 /* 288 * Copy the physical address for the command buffer to the 289 * SCU Task Context 290 */ 291 dma_addr = scic_io_request_get_dma_addr(sds_request, 292 &sds_request->ssp.cmd); 293 294 task_context->command_iu_upper = upper_32_bits(dma_addr); 295 task_context->command_iu_lower = lower_32_bits(dma_addr); 296 297 /* 298 * Copy the physical address for the response buffer to the 299 * SCU Task Context 300 */ 301 dma_addr = scic_io_request_get_dma_addr(sds_request, 302 &sds_request->ssp.rsp); 303 304 task_context->response_iu_upper = upper_32_bits(dma_addr); 305 task_context->response_iu_lower = lower_32_bits(dma_addr); 306 } 307 308 /** 309 * This method is will fill in the SCU Task Context for a SSP IO request. 310 * @sci_req: 311 * 312 */ 313 static void scu_ssp_io_request_construct_task_context( 314 struct scic_sds_request *sci_req, 315 enum dma_data_direction dir, 316 u32 len) 317 { 318 struct scu_task_context *task_context; 319 320 task_context = scic_sds_request_get_task_context(sci_req); 321 322 scu_ssp_reqeust_construct_task_context(sci_req, task_context); 323 324 task_context->ssp_command_iu_length = 325 sizeof(struct ssp_cmd_iu) / sizeof(u32); 326 task_context->type.ssp.frame_type = SSP_COMMAND; 327 328 switch (dir) { 329 case DMA_FROM_DEVICE: 330 case DMA_NONE: 331 default: 332 task_context->task_type = SCU_TASK_TYPE_IOREAD; 333 break; 334 case DMA_TO_DEVICE: 335 task_context->task_type = SCU_TASK_TYPE_IOWRITE; 336 break; 337 } 338 339 task_context->transfer_length_bytes = len; 340 341 if (task_context->transfer_length_bytes > 0) 342 scic_sds_request_build_sgl(sci_req); 343 } 344 345 static void scic_sds_ssp_task_request_assign_buffers(struct scic_sds_request *sci_req) 346 { 347 if (sci_req->was_tag_assigned_by_user == false) 348 sci_req->task_context_buffer = &sci_req->tc; 349 } 350 351 /** 352 * This method will fill in the SCU Task Context for a SSP Task request. The 353 * following important settings are utilized: -# priority == 354 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued 355 * ahead of other task destined for the same Remote Node. -# task_type == 356 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type 357 * (i.e. non-raw frame) is being utilized to perform task management. -# 358 * control_frame == 1. This ensures that the proper endianess is set so 359 * that the bytes are transmitted in the right order for a task frame. 360 * @sci_req: This parameter specifies the task request object being 361 * constructed. 362 * 363 */ 364 static void scu_ssp_task_request_construct_task_context( 365 struct scic_sds_request *sci_req) 366 { 367 struct scu_task_context *task_context; 368 369 task_context = scic_sds_request_get_task_context(sci_req); 370 371 scu_ssp_reqeust_construct_task_context(sci_req, task_context); 372 373 task_context->control_frame = 1; 374 task_context->priority = SCU_TASK_PRIORITY_HIGH; 375 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; 376 task_context->transfer_length_bytes = 0; 377 task_context->type.ssp.frame_type = SSP_TASK; 378 task_context->ssp_command_iu_length = 379 sizeof(struct ssp_task_iu) / sizeof(u32); 380 } 381 382 383 /** 384 * This method constructs the SSP Command IU data for this ssp passthrough 385 * comand request object. 386 * @sci_req: This parameter specifies the request object for which the SSP 387 * command information unit is being built. 388 * 389 * enum sci_status, returns invalid parameter is cdb > 16 390 */ 391 392 393 /** 394 * This method constructs the SATA request object. 395 * @sci_req: 396 * @sat_protocol: 397 * @transfer_length: 398 * @data_direction: 399 * @copy_rx_frame: 400 * 401 * enum sci_status 402 */ 403 static enum sci_status 404 scic_io_request_construct_sata(struct scic_sds_request *sci_req, 405 u32 len, 406 enum dma_data_direction dir, 407 bool copy) 408 { 409 enum sci_status status = SCI_SUCCESS; 410 struct isci_request *ireq = sci_req_to_ireq(sci_req); 411 struct sas_task *task = isci_request_access_task(ireq); 412 413 /* check for management protocols */ 414 if (ireq->ttype == tmf_task) { 415 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 416 417 if (tmf->tmf_code == isci_tmf_sata_srst_high || 418 tmf->tmf_code == isci_tmf_sata_srst_low) 419 return scic_sds_stp_soft_reset_request_construct(sci_req); 420 else { 421 dev_err(scic_to_dev(sci_req->owning_controller), 422 "%s: Request 0x%p received un-handled SAT " 423 "management protocol 0x%x.\n", 424 __func__, sci_req, tmf->tmf_code); 425 426 return SCI_FAILURE; 427 } 428 } 429 430 if (!sas_protocol_ata(task->task_proto)) { 431 dev_err(scic_to_dev(sci_req->owning_controller), 432 "%s: Non-ATA protocol in SATA path: 0x%x\n", 433 __func__, 434 task->task_proto); 435 return SCI_FAILURE; 436 437 } 438 439 /* non data */ 440 if (task->data_dir == DMA_NONE) 441 return scic_sds_stp_non_data_request_construct(sci_req); 442 443 /* NCQ */ 444 if (task->ata_task.use_ncq) 445 return scic_sds_stp_ncq_request_construct(sci_req, len, dir); 446 447 /* DMA */ 448 if (task->ata_task.dma_xfer) 449 return scic_sds_stp_udma_request_construct(sci_req, len, dir); 450 else /* PIO */ 451 return scic_sds_stp_pio_request_construct(sci_req, copy); 452 453 return status; 454 } 455 456 static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req) 457 { 458 struct isci_request *ireq = sci_req_to_ireq(sci_req); 459 struct sas_task *task = isci_request_access_task(ireq); 460 461 sci_req->protocol = SCIC_SSP_PROTOCOL; 462 463 scu_ssp_io_request_construct_task_context(sci_req, 464 task->data_dir, 465 task->total_xfer_len); 466 467 scic_sds_io_request_build_ssp_command_iu(sci_req); 468 469 sci_base_state_machine_change_state( 470 &sci_req->state_machine, 471 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 472 473 return SCI_SUCCESS; 474 } 475 476 enum sci_status scic_task_request_construct_ssp( 477 struct scic_sds_request *sci_req) 478 { 479 /* Construct the SSP Task SCU Task Context */ 480 scu_ssp_task_request_construct_task_context(sci_req); 481 482 /* Fill in the SSP Task IU */ 483 scic_sds_task_request_build_ssp_task_iu(sci_req); 484 485 sci_base_state_machine_change_state(&sci_req->state_machine, 486 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 487 488 return SCI_SUCCESS; 489 } 490 491 492 static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) 493 { 494 enum sci_status status; 495 struct scic_sds_stp_request *stp_req; 496 bool copy = false; 497 struct isci_request *isci_request = sci_req_to_ireq(sci_req); 498 struct sas_task *task = isci_request_access_task(isci_request); 499 500 stp_req = &sci_req->stp.req; 501 sci_req->protocol = SCIC_STP_PROTOCOL; 502 503 copy = (task->data_dir == DMA_NONE) ? false : true; 504 505 status = scic_io_request_construct_sata(sci_req, 506 task->total_xfer_len, 507 task->data_dir, 508 copy); 509 510 if (status == SCI_SUCCESS) 511 sci_base_state_machine_change_state(&sci_req->state_machine, 512 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 513 514 return status; 515 } 516 517 518 enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) 519 { 520 enum sci_status status = SCI_SUCCESS; 521 struct isci_request *ireq = sci_req_to_ireq(sci_req); 522 523 /* check for management protocols */ 524 if (ireq->ttype == tmf_task) { 525 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 526 527 if (tmf->tmf_code == isci_tmf_sata_srst_high || 528 tmf->tmf_code == isci_tmf_sata_srst_low) { 529 status = scic_sds_stp_soft_reset_request_construct(sci_req); 530 } else { 531 dev_err(scic_to_dev(sci_req->owning_controller), 532 "%s: Request 0x%p received un-handled SAT " 533 "Protocol 0x%x.\n", 534 __func__, sci_req, tmf->tmf_code); 535 536 return SCI_FAILURE; 537 } 538 } 539 540 if (status == SCI_SUCCESS) 541 sci_base_state_machine_change_state( 542 &sci_req->state_machine, 543 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 544 545 return status; 546 } 547 548 /** 549 * sci_req_tx_bytes - bytes transferred when reply underruns request 550 * @sci_req: request that was terminated early 551 */ 552 #define SCU_TASK_CONTEXT_SRAM 0x200000 553 static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) 554 { 555 struct scic_sds_controller *scic = sci_req->owning_controller; 556 u32 ret_val = 0; 557 558 if (readl(&scic->smu_registers->address_modifier) == 0) { 559 void __iomem *scu_reg_base = scic->scu_registers; 560 561 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where 562 * BAR1 is the scu_registers 563 * 0x20002C = 0x200000 + 0x2c 564 * = start of task context SRAM + offset of (type.ssp.data_offset) 565 * TCi is the io_tag of struct scic_sds_request 566 */ 567 ret_val = readl(scu_reg_base + 568 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 569 ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag))); 570 } 571 572 return ret_val; 573 } 574 575 enum sci_status 576 scic_sds_request_start(struct scic_sds_request *request) 577 { 578 if (request->device_sequence != 579 scic_sds_remote_device_get_sequence(request->target_device)) 580 return SCI_FAILURE; 581 582 if (request->state_handlers->start_handler) 583 return request->state_handlers->start_handler(request); 584 585 dev_warn(scic_to_dev(request->owning_controller), 586 "%s: SCIC IO Request requested to start while in wrong " 587 "state %d\n", 588 __func__, 589 sci_base_state_machine_get_state(&request->state_machine)); 590 591 return SCI_FAILURE_INVALID_STATE; 592 } 593 594 enum sci_status 595 scic_sds_io_request_terminate(struct scic_sds_request *request) 596 { 597 if (request->state_handlers->abort_handler) 598 return request->state_handlers->abort_handler(request); 599 600 dev_warn(scic_to_dev(request->owning_controller), 601 "%s: SCIC IO Request requested to abort while in wrong " 602 "state %d\n", 603 __func__, 604 sci_base_state_machine_get_state(&request->state_machine)); 605 606 return SCI_FAILURE_INVALID_STATE; 607 } 608 609 enum sci_status scic_sds_io_request_event_handler( 610 struct scic_sds_request *request, 611 u32 event_code) 612 { 613 if (request->state_handlers->event_handler) 614 return request->state_handlers->event_handler(request, event_code); 615 616 dev_warn(scic_to_dev(request->owning_controller), 617 "%s: SCIC IO Request given event code notification %x while " 618 "in wrong state %d\n", 619 __func__, 620 event_code, 621 sci_base_state_machine_get_state(&request->state_machine)); 622 623 return SCI_FAILURE_INVALID_STATE; 624 } 625 626 /** 627 * 628 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start 629 * operation is to be executed. 630 * @frame_index: The frame index returned by the hardware for the reqeust 631 * object. 632 * 633 * This method invokes the core state frame handler for the 634 * SCIC_SDS_IO_REQUEST_T object. enum sci_status 635 */ 636 enum sci_status scic_sds_io_request_frame_handler( 637 struct scic_sds_request *request, 638 u32 frame_index) 639 { 640 if (request->state_handlers->frame_handler) 641 return request->state_handlers->frame_handler(request, frame_index); 642 643 dev_warn(scic_to_dev(request->owning_controller), 644 "%s: SCIC IO Request given unexpected frame %x while in " 645 "state %d\n", 646 __func__, 647 frame_index, 648 sci_base_state_machine_get_state(&request->state_machine)); 649 650 scic_sds_controller_release_frame(request->owning_controller, frame_index); 651 return SCI_FAILURE_INVALID_STATE; 652 } 653 654 /* 655 * This function copies response data for requests returning response data 656 * instead of sense data. 657 * @sci_req: This parameter specifies the request object for which to copy 658 * the response data. 659 */ 660 void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) 661 { 662 void *resp_buf; 663 u32 len; 664 struct ssp_response_iu *ssp_response; 665 struct isci_request *ireq = sci_req_to_ireq(sci_req); 666 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 667 668 ssp_response = &sci_req->ssp.rsp; 669 670 resp_buf = &isci_tmf->resp.resp_iu; 671 672 len = min_t(u32, 673 SSP_RESP_IU_MAX_SIZE, 674 be32_to_cpu(ssp_response->response_data_len)); 675 676 memcpy(resp_buf, ssp_response->resp_data, len); 677 } 678 679 /* 680 * This method implements the action taken when a constructed 681 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request. 682 * This method will, if necessary, allocate a TCi for the io request object and 683 * then will, if necessary, copy the constructed TC data into the actual TC 684 * buffer. If everything is successful the post context field is updated with 685 * the TCi so the controller can post the request to the hardware. enum sci_status 686 * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES 687 */ 688 static enum sci_status scic_sds_request_constructed_state_start_handler( 689 struct scic_sds_request *request) 690 { 691 struct scu_task_context *task_context; 692 693 if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { 694 request->io_tag = 695 scic_controller_allocate_io_tag(request->owning_controller); 696 } 697 698 /* Record the IO Tag in the request */ 699 if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { 700 task_context = request->task_context_buffer; 701 702 task_context->task_index = scic_sds_io_tag_get_index(request->io_tag); 703 704 switch (task_context->protocol_type) { 705 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 706 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 707 /* SSP/SMP Frame */ 708 task_context->type.ssp.tag = request->io_tag; 709 task_context->type.ssp.target_port_transfer_tag = 0xFFFF; 710 break; 711 712 case SCU_TASK_CONTEXT_PROTOCOL_STP: 713 /* 714 * STP/SATA Frame 715 * task_context->type.stp.ncq_tag = request->ncq_tag; */ 716 break; 717 718 case SCU_TASK_CONTEXT_PROTOCOL_NONE: 719 /* / @todo When do we set no protocol type? */ 720 break; 721 722 default: 723 /* This should never happen since we build the IO requests */ 724 break; 725 } 726 727 /* 728 * Check to see if we need to copy the task context buffer 729 * or have been building into the task context buffer */ 730 if (request->was_tag_assigned_by_user == false) { 731 scic_sds_controller_copy_task_context( 732 request->owning_controller, request); 733 } 734 735 /* Add to the post_context the io tag value */ 736 request->post_context |= scic_sds_io_tag_get_index(request->io_tag); 737 738 /* Everything is good go ahead and change state */ 739 sci_base_state_machine_change_state(&request->state_machine, 740 SCI_BASE_REQUEST_STATE_STARTED); 741 742 return SCI_SUCCESS; 743 } 744 745 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 746 } 747 748 /* 749 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T 750 * object receives a scic_sds_request_terminate() request. Since the request 751 * has not yet been posted to the hardware the request transitions to the 752 * completed state. enum sci_status SCI_SUCCESS 753 */ 754 static enum sci_status scic_sds_request_constructed_state_abort_handler( 755 struct scic_sds_request *request) 756 { 757 /* 758 * This request has been terminated by the user make sure that the correct 759 * status code is returned */ 760 scic_sds_request_set_status(request, 761 SCU_TASK_DONE_TASK_ABORT, 762 SCI_FAILURE_IO_TERMINATED); 763 764 sci_base_state_machine_change_state(&request->state_machine, 765 SCI_BASE_REQUEST_STATE_COMPLETED); 766 return SCI_SUCCESS; 767 } 768 769 /* 770 * ***************************************************************************** 771 * * STARTED STATE HANDLERS 772 * ***************************************************************************** */ 773 774 /* 775 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T 776 * object receives a scic_sds_request_terminate() request. Since the request 777 * has been posted to the hardware the io request state is changed to the 778 * aborting state. enum sci_status SCI_SUCCESS 779 */ 780 enum sci_status scic_sds_request_started_state_abort_handler( 781 struct scic_sds_request *request) 782 { 783 if (request->has_started_substate_machine) 784 sci_base_state_machine_stop(&request->started_substate_machine); 785 786 sci_base_state_machine_change_state(&request->state_machine, 787 SCI_BASE_REQUEST_STATE_ABORTING); 788 return SCI_SUCCESS; 789 } 790 791 /* 792 * scic_sds_request_started_state_tc_completion_handler() - This method process 793 * TC (task context) completions for normal IO request (i.e. Task/Abort 794 * Completions of type 0). This method will update the 795 * SCIC_SDS_IO_REQUEST_T::status field. 796 * @sci_req: This parameter specifies the request for which a completion 797 * occurred. 798 * @completion_code: This parameter specifies the completion code received from 799 * the SCU. 800 * 801 */ 802 static enum sci_status 803 scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req, 804 u32 completion_code) 805 { 806 u8 datapres; 807 struct ssp_response_iu *resp_iu; 808 809 /* 810 * TODO: Any SDMA return code of other than 0 is bad 811 * decode 0x003C0000 to determine SDMA status 812 */ 813 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 814 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 815 scic_sds_request_set_status(sci_req, 816 SCU_TASK_DONE_GOOD, 817 SCI_SUCCESS); 818 break; 819 820 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): 821 { 822 /* 823 * There are times when the SCU hardware will return an early 824 * response because the io request specified more data than is 825 * returned by the target device (mode pages, inquiry data, 826 * etc.). We must check the response stats to see if this is 827 * truly a failed request or a good request that just got 828 * completed early. 829 */ 830 struct ssp_response_iu *resp = &sci_req->ssp.rsp; 831 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 832 833 sci_swab32_cpy(&sci_req->ssp.rsp, 834 &sci_req->ssp.rsp, 835 word_cnt); 836 837 if (resp->status == 0) { 838 scic_sds_request_set_status( 839 sci_req, 840 SCU_TASK_DONE_GOOD, 841 SCI_SUCCESS_IO_DONE_EARLY); 842 } else { 843 scic_sds_request_set_status( 844 sci_req, 845 SCU_TASK_DONE_CHECK_RESPONSE, 846 SCI_FAILURE_IO_RESPONSE_VALID); 847 } 848 } 849 break; 850 851 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): 852 { 853 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 854 855 sci_swab32_cpy(&sci_req->ssp.rsp, 856 &sci_req->ssp.rsp, 857 word_cnt); 858 859 scic_sds_request_set_status(sci_req, 860 SCU_TASK_DONE_CHECK_RESPONSE, 861 SCI_FAILURE_IO_RESPONSE_VALID); 862 break; 863 } 864 865 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): 866 /* 867 * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame 868 * guaranteed to be received before this completion status is 869 * posted? 870 */ 871 resp_iu = &sci_req->ssp.rsp; 872 datapres = resp_iu->datapres; 873 874 if ((datapres == 0x01) || (datapres == 0x02)) { 875 scic_sds_request_set_status( 876 sci_req, 877 SCU_TASK_DONE_CHECK_RESPONSE, 878 SCI_FAILURE_IO_RESPONSE_VALID); 879 } else 880 scic_sds_request_set_status( 881 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); 882 break; 883 884 /* only stp device gets suspended. */ 885 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 886 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): 887 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): 888 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): 889 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): 890 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): 891 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 892 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): 893 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 894 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 895 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 896 if (sci_req->protocol == SCIC_STP_PROTOCOL) { 897 scic_sds_request_set_status( 898 sci_req, 899 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 900 SCU_COMPLETION_TL_STATUS_SHIFT, 901 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); 902 } else { 903 scic_sds_request_set_status( 904 sci_req, 905 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 906 SCU_COMPLETION_TL_STATUS_SHIFT, 907 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 908 } 909 break; 910 911 /* both stp/ssp device gets suspended */ 912 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): 913 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): 914 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): 915 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): 916 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): 917 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): 918 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): 919 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): 920 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): 921 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): 922 scic_sds_request_set_status( 923 sci_req, 924 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 925 SCU_COMPLETION_TL_STATUS_SHIFT, 926 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); 927 break; 928 929 /* neither ssp nor stp gets suspended. */ 930 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): 931 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): 932 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): 933 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): 934 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): 935 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): 936 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 937 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 938 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 939 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 940 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): 941 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): 942 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): 943 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): 944 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): 945 default: 946 scic_sds_request_set_status( 947 sci_req, 948 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 949 SCU_COMPLETION_TL_STATUS_SHIFT, 950 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 951 break; 952 } 953 954 /* 955 * TODO: This is probably wrong for ACK/NAK timeout conditions 956 */ 957 958 /* In all cases we will treat this as the completion of the IO req. */ 959 sci_base_state_machine_change_state( 960 &sci_req->state_machine, 961 SCI_BASE_REQUEST_STATE_COMPLETED); 962 return SCI_SUCCESS; 963 } 964 965 enum sci_status 966 scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code) 967 { 968 if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED && 969 request->has_started_substate_machine == false) 970 return scic_sds_request_started_state_tc_completion_handler(request, completion_code); 971 else if (request->state_handlers->tc_completion_handler) 972 return request->state_handlers->tc_completion_handler(request, completion_code); 973 974 dev_warn(scic_to_dev(request->owning_controller), 975 "%s: SCIC IO Request given task completion notification %x " 976 "while in wrong state %d\n", 977 __func__, 978 completion_code, 979 sci_base_state_machine_get_state(&request->state_machine)); 980 981 return SCI_FAILURE_INVALID_STATE; 982 983 } 984 985 /* 986 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T 987 * object receives a scic_sds_request_frame_handler() request. This method 988 * first determines the frame type received. If this is a response frame then 989 * the response data is copied to the io request response buffer for processing 990 * at completion time. If the frame type is not a response buffer an error is 991 * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE 992 */ 993 static enum sci_status 994 scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req, 995 u32 frame_index) 996 { 997 enum sci_status status; 998 u32 *frame_header; 999 struct ssp_frame_hdr ssp_hdr; 1000 ssize_t word_cnt; 1001 1002 status = scic_sds_unsolicited_frame_control_get_header( 1003 &(scic_sds_request_get_controller(sci_req)->uf_control), 1004 frame_index, 1005 (void **)&frame_header); 1006 1007 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); 1008 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); 1009 1010 if (ssp_hdr.frame_type == SSP_RESPONSE) { 1011 struct ssp_response_iu *resp_iu; 1012 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1013 1014 status = scic_sds_unsolicited_frame_control_get_buffer( 1015 &(scic_sds_request_get_controller(sci_req)->uf_control), 1016 frame_index, 1017 (void **)&resp_iu); 1018 1019 sci_swab32_cpy(&sci_req->ssp.rsp, 1020 resp_iu, word_cnt); 1021 1022 resp_iu = &sci_req->ssp.rsp; 1023 1024 if ((resp_iu->datapres == 0x01) || 1025 (resp_iu->datapres == 0x02)) { 1026 scic_sds_request_set_status( 1027 sci_req, 1028 SCU_TASK_DONE_CHECK_RESPONSE, 1029 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1030 } else 1031 scic_sds_request_set_status( 1032 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); 1033 } else { 1034 /* This was not a response frame why did it get forwarded? */ 1035 dev_err(scic_to_dev(sci_req->owning_controller), 1036 "%s: SCIC IO Request 0x%p received unexpected " 1037 "frame %d type 0x%02x\n", 1038 __func__, 1039 sci_req, 1040 frame_index, 1041 ssp_hdr.frame_type); 1042 } 1043 1044 /* 1045 * In any case we are done with this frame buffer return it to the 1046 * controller 1047 */ 1048 scic_sds_controller_release_frame( 1049 sci_req->owning_controller, frame_index); 1050 1051 return SCI_SUCCESS; 1052 } 1053 1054 /* 1055 * ***************************************************************************** 1056 * * COMPLETED STATE HANDLERS 1057 * ***************************************************************************** */ 1058 1059 1060 /* 1061 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T 1062 * object receives a scic_sds_request_complete() request. This method frees up 1063 * any io request resources that have been allocated and transitions the 1064 * request to its final state. Consider stopping the state machine instead of 1065 * transitioning to the final state? enum sci_status SCI_SUCCESS 1066 */ 1067 static enum sci_status scic_sds_request_completed_state_complete_handler( 1068 struct scic_sds_request *request) 1069 { 1070 if (request->was_tag_assigned_by_user != true) { 1071 scic_controller_free_io_tag( 1072 request->owning_controller, request->io_tag); 1073 } 1074 1075 if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) { 1076 scic_sds_controller_release_frame( 1077 request->owning_controller, request->saved_rx_frame_index); 1078 } 1079 1080 sci_base_state_machine_change_state(&request->state_machine, 1081 SCI_BASE_REQUEST_STATE_FINAL); 1082 return SCI_SUCCESS; 1083 } 1084 1085 /* 1086 * ***************************************************************************** 1087 * * ABORTING STATE HANDLERS 1088 * ***************************************************************************** */ 1089 1090 /* 1091 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T 1092 * object receives a scic_sds_request_terminate() request. This method is the 1093 * io request aborting state abort handlers. On receipt of a multiple 1094 * terminate requests the io request will transition to the completed state. 1095 * This should not happen in normal operation. enum sci_status SCI_SUCCESS 1096 */ 1097 static enum sci_status scic_sds_request_aborting_state_abort_handler( 1098 struct scic_sds_request *request) 1099 { 1100 sci_base_state_machine_change_state(&request->state_machine, 1101 SCI_BASE_REQUEST_STATE_COMPLETED); 1102 return SCI_SUCCESS; 1103 } 1104 1105 /* 1106 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T 1107 * object receives a scic_sds_request_task_completion() request. This method 1108 * decodes the completion type waiting for the abort task complete 1109 * notification. When the abort task complete is received the io request 1110 * transitions to the completed state. enum sci_status SCI_SUCCESS 1111 */ 1112 static enum sci_status scic_sds_request_aborting_state_tc_completion_handler( 1113 struct scic_sds_request *sci_req, 1114 u32 completion_code) 1115 { 1116 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1117 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 1118 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): 1119 scic_sds_request_set_status( 1120 sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED 1121 ); 1122 1123 sci_base_state_machine_change_state(&sci_req->state_machine, 1124 SCI_BASE_REQUEST_STATE_COMPLETED); 1125 break; 1126 1127 default: 1128 /* 1129 * Unless we get some strange error wait for the task abort to complete 1130 * TODO: Should there be a state change for this completion? */ 1131 break; 1132 } 1133 1134 return SCI_SUCCESS; 1135 } 1136 1137 /* 1138 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T 1139 * object receives a scic_sds_request_frame_handler() request. This method 1140 * discards the unsolicited frame since we are waiting for the abort task 1141 * completion. enum sci_status SCI_SUCCESS 1142 */ 1143 static enum sci_status scic_sds_request_aborting_state_frame_handler( 1144 struct scic_sds_request *sci_req, 1145 u32 frame_index) 1146 { 1147 /* TODO: Is it even possible to get an unsolicited frame in the aborting state? */ 1148 1149 scic_sds_controller_release_frame( 1150 sci_req->owning_controller, frame_index); 1151 1152 return SCI_SUCCESS; 1153 } 1154 1155 static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { 1156 [SCI_BASE_REQUEST_STATE_INITIAL] = { 1157 }, 1158 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { 1159 .start_handler = scic_sds_request_constructed_state_start_handler, 1160 .abort_handler = scic_sds_request_constructed_state_abort_handler, 1161 }, 1162 [SCI_BASE_REQUEST_STATE_STARTED] = { 1163 .abort_handler = scic_sds_request_started_state_abort_handler, 1164 .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler, 1165 .frame_handler = scic_sds_request_started_state_frame_handler, 1166 }, 1167 [SCI_BASE_REQUEST_STATE_COMPLETED] = { 1168 .complete_handler = scic_sds_request_completed_state_complete_handler, 1169 }, 1170 [SCI_BASE_REQUEST_STATE_ABORTING] = { 1171 .abort_handler = scic_sds_request_aborting_state_abort_handler, 1172 .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler, 1173 .frame_handler = scic_sds_request_aborting_state_frame_handler, 1174 }, 1175 [SCI_BASE_REQUEST_STATE_FINAL] = { 1176 }, 1177 }; 1178 1179 1180 /** 1181 * isci_request_process_response_iu() - This function sets the status and 1182 * response iu, in the task struct, from the request object for the upper 1183 * layer driver. 1184 * @sas_task: This parameter is the task struct from the upper layer driver. 1185 * @resp_iu: This parameter points to the response iu of the completed request. 1186 * @dev: This parameter specifies the linux device struct. 1187 * 1188 * none. 1189 */ 1190 static void isci_request_process_response_iu( 1191 struct sas_task *task, 1192 struct ssp_response_iu *resp_iu, 1193 struct device *dev) 1194 { 1195 dev_dbg(dev, 1196 "%s: resp_iu = %p " 1197 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 1198 "resp_iu->response_data_len = %x, " 1199 "resp_iu->sense_data_len = %x\nrepsonse data: ", 1200 __func__, 1201 resp_iu, 1202 resp_iu->status, 1203 resp_iu->datapres, 1204 resp_iu->response_data_len, 1205 resp_iu->sense_data_len); 1206 1207 task->task_status.stat = resp_iu->status; 1208 1209 /* libsas updates the task status fields based on the response iu. */ 1210 sas_ssp_task_response(dev, task, resp_iu); 1211 } 1212 1213 /** 1214 * isci_request_set_open_reject_status() - This function prepares the I/O 1215 * completion for OPEN_REJECT conditions. 1216 * @request: This parameter is the completed isci_request object. 1217 * @response_ptr: This parameter specifies the service response for the I/O. 1218 * @status_ptr: This parameter specifies the exec status for the I/O. 1219 * @complete_to_host_ptr: This parameter specifies the action to be taken by 1220 * the LLDD with respect to completing this request or forcing an abort 1221 * condition on the I/O. 1222 * @open_rej_reason: This parameter specifies the encoded reason for the 1223 * abandon-class reject. 1224 * 1225 * none. 1226 */ 1227 static void isci_request_set_open_reject_status( 1228 struct isci_request *request, 1229 struct sas_task *task, 1230 enum service_response *response_ptr, 1231 enum exec_status *status_ptr, 1232 enum isci_completion_selection *complete_to_host_ptr, 1233 enum sas_open_rej_reason open_rej_reason) 1234 { 1235 /* Task in the target is done. */ 1236 request->complete_in_target = true; 1237 *response_ptr = SAS_TASK_UNDELIVERED; 1238 *status_ptr = SAS_OPEN_REJECT; 1239 *complete_to_host_ptr = isci_perform_normal_io_completion; 1240 task->task_status.open_rej_reason = open_rej_reason; 1241 } 1242 1243 /** 1244 * isci_request_handle_controller_specific_errors() - This function decodes 1245 * controller-specific I/O completion error conditions. 1246 * @request: This parameter is the completed isci_request object. 1247 * @response_ptr: This parameter specifies the service response for the I/O. 1248 * @status_ptr: This parameter specifies the exec status for the I/O. 1249 * @complete_to_host_ptr: This parameter specifies the action to be taken by 1250 * the LLDD with respect to completing this request or forcing an abort 1251 * condition on the I/O. 1252 * 1253 * none. 1254 */ 1255 static void isci_request_handle_controller_specific_errors( 1256 struct isci_remote_device *isci_device, 1257 struct isci_request *request, 1258 struct sas_task *task, 1259 enum service_response *response_ptr, 1260 enum exec_status *status_ptr, 1261 enum isci_completion_selection *complete_to_host_ptr) 1262 { 1263 unsigned int cstatus; 1264 1265 cstatus = request->sci.scu_status; 1266 1267 dev_dbg(&request->isci_host->pdev->dev, 1268 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 1269 "- controller status = 0x%x\n", 1270 __func__, request, cstatus); 1271 1272 /* Decode the controller-specific errors; most 1273 * important is to recognize those conditions in which 1274 * the target may still have a task outstanding that 1275 * must be aborted. 1276 * 1277 * Note that there are SCU completion codes being 1278 * named in the decode below for which SCIC has already 1279 * done work to handle them in a way other than as 1280 * a controller-specific completion code; these are left 1281 * in the decode below for completeness sake. 1282 */ 1283 switch (cstatus) { 1284 case SCU_TASK_DONE_DMASETUP_DIRERR: 1285 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ 1286 case SCU_TASK_DONE_XFERCNT_ERR: 1287 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ 1288 if (task->task_proto == SAS_PROTOCOL_SMP) { 1289 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ 1290 *response_ptr = SAS_TASK_COMPLETE; 1291 1292 /* See if the device has been/is being stopped. Note 1293 * that we ignore the quiesce state, since we are 1294 * concerned about the actual device state. 1295 */ 1296 if ((isci_device->status == isci_stopping) || 1297 (isci_device->status == isci_stopped)) 1298 *status_ptr = SAS_DEVICE_UNKNOWN; 1299 else 1300 *status_ptr = SAS_ABORTED_TASK; 1301 1302 request->complete_in_target = true; 1303 1304 *complete_to_host_ptr = 1305 isci_perform_normal_io_completion; 1306 } else { 1307 /* Task in the target is not done. */ 1308 *response_ptr = SAS_TASK_UNDELIVERED; 1309 1310 if ((isci_device->status == isci_stopping) || 1311 (isci_device->status == isci_stopped)) 1312 *status_ptr = SAS_DEVICE_UNKNOWN; 1313 else 1314 *status_ptr = SAM_STAT_TASK_ABORTED; 1315 1316 request->complete_in_target = false; 1317 1318 *complete_to_host_ptr = 1319 isci_perform_error_io_completion; 1320 } 1321 1322 break; 1323 1324 case SCU_TASK_DONE_CRC_ERR: 1325 case SCU_TASK_DONE_NAK_CMD_ERR: 1326 case SCU_TASK_DONE_EXCESS_DATA: 1327 case SCU_TASK_DONE_UNEXP_FIS: 1328 /* Also SCU_TASK_DONE_UNEXP_RESP: */ 1329 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ 1330 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ 1331 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ 1332 /* These are conditions in which the target 1333 * has completed the task, so that no cleanup 1334 * is necessary. 1335 */ 1336 *response_ptr = SAS_TASK_COMPLETE; 1337 1338 /* See if the device has been/is being stopped. Note 1339 * that we ignore the quiesce state, since we are 1340 * concerned about the actual device state. 1341 */ 1342 if ((isci_device->status == isci_stopping) || 1343 (isci_device->status == isci_stopped)) 1344 *status_ptr = SAS_DEVICE_UNKNOWN; 1345 else 1346 *status_ptr = SAS_ABORTED_TASK; 1347 1348 request->complete_in_target = true; 1349 1350 *complete_to_host_ptr = isci_perform_normal_io_completion; 1351 break; 1352 1353 1354 /* Note that the only open reject completion codes seen here will be 1355 * abandon-class codes; all others are automatically retried in the SCU. 1356 */ 1357 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 1358 1359 isci_request_set_open_reject_status( 1360 request, task, response_ptr, status_ptr, 1361 complete_to_host_ptr, SAS_OREJ_WRONG_DEST); 1362 break; 1363 1364 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 1365 1366 /* Note - the return of AB0 will change when 1367 * libsas implements detection of zone violations. 1368 */ 1369 isci_request_set_open_reject_status( 1370 request, task, response_ptr, status_ptr, 1371 complete_to_host_ptr, SAS_OREJ_RESV_AB0); 1372 break; 1373 1374 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 1375 1376 isci_request_set_open_reject_status( 1377 request, task, response_ptr, status_ptr, 1378 complete_to_host_ptr, SAS_OREJ_RESV_AB1); 1379 break; 1380 1381 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 1382 1383 isci_request_set_open_reject_status( 1384 request, task, response_ptr, status_ptr, 1385 complete_to_host_ptr, SAS_OREJ_RESV_AB2); 1386 break; 1387 1388 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 1389 1390 isci_request_set_open_reject_status( 1391 request, task, response_ptr, status_ptr, 1392 complete_to_host_ptr, SAS_OREJ_RESV_AB3); 1393 break; 1394 1395 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 1396 1397 isci_request_set_open_reject_status( 1398 request, task, response_ptr, status_ptr, 1399 complete_to_host_ptr, SAS_OREJ_BAD_DEST); 1400 break; 1401 1402 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 1403 1404 isci_request_set_open_reject_status( 1405 request, task, response_ptr, status_ptr, 1406 complete_to_host_ptr, SAS_OREJ_STP_NORES); 1407 break; 1408 1409 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 1410 1411 isci_request_set_open_reject_status( 1412 request, task, response_ptr, status_ptr, 1413 complete_to_host_ptr, SAS_OREJ_EPROTO); 1414 break; 1415 1416 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 1417 1418 isci_request_set_open_reject_status( 1419 request, task, response_ptr, status_ptr, 1420 complete_to_host_ptr, SAS_OREJ_CONN_RATE); 1421 break; 1422 1423 case SCU_TASK_DONE_LL_R_ERR: 1424 /* Also SCU_TASK_DONE_ACK_NAK_TO: */ 1425 case SCU_TASK_DONE_LL_PERR: 1426 case SCU_TASK_DONE_LL_SY_TERM: 1427 /* Also SCU_TASK_DONE_NAK_ERR:*/ 1428 case SCU_TASK_DONE_LL_LF_TERM: 1429 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ 1430 case SCU_TASK_DONE_LL_ABORT_ERR: 1431 case SCU_TASK_DONE_SEQ_INV_TYPE: 1432 /* Also SCU_TASK_DONE_UNEXP_XR: */ 1433 case SCU_TASK_DONE_XR_IU_LEN_ERR: 1434 case SCU_TASK_DONE_INV_FIS_LEN: 1435 /* Also SCU_TASK_DONE_XR_WD_LEN: */ 1436 case SCU_TASK_DONE_SDMA_ERR: 1437 case SCU_TASK_DONE_OFFSET_ERR: 1438 case SCU_TASK_DONE_MAX_PLD_ERR: 1439 case SCU_TASK_DONE_LF_ERR: 1440 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ 1441 case SCU_TASK_DONE_SMP_LL_RX_ERR: 1442 case SCU_TASK_DONE_UNEXP_DATA: 1443 case SCU_TASK_DONE_UNEXP_SDBFIS: 1444 case SCU_TASK_DONE_REG_ERR: 1445 case SCU_TASK_DONE_SDB_ERR: 1446 case SCU_TASK_DONE_TASK_ABORT: 1447 default: 1448 /* Task in the target is not done. */ 1449 *response_ptr = SAS_TASK_UNDELIVERED; 1450 *status_ptr = SAM_STAT_TASK_ABORTED; 1451 request->complete_in_target = false; 1452 1453 *complete_to_host_ptr = isci_perform_error_io_completion; 1454 break; 1455 } 1456 } 1457 1458 /** 1459 * isci_task_save_for_upper_layer_completion() - This function saves the 1460 * request for later completion to the upper layer driver. 1461 * @host: This parameter is a pointer to the host on which the the request 1462 * should be queued (either as an error or success). 1463 * @request: This parameter is the completed request. 1464 * @response: This parameter is the response code for the completed task. 1465 * @status: This parameter is the status code for the completed task. 1466 * 1467 * none. 1468 */ 1469 static void isci_task_save_for_upper_layer_completion( 1470 struct isci_host *host, 1471 struct isci_request *request, 1472 enum service_response response, 1473 enum exec_status status, 1474 enum isci_completion_selection task_notification_selection) 1475 { 1476 struct sas_task *task = isci_request_access_task(request); 1477 1478 task_notification_selection 1479 = isci_task_set_completion_status(task, response, status, 1480 task_notification_selection); 1481 1482 /* Tasks aborted specifically by a call to the lldd_abort_task 1483 * function should not be completed to the host in the regular path. 1484 */ 1485 switch (task_notification_selection) { 1486 1487 case isci_perform_normal_io_completion: 1488 1489 /* Normal notification (task_done) */ 1490 dev_dbg(&host->pdev->dev, 1491 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", 1492 __func__, 1493 task, 1494 task->task_status.resp, response, 1495 task->task_status.stat, status); 1496 /* Add to the completed list. */ 1497 list_add(&request->completed_node, 1498 &host->requests_to_complete); 1499 1500 /* Take the request off the device's pending request list. */ 1501 list_del_init(&request->dev_node); 1502 break; 1503 1504 case isci_perform_aborted_io_completion: 1505 /* No notification to libsas because this request is 1506 * already in the abort path. 1507 */ 1508 dev_warn(&host->pdev->dev, 1509 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", 1510 __func__, 1511 task, 1512 task->task_status.resp, response, 1513 task->task_status.stat, status); 1514 1515 /* Wake up whatever process was waiting for this 1516 * request to complete. 1517 */ 1518 WARN_ON(request->io_request_completion == NULL); 1519 1520 if (request->io_request_completion != NULL) { 1521 1522 /* Signal whoever is waiting that this 1523 * request is complete. 1524 */ 1525 complete(request->io_request_completion); 1526 } 1527 break; 1528 1529 case isci_perform_error_io_completion: 1530 /* Use sas_task_abort */ 1531 dev_warn(&host->pdev->dev, 1532 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", 1533 __func__, 1534 task, 1535 task->task_status.resp, response, 1536 task->task_status.stat, status); 1537 /* Add to the aborted list. */ 1538 list_add(&request->completed_node, 1539 &host->requests_to_errorback); 1540 break; 1541 1542 default: 1543 dev_warn(&host->pdev->dev, 1544 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", 1545 __func__, 1546 task, 1547 task->task_status.resp, response, 1548 task->task_status.stat, status); 1549 1550 /* Add to the error to libsas list. */ 1551 list_add(&request->completed_node, 1552 &host->requests_to_errorback); 1553 break; 1554 } 1555 } 1556 1557 static void isci_request_io_request_complete(struct isci_host *isci_host, 1558 struct isci_request *request, 1559 enum sci_io_status completion_status) 1560 { 1561 struct sas_task *task = isci_request_access_task(request); 1562 struct ssp_response_iu *resp_iu; 1563 void *resp_buf; 1564 unsigned long task_flags; 1565 struct isci_remote_device *isci_device = request->isci_device; 1566 enum service_response response = SAS_TASK_UNDELIVERED; 1567 enum exec_status status = SAS_ABORTED_TASK; 1568 enum isci_request_status request_status; 1569 enum isci_completion_selection complete_to_host 1570 = isci_perform_normal_io_completion; 1571 1572 dev_dbg(&isci_host->pdev->dev, 1573 "%s: request = %p, task = %p,\n" 1574 "task->data_dir = %d completion_status = 0x%x\n", 1575 __func__, 1576 request, 1577 task, 1578 task->data_dir, 1579 completion_status); 1580 1581 spin_lock(&request->state_lock); 1582 request_status = isci_request_get_state(request); 1583 1584 /* Decode the request status. Note that if the request has been 1585 * aborted by a task management function, we don't care 1586 * what the status is. 1587 */ 1588 switch (request_status) { 1589 1590 case aborted: 1591 /* "aborted" indicates that the request was aborted by a task 1592 * management function, since once a task management request is 1593 * perfomed by the device, the request only completes because 1594 * of the subsequent driver terminate. 1595 * 1596 * Aborted also means an external thread is explicitly managing 1597 * this request, so that we do not complete it up the stack. 1598 * 1599 * The target is still there (since the TMF was successful). 1600 */ 1601 request->complete_in_target = true; 1602 response = SAS_TASK_COMPLETE; 1603 1604 /* See if the device has been/is being stopped. Note 1605 * that we ignore the quiesce state, since we are 1606 * concerned about the actual device state. 1607 */ 1608 if ((isci_device->status == isci_stopping) 1609 || (isci_device->status == isci_stopped) 1610 ) 1611 status = SAS_DEVICE_UNKNOWN; 1612 else 1613 status = SAS_ABORTED_TASK; 1614 1615 complete_to_host = isci_perform_aborted_io_completion; 1616 /* This was an aborted request. */ 1617 1618 spin_unlock(&request->state_lock); 1619 break; 1620 1621 case aborting: 1622 /* aborting means that the task management function tried and 1623 * failed to abort the request. We need to note the request 1624 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the 1625 * target as down. 1626 * 1627 * Aborting also means an external thread is explicitly managing 1628 * this request, so that we do not complete it up the stack. 1629 */ 1630 request->complete_in_target = true; 1631 response = SAS_TASK_UNDELIVERED; 1632 1633 if ((isci_device->status == isci_stopping) || 1634 (isci_device->status == isci_stopped)) 1635 /* The device has been /is being stopped. Note that 1636 * we ignore the quiesce state, since we are 1637 * concerned about the actual device state. 1638 */ 1639 status = SAS_DEVICE_UNKNOWN; 1640 else 1641 status = SAS_PHY_DOWN; 1642 1643 complete_to_host = isci_perform_aborted_io_completion; 1644 1645 /* This was an aborted request. */ 1646 1647 spin_unlock(&request->state_lock); 1648 break; 1649 1650 case terminating: 1651 1652 /* This was an terminated request. This happens when 1653 * the I/O is being terminated because of an action on 1654 * the device (reset, tear down, etc.), and the I/O needs 1655 * to be completed up the stack. 1656 */ 1657 request->complete_in_target = true; 1658 response = SAS_TASK_UNDELIVERED; 1659 1660 /* See if the device has been/is being stopped. Note 1661 * that we ignore the quiesce state, since we are 1662 * concerned about the actual device state. 1663 */ 1664 if ((isci_device->status == isci_stopping) || 1665 (isci_device->status == isci_stopped)) 1666 status = SAS_DEVICE_UNKNOWN; 1667 else 1668 status = SAS_ABORTED_TASK; 1669 1670 complete_to_host = isci_perform_aborted_io_completion; 1671 1672 /* This was a terminated request. */ 1673 1674 spin_unlock(&request->state_lock); 1675 break; 1676 1677 default: 1678 1679 /* The request is done from an SCU HW perspective. */ 1680 request->status = completed; 1681 1682 spin_unlock(&request->state_lock); 1683 1684 /* This is an active request being completed from the core. */ 1685 switch (completion_status) { 1686 1687 case SCI_IO_FAILURE_RESPONSE_VALID: 1688 dev_dbg(&isci_host->pdev->dev, 1689 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", 1690 __func__, 1691 request, 1692 task); 1693 1694 if (sas_protocol_ata(task->task_proto)) { 1695 resp_buf = &request->sci.stp.rsp; 1696 isci_request_process_stp_response(task, 1697 resp_buf); 1698 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 1699 1700 /* crack the iu response buffer. */ 1701 resp_iu = &request->sci.ssp.rsp; 1702 isci_request_process_response_iu(task, resp_iu, 1703 &isci_host->pdev->dev); 1704 1705 } else if (SAS_PROTOCOL_SMP == task->task_proto) { 1706 1707 dev_err(&isci_host->pdev->dev, 1708 "%s: SCI_IO_FAILURE_RESPONSE_VALID: " 1709 "SAS_PROTOCOL_SMP protocol\n", 1710 __func__); 1711 1712 } else 1713 dev_err(&isci_host->pdev->dev, 1714 "%s: unknown protocol\n", __func__); 1715 1716 /* use the task status set in the task struct by the 1717 * isci_request_process_response_iu call. 1718 */ 1719 request->complete_in_target = true; 1720 response = task->task_status.resp; 1721 status = task->task_status.stat; 1722 break; 1723 1724 case SCI_IO_SUCCESS: 1725 case SCI_IO_SUCCESS_IO_DONE_EARLY: 1726 1727 response = SAS_TASK_COMPLETE; 1728 status = SAM_STAT_GOOD; 1729 request->complete_in_target = true; 1730 1731 if (task->task_proto == SAS_PROTOCOL_SMP) { 1732 void *rsp = &request->sci.smp.rsp; 1733 1734 dev_dbg(&isci_host->pdev->dev, 1735 "%s: SMP protocol completion\n", 1736 __func__); 1737 1738 sg_copy_from_buffer( 1739 &task->smp_task.smp_resp, 1, 1740 rsp, sizeof(struct smp_resp)); 1741 } else if (completion_status 1742 == SCI_IO_SUCCESS_IO_DONE_EARLY) { 1743 1744 /* This was an SSP / STP / SATA transfer. 1745 * There is a possibility that less data than 1746 * the maximum was transferred. 1747 */ 1748 u32 transferred_length = sci_req_tx_bytes(&request->sci); 1749 1750 task->task_status.residual 1751 = task->total_xfer_len - transferred_length; 1752 1753 /* If there were residual bytes, call this an 1754 * underrun. 1755 */ 1756 if (task->task_status.residual != 0) 1757 status = SAS_DATA_UNDERRUN; 1758 1759 dev_dbg(&isci_host->pdev->dev, 1760 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 1761 __func__, 1762 status); 1763 1764 } else 1765 dev_dbg(&isci_host->pdev->dev, 1766 "%s: SCI_IO_SUCCESS\n", 1767 __func__); 1768 1769 break; 1770 1771 case SCI_IO_FAILURE_TERMINATED: 1772 dev_dbg(&isci_host->pdev->dev, 1773 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 1774 __func__, 1775 request, 1776 task); 1777 1778 /* The request was terminated explicitly. No handling 1779 * is needed in the SCSI error handler path. 1780 */ 1781 request->complete_in_target = true; 1782 response = SAS_TASK_UNDELIVERED; 1783 1784 /* See if the device has been/is being stopped. Note 1785 * that we ignore the quiesce state, since we are 1786 * concerned about the actual device state. 1787 */ 1788 if ((isci_device->status == isci_stopping) || 1789 (isci_device->status == isci_stopped)) 1790 status = SAS_DEVICE_UNKNOWN; 1791 else 1792 status = SAS_ABORTED_TASK; 1793 1794 complete_to_host = isci_perform_normal_io_completion; 1795 break; 1796 1797 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 1798 1799 isci_request_handle_controller_specific_errors( 1800 isci_device, request, task, &response, &status, 1801 &complete_to_host); 1802 1803 break; 1804 1805 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 1806 /* This is a special case, in that the I/O completion 1807 * is telling us that the device needs a reset. 1808 * In order for the device reset condition to be 1809 * noticed, the I/O has to be handled in the error 1810 * handler. Set the reset flag and cause the 1811 * SCSI error thread to be scheduled. 1812 */ 1813 spin_lock_irqsave(&task->task_state_lock, task_flags); 1814 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 1815 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 1816 1817 /* Fail the I/O. */ 1818 response = SAS_TASK_UNDELIVERED; 1819 status = SAM_STAT_TASK_ABORTED; 1820 1821 complete_to_host = isci_perform_error_io_completion; 1822 request->complete_in_target = false; 1823 break; 1824 1825 default: 1826 /* Catch any otherwise unhandled error codes here. */ 1827 dev_warn(&isci_host->pdev->dev, 1828 "%s: invalid completion code: 0x%x - " 1829 "isci_request = %p\n", 1830 __func__, completion_status, request); 1831 1832 response = SAS_TASK_UNDELIVERED; 1833 1834 /* See if the device has been/is being stopped. Note 1835 * that we ignore the quiesce state, since we are 1836 * concerned about the actual device state. 1837 */ 1838 if ((isci_device->status == isci_stopping) || 1839 (isci_device->status == isci_stopped)) 1840 status = SAS_DEVICE_UNKNOWN; 1841 else 1842 status = SAS_ABORTED_TASK; 1843 1844 complete_to_host = isci_perform_error_io_completion; 1845 request->complete_in_target = false; 1846 break; 1847 } 1848 break; 1849 } 1850 1851 isci_request_unmap_sgl(request, isci_host->pdev); 1852 1853 /* Put the completed request on the correct list */ 1854 isci_task_save_for_upper_layer_completion(isci_host, request, response, 1855 status, complete_to_host 1856 ); 1857 1858 /* complete the io request to the core. */ 1859 scic_controller_complete_io(&isci_host->sci, 1860 &isci_device->sci, 1861 &request->sci); 1862 /* set terminated handle so it cannot be completed or 1863 * terminated again, and to cause any calls into abort 1864 * task to recognize the already completed case. 1865 */ 1866 request->terminated = true; 1867 1868 isci_host_can_dequeue(isci_host, 1); 1869 } 1870 1871 /** 1872 * scic_sds_request_initial_state_enter() - 1873 * @object: This parameter specifies the base object for which the state 1874 * transition is occurring. 1875 * 1876 * This method implements the actions taken when entering the 1877 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial 1878 * base request is constructed. Entry into the initial state sets all handlers 1879 * for the io request object to their default handlers. none 1880 */ 1881 static void scic_sds_request_initial_state_enter(void *object) 1882 { 1883 struct scic_sds_request *sci_req = object; 1884 1885 SET_STATE_HANDLER( 1886 sci_req, 1887 scic_sds_request_state_handler_table, 1888 SCI_BASE_REQUEST_STATE_INITIAL 1889 ); 1890 } 1891 1892 /** 1893 * scic_sds_request_constructed_state_enter() - 1894 * @object: The io request object that is to enter the constructed state. 1895 * 1896 * This method implements the actions taken when entering the 1897 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers 1898 * for the the constructed state. none 1899 */ 1900 static void scic_sds_request_constructed_state_enter(void *object) 1901 { 1902 struct scic_sds_request *sci_req = object; 1903 1904 SET_STATE_HANDLER( 1905 sci_req, 1906 scic_sds_request_state_handler_table, 1907 SCI_BASE_REQUEST_STATE_CONSTRUCTED 1908 ); 1909 } 1910 1911 /** 1912 * scic_sds_request_started_state_enter() - 1913 * @object: This parameter specifies the base object for which the state 1914 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. 1915 * 1916 * This method implements the actions taken when entering the 1917 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a 1918 * SCSI Task request we must enter the started substate machine. none 1919 */ 1920 static void scic_sds_request_started_state_enter(void *object) 1921 { 1922 struct scic_sds_request *sci_req = object; 1923 1924 SET_STATE_HANDLER( 1925 sci_req, 1926 scic_sds_request_state_handler_table, 1927 SCI_BASE_REQUEST_STATE_STARTED 1928 ); 1929 1930 /* 1931 * Most of the request state machines have a started substate machine so 1932 * start its execution on the entry to the started state. */ 1933 if (sci_req->has_started_substate_machine == true) 1934 sci_base_state_machine_start(&sci_req->started_substate_machine); 1935 } 1936 1937 /** 1938 * scic_sds_request_started_state_exit() - 1939 * @object: This parameter specifies the base object for which the state 1940 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST 1941 * object. 1942 * 1943 * This method implements the actions taken when exiting the 1944 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be 1945 * to stop the started substate machine. none 1946 */ 1947 static void scic_sds_request_started_state_exit(void *object) 1948 { 1949 struct scic_sds_request *sci_req = object; 1950 1951 if (sci_req->has_started_substate_machine == true) 1952 sci_base_state_machine_stop(&sci_req->started_substate_machine); 1953 } 1954 1955 /** 1956 * scic_sds_request_completed_state_enter() - 1957 * @object: This parameter specifies the base object for which the state 1958 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST 1959 * object. 1960 * 1961 * This method implements the actions taken when entering the 1962 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the 1963 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request 1964 * completion status and convert it to an enum sci_status to return in the 1965 * completion callback function. none 1966 */ 1967 static void scic_sds_request_completed_state_enter(void *object) 1968 { 1969 struct scic_sds_request *sci_req = object; 1970 struct scic_sds_controller *scic = 1971 scic_sds_request_get_controller(sci_req); 1972 struct isci_host *ihost = scic_to_ihost(scic); 1973 struct isci_request *ireq = sci_req_to_ireq(sci_req); 1974 1975 SET_STATE_HANDLER(sci_req, 1976 scic_sds_request_state_handler_table, 1977 SCI_BASE_REQUEST_STATE_COMPLETED); 1978 1979 /* Tell the SCI_USER that the IO request is complete */ 1980 if (sci_req->is_task_management_request == false) 1981 isci_request_io_request_complete(ihost, ireq, 1982 sci_req->sci_status); 1983 else 1984 isci_task_request_complete(ihost, ireq, sci_req->sci_status); 1985 } 1986 1987 /** 1988 * scic_sds_request_aborting_state_enter() - 1989 * @object: This parameter specifies the base object for which the state 1990 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST 1991 * object. 1992 * 1993 * This method implements the actions taken when entering the 1994 * SCI_BASE_REQUEST_STATE_ABORTING state. none 1995 */ 1996 static void scic_sds_request_aborting_state_enter(void *object) 1997 { 1998 struct scic_sds_request *sci_req = object; 1999 2000 /* Setting the abort bit in the Task Context is required by the silicon. */ 2001 sci_req->task_context_buffer->abort = 1; 2002 2003 SET_STATE_HANDLER( 2004 sci_req, 2005 scic_sds_request_state_handler_table, 2006 SCI_BASE_REQUEST_STATE_ABORTING 2007 ); 2008 } 2009 2010 /** 2011 * scic_sds_request_final_state_enter() - 2012 * @object: This parameter specifies the base object for which the state 2013 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. 2014 * 2015 * This method implements the actions taken when entering the 2016 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the 2017 * state handlers in place. none 2018 */ 2019 static void scic_sds_request_final_state_enter(void *object) 2020 { 2021 struct scic_sds_request *sci_req = object; 2022 2023 SET_STATE_HANDLER( 2024 sci_req, 2025 scic_sds_request_state_handler_table, 2026 SCI_BASE_REQUEST_STATE_FINAL 2027 ); 2028 } 2029 2030 static const struct sci_base_state scic_sds_request_state_table[] = { 2031 [SCI_BASE_REQUEST_STATE_INITIAL] = { 2032 .enter_state = scic_sds_request_initial_state_enter, 2033 }, 2034 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { 2035 .enter_state = scic_sds_request_constructed_state_enter, 2036 }, 2037 [SCI_BASE_REQUEST_STATE_STARTED] = { 2038 .enter_state = scic_sds_request_started_state_enter, 2039 .exit_state = scic_sds_request_started_state_exit 2040 }, 2041 [SCI_BASE_REQUEST_STATE_COMPLETED] = { 2042 .enter_state = scic_sds_request_completed_state_enter, 2043 }, 2044 [SCI_BASE_REQUEST_STATE_ABORTING] = { 2045 .enter_state = scic_sds_request_aborting_state_enter, 2046 }, 2047 [SCI_BASE_REQUEST_STATE_FINAL] = { 2048 .enter_state = scic_sds_request_final_state_enter, 2049 }, 2050 }; 2051 2052 static void scic_sds_general_request_construct(struct scic_sds_controller *scic, 2053 struct scic_sds_remote_device *sci_dev, 2054 u16 io_tag, struct scic_sds_request *sci_req) 2055 { 2056 sci_base_state_machine_construct(&sci_req->state_machine, sci_req, 2057 scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL); 2058 sci_base_state_machine_start(&sci_req->state_machine); 2059 2060 sci_req->io_tag = io_tag; 2061 sci_req->owning_controller = scic; 2062 sci_req->target_device = sci_dev; 2063 sci_req->has_started_substate_machine = false; 2064 sci_req->protocol = SCIC_NO_PROTOCOL; 2065 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 2066 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); 2067 2068 sci_req->sci_status = SCI_SUCCESS; 2069 sci_req->scu_status = 0; 2070 sci_req->post_context = 0xFFFFFFFF; 2071 2072 sci_req->is_task_management_request = false; 2073 2074 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { 2075 sci_req->was_tag_assigned_by_user = false; 2076 sci_req->task_context_buffer = NULL; 2077 } else { 2078 sci_req->was_tag_assigned_by_user = true; 2079 2080 sci_req->task_context_buffer = 2081 scic_sds_controller_get_task_context_buffer(scic, io_tag); 2082 } 2083 } 2084 2085 static enum sci_status 2086 scic_io_request_construct(struct scic_sds_controller *scic, 2087 struct scic_sds_remote_device *sci_dev, 2088 u16 io_tag, struct scic_sds_request *sci_req) 2089 { 2090 struct domain_device *dev = sci_dev_to_domain(sci_dev); 2091 enum sci_status status = SCI_SUCCESS; 2092 2093 /* Build the common part of the request */ 2094 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); 2095 2096 if (sci_dev->rnc.remote_node_index == 2097 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 2098 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 2099 2100 if (dev->dev_type == SAS_END_DEV) 2101 scic_sds_ssp_io_request_assign_buffers(sci_req); 2102 else if ((dev->dev_type == SATA_DEV) || 2103 (dev->tproto & SAS_PROTOCOL_STP)) { 2104 scic_sds_stp_request_assign_buffers(sci_req); 2105 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); 2106 } else if (dev_is_expander(dev)) { 2107 scic_sds_smp_request_assign_buffers(sci_req); 2108 memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd)); 2109 } else 2110 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 2111 2112 if (status == SCI_SUCCESS) { 2113 memset(sci_req->task_context_buffer, 0, 2114 offsetof(struct scu_task_context, sgl_pair_ab)); 2115 } 2116 2117 return status; 2118 } 2119 2120 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, 2121 struct scic_sds_remote_device *sci_dev, 2122 u16 io_tag, struct scic_sds_request *sci_req) 2123 { 2124 struct domain_device *dev = sci_dev_to_domain(sci_dev); 2125 enum sci_status status = SCI_SUCCESS; 2126 2127 /* Build the common part of the request */ 2128 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); 2129 2130 if (dev->dev_type == SAS_END_DEV) { 2131 scic_sds_ssp_task_request_assign_buffers(sci_req); 2132 2133 sci_req->has_started_substate_machine = true; 2134 2135 /* Construct the started sub-state machine. */ 2136 sci_base_state_machine_construct( 2137 &sci_req->started_substate_machine, 2138 sci_req, 2139 scic_sds_io_request_started_task_mgmt_substate_table, 2140 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION 2141 ); 2142 } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) 2143 scic_sds_stp_request_assign_buffers(sci_req); 2144 else 2145 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 2146 2147 if (status == SCI_SUCCESS) { 2148 sci_req->is_task_management_request = true; 2149 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); 2150 } 2151 2152 return status; 2153 } 2154 2155 static enum sci_status isci_request_ssp_request_construct( 2156 struct isci_request *request) 2157 { 2158 enum sci_status status; 2159 2160 dev_dbg(&request->isci_host->pdev->dev, 2161 "%s: request = %p\n", 2162 __func__, 2163 request); 2164 status = scic_io_request_construct_basic_ssp(&request->sci); 2165 return status; 2166 } 2167 2168 static enum sci_status isci_request_stp_request_construct( 2169 struct isci_request *request) 2170 { 2171 struct sas_task *task = isci_request_access_task(request); 2172 enum sci_status status; 2173 struct host_to_dev_fis *register_fis; 2174 2175 dev_dbg(&request->isci_host->pdev->dev, 2176 "%s: request = %p\n", 2177 __func__, 2178 request); 2179 2180 /* Get the host_to_dev_fis from the core and copy 2181 * the fis from the task into it. 2182 */ 2183 register_fis = isci_sata_task_to_fis_copy(task); 2184 2185 status = scic_io_request_construct_basic_sata(&request->sci); 2186 2187 /* Set the ncq tag in the fis, from the queue 2188 * command in the task. 2189 */ 2190 if (isci_sata_is_task_ncq(task)) { 2191 2192 isci_sata_set_ncq_tag( 2193 register_fis, 2194 task 2195 ); 2196 } 2197 2198 return status; 2199 } 2200 2201 /* 2202 * isci_smp_request_build() - This function builds the smp request. 2203 * @ireq: This parameter points to the isci_request allocated in the 2204 * request construct function. 2205 * 2206 * SCI_SUCCESS on successfull completion, or specific failure code. 2207 */ 2208 static enum sci_status isci_smp_request_build(struct isci_request *ireq) 2209 { 2210 enum sci_status status = SCI_FAILURE; 2211 struct sas_task *task = isci_request_access_task(ireq); 2212 struct scic_sds_request *sci_req = &ireq->sci; 2213 2214 dev_dbg(&ireq->isci_host->pdev->dev, 2215 "%s: request = %p\n", __func__, ireq); 2216 2217 dev_dbg(&ireq->isci_host->pdev->dev, 2218 "%s: smp_req len = %d\n", 2219 __func__, 2220 task->smp_task.smp_req.length); 2221 2222 /* copy the smp_command to the address; */ 2223 sg_copy_to_buffer(&task->smp_task.smp_req, 1, 2224 &sci_req->smp.cmd, 2225 sizeof(struct smp_req)); 2226 2227 status = scic_io_request_construct_smp(sci_req); 2228 if (status != SCI_SUCCESS) 2229 dev_warn(&ireq->isci_host->pdev->dev, 2230 "%s: failed with status = %d\n", 2231 __func__, 2232 status); 2233 2234 return status; 2235 } 2236 2237 /** 2238 * isci_io_request_build() - This function builds the io request object. 2239 * @isci_host: This parameter specifies the ISCI host object 2240 * @request: This parameter points to the isci_request object allocated in the 2241 * request construct function. 2242 * @sci_device: This parameter is the handle for the sci core's remote device 2243 * object that is the destination for this request. 2244 * 2245 * SCI_SUCCESS on successfull completion, or specific failure code. 2246 */ 2247 static enum sci_status isci_io_request_build( 2248 struct isci_host *isci_host, 2249 struct isci_request *request, 2250 struct isci_remote_device *isci_device) 2251 { 2252 enum sci_status status = SCI_SUCCESS; 2253 struct sas_task *task = isci_request_access_task(request); 2254 struct scic_sds_remote_device *sci_device = &isci_device->sci; 2255 2256 dev_dbg(&isci_host->pdev->dev, 2257 "%s: isci_device = 0x%p; request = %p, " 2258 "num_scatter = %d\n", 2259 __func__, 2260 isci_device, 2261 request, 2262 task->num_scatter); 2263 2264 /* map the sgl addresses, if present. 2265 * libata does the mapping for sata devices 2266 * before we get the request. 2267 */ 2268 if (task->num_scatter && 2269 !sas_protocol_ata(task->task_proto) && 2270 !(SAS_PROTOCOL_SMP & task->task_proto)) { 2271 2272 request->num_sg_entries = dma_map_sg( 2273 &isci_host->pdev->dev, 2274 task->scatter, 2275 task->num_scatter, 2276 task->data_dir 2277 ); 2278 2279 if (request->num_sg_entries == 0) 2280 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 2281 } 2282 2283 /* build the common request object. For now, 2284 * we will let the core allocate the IO tag. 2285 */ 2286 status = scic_io_request_construct(&isci_host->sci, sci_device, 2287 SCI_CONTROLLER_INVALID_IO_TAG, 2288 &request->sci); 2289 2290 if (status != SCI_SUCCESS) { 2291 dev_warn(&isci_host->pdev->dev, 2292 "%s: failed request construct\n", 2293 __func__); 2294 return SCI_FAILURE; 2295 } 2296 2297 switch (task->task_proto) { 2298 case SAS_PROTOCOL_SMP: 2299 status = isci_smp_request_build(request); 2300 break; 2301 case SAS_PROTOCOL_SSP: 2302 status = isci_request_ssp_request_construct(request); 2303 break; 2304 case SAS_PROTOCOL_SATA: 2305 case SAS_PROTOCOL_STP: 2306 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2307 status = isci_request_stp_request_construct(request); 2308 break; 2309 default: 2310 dev_warn(&isci_host->pdev->dev, 2311 "%s: unknown protocol\n", __func__); 2312 return SCI_FAILURE; 2313 } 2314 2315 return SCI_SUCCESS; 2316 } 2317 2318 /** 2319 * isci_request_alloc_core() - This function gets the request object from the 2320 * isci_host dma cache. 2321 * @isci_host: This parameter specifies the ISCI host object 2322 * @isci_request: This parameter will contain the pointer to the new 2323 * isci_request object. 2324 * @isci_device: This parameter is the pointer to the isci remote device object 2325 * that is the destination for this request. 2326 * @gfp_flags: This parameter specifies the os allocation flags. 2327 * 2328 * SCI_SUCCESS on successfull completion, or specific failure code. 2329 */ 2330 static int isci_request_alloc_core( 2331 struct isci_host *isci_host, 2332 struct isci_request **isci_request, 2333 struct isci_remote_device *isci_device, 2334 gfp_t gfp_flags) 2335 { 2336 int ret = 0; 2337 dma_addr_t handle; 2338 struct isci_request *request; 2339 2340 2341 /* get pointer to dma memory. This actually points 2342 * to both the isci_remote_device object and the 2343 * sci object. The isci object is at the beginning 2344 * of the memory allocated here. 2345 */ 2346 request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle); 2347 if (!request) { 2348 dev_warn(&isci_host->pdev->dev, 2349 "%s: dma_pool_alloc returned NULL\n", __func__); 2350 return -ENOMEM; 2351 } 2352 2353 /* initialize the request object. */ 2354 spin_lock_init(&request->state_lock); 2355 request->request_daddr = handle; 2356 request->isci_host = isci_host; 2357 request->isci_device = isci_device; 2358 request->io_request_completion = NULL; 2359 request->terminated = false; 2360 2361 request->num_sg_entries = 0; 2362 2363 request->complete_in_target = false; 2364 2365 INIT_LIST_HEAD(&request->completed_node); 2366 INIT_LIST_HEAD(&request->dev_node); 2367 2368 *isci_request = request; 2369 isci_request_change_state(request, allocated); 2370 2371 return ret; 2372 } 2373 2374 static int isci_request_alloc_io( 2375 struct isci_host *isci_host, 2376 struct sas_task *task, 2377 struct isci_request **isci_request, 2378 struct isci_remote_device *isci_device, 2379 gfp_t gfp_flags) 2380 { 2381 int retval = isci_request_alloc_core(isci_host, isci_request, 2382 isci_device, gfp_flags); 2383 2384 if (!retval) { 2385 (*isci_request)->ttype_ptr.io_task_ptr = task; 2386 (*isci_request)->ttype = io_task; 2387 2388 task->lldd_task = *isci_request; 2389 } 2390 return retval; 2391 } 2392 2393 /** 2394 * isci_request_alloc_tmf() - This function gets the request object from the 2395 * isci_host dma cache and initializes the relevant fields as a sas_task. 2396 * @isci_host: This parameter specifies the ISCI host object 2397 * @sas_task: This parameter is the task struct from the upper layer driver. 2398 * @isci_request: This parameter will contain the pointer to the new 2399 * isci_request object. 2400 * @isci_device: This parameter is the pointer to the isci remote device object 2401 * that is the destination for this request. 2402 * @gfp_flags: This parameter specifies the os allocation flags. 2403 * 2404 * SCI_SUCCESS on successfull completion, or specific failure code. 2405 */ 2406 int isci_request_alloc_tmf( 2407 struct isci_host *isci_host, 2408 struct isci_tmf *isci_tmf, 2409 struct isci_request **isci_request, 2410 struct isci_remote_device *isci_device, 2411 gfp_t gfp_flags) 2412 { 2413 int retval = isci_request_alloc_core(isci_host, isci_request, 2414 isci_device, gfp_flags); 2415 2416 if (!retval) { 2417 2418 (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf; 2419 (*isci_request)->ttype = tmf_task; 2420 } 2421 return retval; 2422 } 2423 2424 /** 2425 * isci_request_execute() - This function allocates the isci_request object, 2426 * all fills in some common fields. 2427 * @isci_host: This parameter specifies the ISCI host object 2428 * @sas_task: This parameter is the task struct from the upper layer driver. 2429 * @isci_request: This parameter will contain the pointer to the new 2430 * isci_request object. 2431 * @gfp_flags: This parameter specifies the os allocation flags. 2432 * 2433 * SCI_SUCCESS on successfull completion, or specific failure code. 2434 */ 2435 int isci_request_execute( 2436 struct isci_host *isci_host, 2437 struct sas_task *task, 2438 struct isci_request **isci_request, 2439 gfp_t gfp_flags) 2440 { 2441 int ret = 0; 2442 struct scic_sds_remote_device *sci_device; 2443 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 2444 struct isci_remote_device *isci_device; 2445 struct isci_request *request; 2446 unsigned long flags; 2447 2448 isci_device = task->dev->lldd_dev; 2449 sci_device = &isci_device->sci; 2450 2451 /* do common allocation and init of request object. */ 2452 ret = isci_request_alloc_io( 2453 isci_host, 2454 task, 2455 &request, 2456 isci_device, 2457 gfp_flags 2458 ); 2459 2460 if (ret) 2461 goto out; 2462 2463 status = isci_io_request_build(isci_host, request, isci_device); 2464 if (status != SCI_SUCCESS) { 2465 dev_warn(&isci_host->pdev->dev, 2466 "%s: request_construct failed - status = 0x%x\n", 2467 __func__, 2468 status); 2469 goto out; 2470 } 2471 2472 spin_lock_irqsave(&isci_host->scic_lock, flags); 2473 2474 /* send the request, let the core assign the IO TAG. */ 2475 status = scic_controller_start_io(&isci_host->sci, sci_device, 2476 &request->sci, 2477 SCI_CONTROLLER_INVALID_IO_TAG); 2478 if (status != SCI_SUCCESS && 2479 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 2480 dev_warn(&isci_host->pdev->dev, 2481 "%s: failed request start (0x%x)\n", 2482 __func__, status); 2483 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 2484 goto out; 2485 } 2486 2487 /* Either I/O started OK, or the core has signaled that 2488 * the device needs a target reset. 2489 * 2490 * In either case, hold onto the I/O for later. 2491 * 2492 * Update it's status and add it to the list in the 2493 * remote device object. 2494 */ 2495 isci_request_change_state(request, started); 2496 list_add(&request->dev_node, &isci_device->reqs_in_process); 2497 2498 if (status == SCI_SUCCESS) { 2499 /* Save the tag for possible task mgmt later. */ 2500 request->io_tag = request->sci.io_tag; 2501 } else { 2502 /* The request did not really start in the 2503 * hardware, so clear the request handle 2504 * here so no terminations will be done. 2505 */ 2506 request->terminated = true; 2507 } 2508 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 2509 2510 if (status == 2511 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 2512 /* Signal libsas that we need the SCSI error 2513 * handler thread to work on this I/O and that 2514 * we want a device reset. 2515 */ 2516 spin_lock_irqsave(&task->task_state_lock, flags); 2517 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 2518 spin_unlock_irqrestore(&task->task_state_lock, flags); 2519 2520 /* Cause this task to be scheduled in the SCSI error 2521 * handler thread. 2522 */ 2523 isci_execpath_callback(isci_host, task, 2524 sas_task_abort); 2525 2526 /* Change the status, since we are holding 2527 * the I/O until it is managed by the SCSI 2528 * error handler. 2529 */ 2530 status = SCI_SUCCESS; 2531 } 2532 2533 out: 2534 if (status != SCI_SUCCESS) { 2535 /* release dma memory on failure. */ 2536 isci_request_free(isci_host, request); 2537 request = NULL; 2538 ret = SCI_FAILURE; 2539 } 2540 2541 *isci_request = request; 2542 return ret; 2543 } 2544 2545 2546 2547