1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * BSD LICENSE 25 * 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 * All rights reserved. 28 * 29 * Redistribution and use in source and binary forms, with or without 30 * modification, are permitted provided that the following conditions 31 * are met: 32 * 33 * * Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * * Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in 37 * the documentation and/or other materials provided with the 38 * distribution. 39 * * Neither the name of Intel Corporation nor the names of its 40 * contributors may be used to endorse or promote products derived 41 * from this software without specific prior written permission. 42 * 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56 #include "isci.h" 57 #include "task.h" 58 #include "request.h" 59 #include "scu_completion_codes.h" 60 #include "scu_event_codes.h" 61 #include "sas.h" 62 63 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, 64 int idx) 65 { 66 if (idx == 0) 67 return &ireq->tc->sgl_pair_ab; 68 else if (idx == 1) 69 return &ireq->tc->sgl_pair_cd; 70 else if (idx < 0) 71 return NULL; 72 else 73 return &ireq->sg_table[idx - 2]; 74 } 75 76 static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, 77 struct isci_request *ireq, u32 idx) 78 { 79 u32 offset; 80 81 if (idx == 0) { 82 offset = (void *) &ireq->tc->sgl_pair_ab - 83 (void *) &ihost->task_context_table[0]; 84 return ihost->task_context_dma + offset; 85 } else if (idx == 1) { 86 offset = (void *) &ireq->tc->sgl_pair_cd - 87 (void *) &ihost->task_context_table[0]; 88 return ihost->task_context_dma + offset; 89 } 90 91 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); 92 } 93 94 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) 95 { 96 e->length = sg_dma_len(sg); 97 e->address_upper = upper_32_bits(sg_dma_address(sg)); 98 e->address_lower = lower_32_bits(sg_dma_address(sg)); 99 e->address_modifier = 0; 100 } 101 102 static void sci_request_build_sgl(struct isci_request *ireq) 103 { 104 struct isci_host *ihost = ireq->isci_host; 105 struct sas_task *task = isci_request_access_task(ireq); 106 struct scatterlist *sg = NULL; 107 dma_addr_t dma_addr; 108 u32 sg_idx = 0; 109 struct scu_sgl_element_pair *scu_sg = NULL; 110 struct scu_sgl_element_pair *prev_sg = NULL; 111 112 if (task->num_scatter > 0) { 113 sg = task->scatter; 114 115 while (sg) { 116 scu_sg = to_sgl_element_pair(ireq, sg_idx); 117 init_sgl_element(&scu_sg->A, sg); 118 sg = sg_next(sg); 119 if (sg) { 120 init_sgl_element(&scu_sg->B, sg); 121 sg = sg_next(sg); 122 } else 123 memset(&scu_sg->B, 0, sizeof(scu_sg->B)); 124 125 if (prev_sg) { 126 dma_addr = to_sgl_element_pair_dma(ihost, 127 ireq, 128 sg_idx); 129 130 prev_sg->next_pair_upper = 131 upper_32_bits(dma_addr); 132 prev_sg->next_pair_lower = 133 lower_32_bits(dma_addr); 134 } 135 136 prev_sg = scu_sg; 137 sg_idx++; 138 } 139 } else { /* handle when no sg */ 140 scu_sg = to_sgl_element_pair(ireq, sg_idx); 141 142 dma_addr = dma_map_single(&ihost->pdev->dev, 143 task->scatter, 144 task->total_xfer_len, 145 task->data_dir); 146 147 ireq->zero_scatter_daddr = dma_addr; 148 149 scu_sg->A.length = task->total_xfer_len; 150 scu_sg->A.address_upper = upper_32_bits(dma_addr); 151 scu_sg->A.address_lower = lower_32_bits(dma_addr); 152 } 153 154 if (scu_sg) { 155 scu_sg->next_pair_upper = 0; 156 scu_sg->next_pair_lower = 0; 157 } 158 } 159 160 static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) 161 { 162 struct ssp_cmd_iu *cmd_iu; 163 struct sas_task *task = isci_request_access_task(ireq); 164 165 cmd_iu = &ireq->ssp.cmd; 166 167 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); 168 cmd_iu->add_cdb_len = 0; 169 cmd_iu->_r_a = 0; 170 cmd_iu->_r_b = 0; 171 cmd_iu->en_fburst = 0; /* unsupported */ 172 cmd_iu->task_prio = task->ssp_task.task_prio; 173 cmd_iu->task_attr = task->ssp_task.task_attr; 174 cmd_iu->_r_c = 0; 175 176 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, 177 sizeof(task->ssp_task.cdb) / sizeof(u32)); 178 } 179 180 static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) 181 { 182 struct ssp_task_iu *task_iu; 183 struct sas_task *task = isci_request_access_task(ireq); 184 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 185 186 task_iu = &ireq->ssp.tmf; 187 188 memset(task_iu, 0, sizeof(struct ssp_task_iu)); 189 190 memcpy(task_iu->LUN, task->ssp_task.LUN, 8); 191 192 task_iu->task_func = isci_tmf->tmf_code; 193 task_iu->task_tag = 194 (ireq->ttype == tmf_task) ? 195 isci_tmf->io_tag : 196 SCI_CONTROLLER_INVALID_IO_TAG; 197 } 198 199 /** 200 * This method is will fill in the SCU Task Context for any type of SSP request. 201 * @sci_req: 202 * @task_context: 203 * 204 */ 205 static void scu_ssp_reqeust_construct_task_context( 206 struct isci_request *ireq, 207 struct scu_task_context *task_context) 208 { 209 dma_addr_t dma_addr; 210 struct isci_remote_device *idev; 211 struct isci_port *iport; 212 213 idev = ireq->target_device; 214 iport = idev->owning_port; 215 216 /* Fill in the TC with the its required data */ 217 task_context->abort = 0; 218 task_context->priority = 0; 219 task_context->initiator_request = 1; 220 task_context->connection_rate = idev->connection_rate; 221 task_context->protocol_engine_index = ISCI_PEG; 222 task_context->logical_port_index = iport->physical_port_index; 223 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 224 task_context->valid = SCU_TASK_CONTEXT_VALID; 225 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 226 227 task_context->remote_node_index = idev->rnc.remote_node_index; 228 task_context->command_code = 0; 229 230 task_context->link_layer_control = 0; 231 task_context->do_not_dma_ssp_good_response = 1; 232 task_context->strict_ordering = 0; 233 task_context->control_frame = 0; 234 task_context->timeout_enable = 0; 235 task_context->block_guard_enable = 0; 236 237 task_context->address_modifier = 0; 238 239 /* task_context->type.ssp.tag = ireq->io_tag; */ 240 task_context->task_phase = 0x01; 241 242 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 243 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 244 (iport->physical_port_index << 245 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 246 ISCI_TAG_TCI(ireq->io_tag)); 247 248 /* 249 * Copy the physical address for the command buffer to the 250 * SCU Task Context 251 */ 252 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); 253 254 task_context->command_iu_upper = upper_32_bits(dma_addr); 255 task_context->command_iu_lower = lower_32_bits(dma_addr); 256 257 /* 258 * Copy the physical address for the response buffer to the 259 * SCU Task Context 260 */ 261 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); 262 263 task_context->response_iu_upper = upper_32_bits(dma_addr); 264 task_context->response_iu_lower = lower_32_bits(dma_addr); 265 } 266 267 /** 268 * This method is will fill in the SCU Task Context for a SSP IO request. 269 * @sci_req: 270 * 271 */ 272 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, 273 enum dma_data_direction dir, 274 u32 len) 275 { 276 struct scu_task_context *task_context = ireq->tc; 277 278 scu_ssp_reqeust_construct_task_context(ireq, task_context); 279 280 task_context->ssp_command_iu_length = 281 sizeof(struct ssp_cmd_iu) / sizeof(u32); 282 task_context->type.ssp.frame_type = SSP_COMMAND; 283 284 switch (dir) { 285 case DMA_FROM_DEVICE: 286 case DMA_NONE: 287 default: 288 task_context->task_type = SCU_TASK_TYPE_IOREAD; 289 break; 290 case DMA_TO_DEVICE: 291 task_context->task_type = SCU_TASK_TYPE_IOWRITE; 292 break; 293 } 294 295 task_context->transfer_length_bytes = len; 296 297 if (task_context->transfer_length_bytes > 0) 298 sci_request_build_sgl(ireq); 299 } 300 301 /** 302 * This method will fill in the SCU Task Context for a SSP Task request. The 303 * following important settings are utilized: -# priority == 304 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued 305 * ahead of other task destined for the same Remote Node. -# task_type == 306 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type 307 * (i.e. non-raw frame) is being utilized to perform task management. -# 308 * control_frame == 1. This ensures that the proper endianess is set so 309 * that the bytes are transmitted in the right order for a task frame. 310 * @sci_req: This parameter specifies the task request object being 311 * constructed. 312 * 313 */ 314 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) 315 { 316 struct scu_task_context *task_context = ireq->tc; 317 318 scu_ssp_reqeust_construct_task_context(ireq, task_context); 319 320 task_context->control_frame = 1; 321 task_context->priority = SCU_TASK_PRIORITY_HIGH; 322 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; 323 task_context->transfer_length_bytes = 0; 324 task_context->type.ssp.frame_type = SSP_TASK; 325 task_context->ssp_command_iu_length = 326 sizeof(struct ssp_task_iu) / sizeof(u32); 327 } 328 329 /** 330 * This method is will fill in the SCU Task Context for any type of SATA 331 * request. This is called from the various SATA constructors. 332 * @sci_req: The general IO request object which is to be used in 333 * constructing the SCU task context. 334 * @task_context: The buffer pointer for the SCU task context which is being 335 * constructed. 336 * 337 * The general io request construction is complete. The buffer assignment for 338 * the command buffer is complete. none Revisit task context construction to 339 * determine what is common for SSP/SMP/STP task context structures. 340 */ 341 static void scu_sata_reqeust_construct_task_context( 342 struct isci_request *ireq, 343 struct scu_task_context *task_context) 344 { 345 dma_addr_t dma_addr; 346 struct isci_remote_device *idev; 347 struct isci_port *iport; 348 349 idev = ireq->target_device; 350 iport = idev->owning_port; 351 352 /* Fill in the TC with the its required data */ 353 task_context->abort = 0; 354 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 355 task_context->initiator_request = 1; 356 task_context->connection_rate = idev->connection_rate; 357 task_context->protocol_engine_index = ISCI_PEG; 358 task_context->logical_port_index = iport->physical_port_index; 359 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; 360 task_context->valid = SCU_TASK_CONTEXT_VALID; 361 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 362 363 task_context->remote_node_index = idev->rnc.remote_node_index; 364 task_context->command_code = 0; 365 366 task_context->link_layer_control = 0; 367 task_context->do_not_dma_ssp_good_response = 1; 368 task_context->strict_ordering = 0; 369 task_context->control_frame = 0; 370 task_context->timeout_enable = 0; 371 task_context->block_guard_enable = 0; 372 373 task_context->address_modifier = 0; 374 task_context->task_phase = 0x01; 375 376 task_context->ssp_command_iu_length = 377 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); 378 379 /* Set the first word of the H2D REG FIS */ 380 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; 381 382 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 383 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 384 (iport->physical_port_index << 385 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 386 ISCI_TAG_TCI(ireq->io_tag)); 387 /* 388 * Copy the physical address for the command buffer to the SCU Task 389 * Context. We must offset the command buffer by 4 bytes because the 390 * first 4 bytes are transfered in the body of the TC. 391 */ 392 dma_addr = sci_io_request_get_dma_addr(ireq, 393 ((char *) &ireq->stp.cmd) + 394 sizeof(u32)); 395 396 task_context->command_iu_upper = upper_32_bits(dma_addr); 397 task_context->command_iu_lower = lower_32_bits(dma_addr); 398 399 /* SATA Requests do not have a response buffer */ 400 task_context->response_iu_upper = 0; 401 task_context->response_iu_lower = 0; 402 } 403 404 static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) 405 { 406 struct scu_task_context *task_context = ireq->tc; 407 408 scu_sata_reqeust_construct_task_context(ireq, task_context); 409 410 task_context->control_frame = 0; 411 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 412 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; 413 task_context->type.stp.fis_type = FIS_REGH2D; 414 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); 415 } 416 417 static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, 418 bool copy_rx_frame) 419 { 420 struct isci_stp_request *stp_req = &ireq->stp.req; 421 422 scu_stp_raw_request_construct_task_context(ireq); 423 424 stp_req->status = 0; 425 stp_req->sgl.offset = 0; 426 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; 427 428 if (copy_rx_frame) { 429 sci_request_build_sgl(ireq); 430 stp_req->sgl.index = 0; 431 } else { 432 /* The user does not want the data copied to the SGL buffer location */ 433 stp_req->sgl.index = -1; 434 } 435 436 return SCI_SUCCESS; 437 } 438 439 /** 440 * 441 * @sci_req: This parameter specifies the request to be constructed as an 442 * optimized request. 443 * @optimized_task_type: This parameter specifies whether the request is to be 444 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A 445 * value of 1 indicates NCQ. 446 * 447 * This method will perform request construction common to all types of STP 448 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method 449 * returns an indication as to whether the construction was successful. 450 */ 451 static void sci_stp_optimized_request_construct(struct isci_request *ireq, 452 u8 optimized_task_type, 453 u32 len, 454 enum dma_data_direction dir) 455 { 456 struct scu_task_context *task_context = ireq->tc; 457 458 /* Build the STP task context structure */ 459 scu_sata_reqeust_construct_task_context(ireq, task_context); 460 461 /* Copy over the SGL elements */ 462 sci_request_build_sgl(ireq); 463 464 /* Copy over the number of bytes to be transfered */ 465 task_context->transfer_length_bytes = len; 466 467 if (dir == DMA_TO_DEVICE) { 468 /* 469 * The difference between the DMA IN and DMA OUT request task type 470 * values are consistent with the difference between FPDMA READ 471 * and FPDMA WRITE values. Add the supplied task type parameter 472 * to this difference to set the task type properly for this 473 * DATA OUT (WRITE) case. */ 474 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT 475 - SCU_TASK_TYPE_DMA_IN); 476 } else { 477 /* 478 * For the DATA IN (READ) case, simply save the supplied 479 * optimized task type. */ 480 task_context->task_type = optimized_task_type; 481 } 482 } 483 484 485 486 static enum sci_status 487 sci_io_request_construct_sata(struct isci_request *ireq, 488 u32 len, 489 enum dma_data_direction dir, 490 bool copy) 491 { 492 enum sci_status status = SCI_SUCCESS; 493 struct sas_task *task = isci_request_access_task(ireq); 494 495 /* check for management protocols */ 496 if (ireq->ttype == tmf_task) { 497 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 498 499 if (tmf->tmf_code == isci_tmf_sata_srst_high || 500 tmf->tmf_code == isci_tmf_sata_srst_low) { 501 scu_stp_raw_request_construct_task_context(ireq); 502 return SCI_SUCCESS; 503 } else { 504 dev_err(&ireq->owning_controller->pdev->dev, 505 "%s: Request 0x%p received un-handled SAT " 506 "management protocol 0x%x.\n", 507 __func__, ireq, tmf->tmf_code); 508 509 return SCI_FAILURE; 510 } 511 } 512 513 if (!sas_protocol_ata(task->task_proto)) { 514 dev_err(&ireq->owning_controller->pdev->dev, 515 "%s: Non-ATA protocol in SATA path: 0x%x\n", 516 __func__, 517 task->task_proto); 518 return SCI_FAILURE; 519 520 } 521 522 /* non data */ 523 if (task->data_dir == DMA_NONE) { 524 scu_stp_raw_request_construct_task_context(ireq); 525 return SCI_SUCCESS; 526 } 527 528 /* NCQ */ 529 if (task->ata_task.use_ncq) { 530 sci_stp_optimized_request_construct(ireq, 531 SCU_TASK_TYPE_FPDMAQ_READ, 532 len, dir); 533 return SCI_SUCCESS; 534 } 535 536 /* DMA */ 537 if (task->ata_task.dma_xfer) { 538 sci_stp_optimized_request_construct(ireq, 539 SCU_TASK_TYPE_DMA_IN, 540 len, dir); 541 return SCI_SUCCESS; 542 } else /* PIO */ 543 return sci_stp_pio_request_construct(ireq, copy); 544 545 return status; 546 } 547 548 static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) 549 { 550 struct sas_task *task = isci_request_access_task(ireq); 551 552 ireq->protocol = SCIC_SSP_PROTOCOL; 553 554 scu_ssp_io_request_construct_task_context(ireq, 555 task->data_dir, 556 task->total_xfer_len); 557 558 sci_io_request_build_ssp_command_iu(ireq); 559 560 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 561 562 return SCI_SUCCESS; 563 } 564 565 enum sci_status sci_task_request_construct_ssp( 566 struct isci_request *ireq) 567 { 568 /* Construct the SSP Task SCU Task Context */ 569 scu_ssp_task_request_construct_task_context(ireq); 570 571 /* Fill in the SSP Task IU */ 572 sci_task_request_build_ssp_task_iu(ireq); 573 574 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 575 576 return SCI_SUCCESS; 577 } 578 579 static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) 580 { 581 enum sci_status status; 582 bool copy = false; 583 struct sas_task *task = isci_request_access_task(ireq); 584 585 ireq->protocol = SCIC_STP_PROTOCOL; 586 587 copy = (task->data_dir == DMA_NONE) ? false : true; 588 589 status = sci_io_request_construct_sata(ireq, 590 task->total_xfer_len, 591 task->data_dir, 592 copy); 593 594 if (status == SCI_SUCCESS) 595 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 596 597 return status; 598 } 599 600 enum sci_status sci_task_request_construct_sata(struct isci_request *ireq) 601 { 602 enum sci_status status = SCI_SUCCESS; 603 604 /* check for management protocols */ 605 if (ireq->ttype == tmf_task) { 606 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 607 608 if (tmf->tmf_code == isci_tmf_sata_srst_high || 609 tmf->tmf_code == isci_tmf_sata_srst_low) { 610 scu_stp_raw_request_construct_task_context(ireq); 611 } else { 612 dev_err(&ireq->owning_controller->pdev->dev, 613 "%s: Request 0x%p received un-handled SAT " 614 "Protocol 0x%x.\n", 615 __func__, ireq, tmf->tmf_code); 616 617 return SCI_FAILURE; 618 } 619 } 620 621 if (status != SCI_SUCCESS) 622 return status; 623 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 624 625 return status; 626 } 627 628 /** 629 * sci_req_tx_bytes - bytes transferred when reply underruns request 630 * @sci_req: request that was terminated early 631 */ 632 #define SCU_TASK_CONTEXT_SRAM 0x200000 633 static u32 sci_req_tx_bytes(struct isci_request *ireq) 634 { 635 struct isci_host *ihost = ireq->owning_controller; 636 u32 ret_val = 0; 637 638 if (readl(&ihost->smu_registers->address_modifier) == 0) { 639 void __iomem *scu_reg_base = ihost->scu_registers; 640 641 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where 642 * BAR1 is the scu_registers 643 * 0x20002C = 0x200000 + 0x2c 644 * = start of task context SRAM + offset of (type.ssp.data_offset) 645 * TCi is the io_tag of struct sci_request 646 */ 647 ret_val = readl(scu_reg_base + 648 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 649 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); 650 } 651 652 return ret_val; 653 } 654 655 enum sci_status sci_request_start(struct isci_request *ireq) 656 { 657 enum sci_base_request_states state; 658 struct scu_task_context *tc = ireq->tc; 659 struct isci_host *ihost = ireq->owning_controller; 660 661 state = ireq->sm.current_state_id; 662 if (state != SCI_REQ_CONSTRUCTED) { 663 dev_warn(&ihost->pdev->dev, 664 "%s: SCIC IO Request requested to start while in wrong " 665 "state %d\n", __func__, state); 666 return SCI_FAILURE_INVALID_STATE; 667 } 668 669 tc->task_index = ISCI_TAG_TCI(ireq->io_tag); 670 671 switch (tc->protocol_type) { 672 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 673 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 674 /* SSP/SMP Frame */ 675 tc->type.ssp.tag = ireq->io_tag; 676 tc->type.ssp.target_port_transfer_tag = 0xFFFF; 677 break; 678 679 case SCU_TASK_CONTEXT_PROTOCOL_STP: 680 /* STP/SATA Frame 681 * tc->type.stp.ncq_tag = ireq->ncq_tag; 682 */ 683 break; 684 685 case SCU_TASK_CONTEXT_PROTOCOL_NONE: 686 /* / @todo When do we set no protocol type? */ 687 break; 688 689 default: 690 /* This should never happen since we build the IO 691 * requests */ 692 break; 693 } 694 695 /* Add to the post_context the io tag value */ 696 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); 697 698 /* Everything is good go ahead and change state */ 699 sci_change_state(&ireq->sm, SCI_REQ_STARTED); 700 701 return SCI_SUCCESS; 702 } 703 704 enum sci_status 705 sci_io_request_terminate(struct isci_request *ireq) 706 { 707 enum sci_base_request_states state; 708 709 state = ireq->sm.current_state_id; 710 711 switch (state) { 712 case SCI_REQ_CONSTRUCTED: 713 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 714 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 715 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 716 return SCI_SUCCESS; 717 case SCI_REQ_STARTED: 718 case SCI_REQ_TASK_WAIT_TC_COMP: 719 case SCI_REQ_SMP_WAIT_RESP: 720 case SCI_REQ_SMP_WAIT_TC_COMP: 721 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 722 case SCI_REQ_STP_UDMA_WAIT_D2H: 723 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 724 case SCI_REQ_STP_NON_DATA_WAIT_D2H: 725 case SCI_REQ_STP_PIO_WAIT_H2D: 726 case SCI_REQ_STP_PIO_WAIT_FRAME: 727 case SCI_REQ_STP_PIO_DATA_IN: 728 case SCI_REQ_STP_PIO_DATA_OUT: 729 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: 730 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: 731 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: 732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 733 return SCI_SUCCESS; 734 case SCI_REQ_TASK_WAIT_TC_RESP: 735 /* The task frame was already confirmed to have been 736 * sent by the SCU HW. Since the state machine is 737 * now only waiting for the task response itself, 738 * abort the request and complete it immediately 739 * and don't wait for the task response. 740 */ 741 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 742 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 743 return SCI_SUCCESS; 744 case SCI_REQ_ABORTING: 745 /* If a request has a termination requested twice, return 746 * a failure indication, since HW confirmation of the first 747 * abort is still outstanding. 748 */ 749 case SCI_REQ_COMPLETED: 750 default: 751 dev_warn(&ireq->owning_controller->pdev->dev, 752 "%s: SCIC IO Request requested to abort while in wrong " 753 "state %d\n", 754 __func__, 755 ireq->sm.current_state_id); 756 break; 757 } 758 759 return SCI_FAILURE_INVALID_STATE; 760 } 761 762 enum sci_status sci_request_complete(struct isci_request *ireq) 763 { 764 enum sci_base_request_states state; 765 struct isci_host *ihost = ireq->owning_controller; 766 767 state = ireq->sm.current_state_id; 768 if (WARN_ONCE(state != SCI_REQ_COMPLETED, 769 "isci: request completion from wrong state (%d)\n", state)) 770 return SCI_FAILURE_INVALID_STATE; 771 772 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 773 sci_controller_release_frame(ihost, 774 ireq->saved_rx_frame_index); 775 776 /* XXX can we just stop the machine and remove the 'final' state? */ 777 sci_change_state(&ireq->sm, SCI_REQ_FINAL); 778 return SCI_SUCCESS; 779 } 780 781 enum sci_status sci_io_request_event_handler(struct isci_request *ireq, 782 u32 event_code) 783 { 784 enum sci_base_request_states state; 785 struct isci_host *ihost = ireq->owning_controller; 786 787 state = ireq->sm.current_state_id; 788 789 if (state != SCI_REQ_STP_PIO_DATA_IN) { 790 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n", 791 __func__, event_code, state); 792 793 return SCI_FAILURE_INVALID_STATE; 794 } 795 796 switch (scu_get_event_specifier(event_code)) { 797 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: 798 /* We are waiting for data and the SCU has R_ERR the data frame. 799 * Go back to waiting for the D2H Register FIS 800 */ 801 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 802 return SCI_SUCCESS; 803 default: 804 dev_err(&ihost->pdev->dev, 805 "%s: pio request unexpected event %#x\n", 806 __func__, event_code); 807 808 /* TODO Should we fail the PIO request when we get an 809 * unexpected event? 810 */ 811 return SCI_FAILURE; 812 } 813 } 814 815 /* 816 * This function copies response data for requests returning response data 817 * instead of sense data. 818 * @sci_req: This parameter specifies the request object for which to copy 819 * the response data. 820 */ 821 static void sci_io_request_copy_response(struct isci_request *ireq) 822 { 823 void *resp_buf; 824 u32 len; 825 struct ssp_response_iu *ssp_response; 826 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 827 828 ssp_response = &ireq->ssp.rsp; 829 830 resp_buf = &isci_tmf->resp.resp_iu; 831 832 len = min_t(u32, 833 SSP_RESP_IU_MAX_SIZE, 834 be32_to_cpu(ssp_response->response_data_len)); 835 836 memcpy(resp_buf, ssp_response->resp_data, len); 837 } 838 839 static enum sci_status 840 request_started_state_tc_event(struct isci_request *ireq, 841 u32 completion_code) 842 { 843 struct ssp_response_iu *resp_iu; 844 u8 datapres; 845 846 /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 847 * to determine SDMA status 848 */ 849 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 850 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 851 ireq->scu_status = SCU_TASK_DONE_GOOD; 852 ireq->sci_status = SCI_SUCCESS; 853 break; 854 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { 855 /* There are times when the SCU hardware will return an early 856 * response because the io request specified more data than is 857 * returned by the target device (mode pages, inquiry data, 858 * etc.). We must check the response stats to see if this is 859 * truly a failed request or a good request that just got 860 * completed early. 861 */ 862 struct ssp_response_iu *resp = &ireq->ssp.rsp; 863 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 864 865 sci_swab32_cpy(&ireq->ssp.rsp, 866 &ireq->ssp.rsp, 867 word_cnt); 868 869 if (resp->status == 0) { 870 ireq->scu_status = SCU_TASK_DONE_GOOD; 871 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; 872 } else { 873 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 874 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 875 } 876 break; 877 } 878 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { 879 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 880 881 sci_swab32_cpy(&ireq->ssp.rsp, 882 &ireq->ssp.rsp, 883 word_cnt); 884 885 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 886 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 887 break; 888 } 889 890 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): 891 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame 892 * guaranteed to be received before this completion status is 893 * posted? 894 */ 895 resp_iu = &ireq->ssp.rsp; 896 datapres = resp_iu->datapres; 897 898 if (datapres == 1 || datapres == 2) { 899 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 900 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 901 } else { 902 ireq->scu_status = SCU_TASK_DONE_GOOD; 903 ireq->sci_status = SCI_SUCCESS; 904 } 905 break; 906 /* only stp device gets suspended. */ 907 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 908 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): 909 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): 910 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): 911 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): 912 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): 913 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 914 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): 915 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 916 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 917 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 918 if (ireq->protocol == SCIC_STP_PROTOCOL) { 919 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 920 SCU_COMPLETION_TL_STATUS_SHIFT; 921 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 922 } else { 923 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 924 SCU_COMPLETION_TL_STATUS_SHIFT; 925 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 926 } 927 break; 928 929 /* both stp/ssp device gets suspended */ 930 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): 931 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): 932 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): 933 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): 934 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): 935 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): 936 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): 937 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): 938 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): 939 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): 940 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 941 SCU_COMPLETION_TL_STATUS_SHIFT; 942 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 943 break; 944 945 /* neither ssp nor stp gets suspended. */ 946 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): 947 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): 948 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): 949 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): 950 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): 951 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): 952 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 953 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 954 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 955 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 956 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): 957 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): 958 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): 959 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): 960 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): 961 default: 962 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 963 SCU_COMPLETION_TL_STATUS_SHIFT; 964 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 965 break; 966 } 967 968 /* 969 * TODO: This is probably wrong for ACK/NAK timeout conditions 970 */ 971 972 /* In all cases we will treat this as the completion of the IO req. */ 973 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 974 return SCI_SUCCESS; 975 } 976 977 static enum sci_status 978 request_aborting_state_tc_event(struct isci_request *ireq, 979 u32 completion_code) 980 { 981 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 982 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 983 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): 984 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 985 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 986 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 987 break; 988 989 default: 990 /* Unless we get some strange error wait for the task abort to complete 991 * TODO: Should there be a state change for this completion? 992 */ 993 break; 994 } 995 996 return SCI_SUCCESS; 997 } 998 999 static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, 1000 u32 completion_code) 1001 { 1002 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1003 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1004 ireq->scu_status = SCU_TASK_DONE_GOOD; 1005 ireq->sci_status = SCI_SUCCESS; 1006 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1007 break; 1008 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1009 /* Currently, the decision is to simply allow the task request 1010 * to timeout if the task IU wasn't received successfully. 1011 * There is a potential for receiving multiple task responses if 1012 * we decide to send the task IU again. 1013 */ 1014 dev_warn(&ireq->owning_controller->pdev->dev, 1015 "%s: TaskRequest:0x%p CompletionCode:%x - " 1016 "ACK/NAK timeout\n", __func__, ireq, 1017 completion_code); 1018 1019 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1020 break; 1021 default: 1022 /* 1023 * All other completion status cause the IO to be complete. 1024 * If a NAK was received, then it is up to the user to retry 1025 * the request. 1026 */ 1027 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1028 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1029 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1030 break; 1031 } 1032 1033 return SCI_SUCCESS; 1034 } 1035 1036 static enum sci_status 1037 smp_request_await_response_tc_event(struct isci_request *ireq, 1038 u32 completion_code) 1039 { 1040 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1041 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1042 /* In the AWAIT RESPONSE state, any TC completion is 1043 * unexpected. but if the TC has success status, we 1044 * complete the IO anyway. 1045 */ 1046 ireq->scu_status = SCU_TASK_DONE_GOOD; 1047 ireq->sci_status = SCI_SUCCESS; 1048 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1049 break; 1050 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1051 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1052 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1053 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1054 /* These status has been seen in a specific LSI 1055 * expander, which sometimes is not able to send smp 1056 * response within 2 ms. This causes our hardware break 1057 * the connection and set TC completion with one of 1058 * these SMP_XXX_XX_ERR status. For these type of error, 1059 * we ask ihost user to retry the request. 1060 */ 1061 ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; 1062 ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; 1063 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1064 break; 1065 default: 1066 /* All other completion status cause the IO to be complete. If a NAK 1067 * was received, then it is up to the user to retry the request 1068 */ 1069 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1070 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1071 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1072 break; 1073 } 1074 1075 return SCI_SUCCESS; 1076 } 1077 1078 static enum sci_status 1079 smp_request_await_tc_event(struct isci_request *ireq, 1080 u32 completion_code) 1081 { 1082 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1083 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1084 ireq->scu_status = SCU_TASK_DONE_GOOD; 1085 ireq->sci_status = SCI_SUCCESS; 1086 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1087 break; 1088 default: 1089 /* All other completion status cause the IO to be 1090 * complete. If a NAK was received, then it is up to 1091 * the user to retry the request. 1092 */ 1093 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1094 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1095 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1096 break; 1097 } 1098 1099 return SCI_SUCCESS; 1100 } 1101 1102 static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) 1103 { 1104 struct scu_sgl_element *sgl; 1105 struct scu_sgl_element_pair *sgl_pair; 1106 struct isci_request *ireq = to_ireq(stp_req); 1107 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; 1108 1109 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1110 if (!sgl_pair) 1111 sgl = NULL; 1112 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { 1113 if (sgl_pair->B.address_lower == 0 && 1114 sgl_pair->B.address_upper == 0) { 1115 sgl = NULL; 1116 } else { 1117 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; 1118 sgl = &sgl_pair->B; 1119 } 1120 } else { 1121 if (sgl_pair->next_pair_lower == 0 && 1122 sgl_pair->next_pair_upper == 0) { 1123 sgl = NULL; 1124 } else { 1125 pio_sgl->index++; 1126 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; 1127 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1128 sgl = &sgl_pair->A; 1129 } 1130 } 1131 1132 return sgl; 1133 } 1134 1135 static enum sci_status 1136 stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, 1137 u32 completion_code) 1138 { 1139 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1140 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1141 ireq->scu_status = SCU_TASK_DONE_GOOD; 1142 ireq->sci_status = SCI_SUCCESS; 1143 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); 1144 break; 1145 1146 default: 1147 /* All other completion status cause the IO to be 1148 * complete. If a NAK was received, then it is up to 1149 * the user to retry the request. 1150 */ 1151 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1152 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1153 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1154 break; 1155 } 1156 1157 return SCI_SUCCESS; 1158 } 1159 1160 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ 1161 1162 /* transmit DATA_FIS from (current sgl + offset) for input 1163 * parameter length. current sgl and offset is alreay stored in the IO request 1164 */ 1165 static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( 1166 struct isci_request *ireq, 1167 u32 length) 1168 { 1169 struct isci_stp_request *stp_req = &ireq->stp.req; 1170 struct scu_task_context *task_context = ireq->tc; 1171 struct scu_sgl_element_pair *sgl_pair; 1172 struct scu_sgl_element *current_sgl; 1173 1174 /* Recycle the TC and reconstruct it for sending out DATA FIS containing 1175 * for the data from current_sgl+offset for the input length 1176 */ 1177 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1178 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) 1179 current_sgl = &sgl_pair->A; 1180 else 1181 current_sgl = &sgl_pair->B; 1182 1183 /* update the TC */ 1184 task_context->command_iu_upper = current_sgl->address_upper; 1185 task_context->command_iu_lower = current_sgl->address_lower; 1186 task_context->transfer_length_bytes = length; 1187 task_context->type.stp.fis_type = FIS_DATA; 1188 1189 /* send the new TC out. */ 1190 return sci_controller_continue_io(ireq); 1191 } 1192 1193 static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) 1194 { 1195 struct isci_stp_request *stp_req = &ireq->stp.req; 1196 struct scu_sgl_element_pair *sgl_pair; 1197 struct scu_sgl_element *sgl; 1198 enum sci_status status; 1199 u32 offset; 1200 u32 len = 0; 1201 1202 offset = stp_req->sgl.offset; 1203 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1204 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) 1205 return SCI_FAILURE; 1206 1207 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { 1208 sgl = &sgl_pair->A; 1209 len = sgl_pair->A.length - offset; 1210 } else { 1211 sgl = &sgl_pair->B; 1212 len = sgl_pair->B.length - offset; 1213 } 1214 1215 if (stp_req->pio_len == 0) 1216 return SCI_SUCCESS; 1217 1218 if (stp_req->pio_len >= len) { 1219 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); 1220 if (status != SCI_SUCCESS) 1221 return status; 1222 stp_req->pio_len -= len; 1223 1224 /* update the current sgl, offset and save for future */ 1225 sgl = pio_sgl_next(stp_req); 1226 offset = 0; 1227 } else if (stp_req->pio_len < len) { 1228 sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); 1229 1230 /* Sgl offset will be adjusted and saved for future */ 1231 offset += stp_req->pio_len; 1232 sgl->address_lower += stp_req->pio_len; 1233 stp_req->pio_len = 0; 1234 } 1235 1236 stp_req->sgl.offset = offset; 1237 1238 return status; 1239 } 1240 1241 /** 1242 * 1243 * @stp_request: The request that is used for the SGL processing. 1244 * @data_buffer: The buffer of data to be copied. 1245 * @length: The length of the data transfer. 1246 * 1247 * Copy the data from the buffer for the length specified to the IO reqeust SGL 1248 * specified data region. enum sci_status 1249 */ 1250 static enum sci_status 1251 sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, 1252 u8 *data_buf, u32 len) 1253 { 1254 struct isci_request *ireq; 1255 u8 *src_addr; 1256 int copy_len; 1257 struct sas_task *task; 1258 struct scatterlist *sg; 1259 void *kaddr; 1260 int total_len = len; 1261 1262 ireq = to_ireq(stp_req); 1263 task = isci_request_access_task(ireq); 1264 src_addr = data_buf; 1265 1266 if (task->num_scatter > 0) { 1267 sg = task->scatter; 1268 1269 while (total_len > 0) { 1270 struct page *page = sg_page(sg); 1271 1272 copy_len = min_t(int, total_len, sg_dma_len(sg)); 1273 kaddr = kmap_atomic(page, KM_IRQ0); 1274 memcpy(kaddr + sg->offset, src_addr, copy_len); 1275 kunmap_atomic(kaddr, KM_IRQ0); 1276 total_len -= copy_len; 1277 src_addr += copy_len; 1278 sg = sg_next(sg); 1279 } 1280 } else { 1281 BUG_ON(task->total_xfer_len < total_len); 1282 memcpy(task->scatter, src_addr, total_len); 1283 } 1284 1285 return SCI_SUCCESS; 1286 } 1287 1288 /** 1289 * 1290 * @sci_req: The PIO DATA IN request that is to receive the data. 1291 * @data_buffer: The buffer to copy from. 1292 * 1293 * Copy the data buffer to the io request data region. enum sci_status 1294 */ 1295 static enum sci_status sci_stp_request_pio_data_in_copy_data( 1296 struct isci_stp_request *stp_req, 1297 u8 *data_buffer) 1298 { 1299 enum sci_status status; 1300 1301 /* 1302 * If there is less than 1K remaining in the transfer request 1303 * copy just the data for the transfer */ 1304 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { 1305 status = sci_stp_request_pio_data_in_copy_data_buffer( 1306 stp_req, data_buffer, stp_req->pio_len); 1307 1308 if (status == SCI_SUCCESS) 1309 stp_req->pio_len = 0; 1310 } else { 1311 /* We are transfering the whole frame so copy */ 1312 status = sci_stp_request_pio_data_in_copy_data_buffer( 1313 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); 1314 1315 if (status == SCI_SUCCESS) 1316 stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; 1317 } 1318 1319 return status; 1320 } 1321 1322 static enum sci_status 1323 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, 1324 u32 completion_code) 1325 { 1326 enum sci_status status = SCI_SUCCESS; 1327 1328 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1329 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1330 ireq->scu_status = SCU_TASK_DONE_GOOD; 1331 ireq->sci_status = SCI_SUCCESS; 1332 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1333 break; 1334 1335 default: 1336 /* All other completion status cause the IO to be 1337 * complete. If a NAK was received, then it is up to 1338 * the user to retry the request. 1339 */ 1340 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1341 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1342 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1343 break; 1344 } 1345 1346 return status; 1347 } 1348 1349 static enum sci_status 1350 pio_data_out_tx_done_tc_event(struct isci_request *ireq, 1351 u32 completion_code) 1352 { 1353 enum sci_status status = SCI_SUCCESS; 1354 bool all_frames_transferred = false; 1355 struct isci_stp_request *stp_req = &ireq->stp.req; 1356 1357 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1358 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1359 /* Transmit data */ 1360 if (stp_req->pio_len != 0) { 1361 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1362 if (status == SCI_SUCCESS) { 1363 if (stp_req->pio_len == 0) 1364 all_frames_transferred = true; 1365 } 1366 } else if (stp_req->pio_len == 0) { 1367 /* 1368 * this will happen if the all data is written at the 1369 * first time after the pio setup fis is received 1370 */ 1371 all_frames_transferred = true; 1372 } 1373 1374 /* all data transferred. */ 1375 if (all_frames_transferred) { 1376 /* 1377 * Change the state to SCI_REQ_STP_PIO_DATA_IN 1378 * and wait for PIO_SETUP fis / or D2H REg fis. */ 1379 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1380 } 1381 break; 1382 1383 default: 1384 /* 1385 * All other completion status cause the IO to be complete. 1386 * If a NAK was received, then it is up to the user to retry 1387 * the request. 1388 */ 1389 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1390 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1391 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1392 break; 1393 } 1394 1395 return status; 1396 } 1397 1398 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, 1399 u32 frame_index) 1400 { 1401 struct isci_host *ihost = ireq->owning_controller; 1402 struct dev_to_host_fis *frame_header; 1403 enum sci_status status; 1404 u32 *frame_buffer; 1405 1406 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1407 frame_index, 1408 (void **)&frame_header); 1409 1410 if ((status == SCI_SUCCESS) && 1411 (frame_header->fis_type == FIS_REGD2H)) { 1412 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1413 frame_index, 1414 (void **)&frame_buffer); 1415 1416 sci_controller_copy_sata_response(&ireq->stp.rsp, 1417 frame_header, 1418 frame_buffer); 1419 } 1420 1421 sci_controller_release_frame(ihost, frame_index); 1422 1423 return status; 1424 } 1425 1426 enum sci_status 1427 sci_io_request_frame_handler(struct isci_request *ireq, 1428 u32 frame_index) 1429 { 1430 struct isci_host *ihost = ireq->owning_controller; 1431 struct isci_stp_request *stp_req = &ireq->stp.req; 1432 enum sci_base_request_states state; 1433 enum sci_status status; 1434 ssize_t word_cnt; 1435 1436 state = ireq->sm.current_state_id; 1437 switch (state) { 1438 case SCI_REQ_STARTED: { 1439 struct ssp_frame_hdr ssp_hdr; 1440 void *frame_header; 1441 1442 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1443 frame_index, 1444 &frame_header); 1445 1446 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); 1447 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); 1448 1449 if (ssp_hdr.frame_type == SSP_RESPONSE) { 1450 struct ssp_response_iu *resp_iu; 1451 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1452 1453 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1454 frame_index, 1455 (void **)&resp_iu); 1456 1457 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); 1458 1459 resp_iu = &ireq->ssp.rsp; 1460 1461 if (resp_iu->datapres == 0x01 || 1462 resp_iu->datapres == 0x02) { 1463 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1464 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1465 } else { 1466 ireq->scu_status = SCU_TASK_DONE_GOOD; 1467 ireq->sci_status = SCI_SUCCESS; 1468 } 1469 } else { 1470 /* not a response frame, why did it get forwarded? */ 1471 dev_err(&ihost->pdev->dev, 1472 "%s: SCIC IO Request 0x%p received unexpected " 1473 "frame %d type 0x%02x\n", __func__, ireq, 1474 frame_index, ssp_hdr.frame_type); 1475 } 1476 1477 /* 1478 * In any case we are done with this frame buffer return it to 1479 * the controller 1480 */ 1481 sci_controller_release_frame(ihost, frame_index); 1482 1483 return SCI_SUCCESS; 1484 } 1485 1486 case SCI_REQ_TASK_WAIT_TC_RESP: 1487 sci_io_request_copy_response(ireq); 1488 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1489 sci_controller_release_frame(ihost, frame_index); 1490 return SCI_SUCCESS; 1491 1492 case SCI_REQ_SMP_WAIT_RESP: { 1493 struct smp_resp *rsp_hdr = &ireq->smp.rsp; 1494 void *frame_header; 1495 1496 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1497 frame_index, 1498 &frame_header); 1499 1500 /* byte swap the header. */ 1501 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); 1502 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); 1503 1504 if (rsp_hdr->frame_type == SMP_RESPONSE) { 1505 void *smp_resp; 1506 1507 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1508 frame_index, 1509 &smp_resp); 1510 1511 word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) / 1512 sizeof(u32); 1513 1514 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, 1515 smp_resp, word_cnt); 1516 1517 ireq->scu_status = SCU_TASK_DONE_GOOD; 1518 ireq->sci_status = SCI_SUCCESS; 1519 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); 1520 } else { 1521 /* 1522 * This was not a response frame why did it get 1523 * forwarded? 1524 */ 1525 dev_err(&ihost->pdev->dev, 1526 "%s: SCIC SMP Request 0x%p received unexpected " 1527 "frame %d type 0x%02x\n", 1528 __func__, 1529 ireq, 1530 frame_index, 1531 rsp_hdr->frame_type); 1532 1533 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; 1534 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1535 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1536 } 1537 1538 sci_controller_release_frame(ihost, frame_index); 1539 1540 return SCI_SUCCESS; 1541 } 1542 1543 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1544 return sci_stp_request_udma_general_frame_handler(ireq, 1545 frame_index); 1546 1547 case SCI_REQ_STP_UDMA_WAIT_D2H: 1548 /* Use the general frame handler to copy the resposne data */ 1549 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); 1550 1551 if (status != SCI_SUCCESS) 1552 return status; 1553 1554 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1555 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1556 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1557 return SCI_SUCCESS; 1558 1559 case SCI_REQ_STP_NON_DATA_WAIT_D2H: { 1560 struct dev_to_host_fis *frame_header; 1561 u32 *frame_buffer; 1562 1563 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1564 frame_index, 1565 (void **)&frame_header); 1566 1567 if (status != SCI_SUCCESS) { 1568 dev_err(&ihost->pdev->dev, 1569 "%s: SCIC IO Request 0x%p could not get frame " 1570 "header for frame index %d, status %x\n", 1571 __func__, 1572 stp_req, 1573 frame_index, 1574 status); 1575 1576 return status; 1577 } 1578 1579 switch (frame_header->fis_type) { 1580 case FIS_REGD2H: 1581 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1582 frame_index, 1583 (void **)&frame_buffer); 1584 1585 sci_controller_copy_sata_response(&ireq->stp.rsp, 1586 frame_header, 1587 frame_buffer); 1588 1589 /* The command has completed with error */ 1590 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1591 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1592 break; 1593 1594 default: 1595 dev_warn(&ihost->pdev->dev, 1596 "%s: IO Request:0x%p Frame Id:%d protocol " 1597 "violation occurred\n", __func__, stp_req, 1598 frame_index); 1599 1600 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; 1601 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; 1602 break; 1603 } 1604 1605 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1606 1607 /* Frame has been decoded return it to the controller */ 1608 sci_controller_release_frame(ihost, frame_index); 1609 1610 return status; 1611 } 1612 1613 case SCI_REQ_STP_PIO_WAIT_FRAME: { 1614 struct sas_task *task = isci_request_access_task(ireq); 1615 struct dev_to_host_fis *frame_header; 1616 u32 *frame_buffer; 1617 1618 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1619 frame_index, 1620 (void **)&frame_header); 1621 1622 if (status != SCI_SUCCESS) { 1623 dev_err(&ihost->pdev->dev, 1624 "%s: SCIC IO Request 0x%p could not get frame " 1625 "header for frame index %d, status %x\n", 1626 __func__, stp_req, frame_index, status); 1627 return status; 1628 } 1629 1630 switch (frame_header->fis_type) { 1631 case FIS_PIO_SETUP: 1632 /* Get from the frame buffer the PIO Setup Data */ 1633 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1634 frame_index, 1635 (void **)&frame_buffer); 1636 1637 /* Get the data from the PIO Setup The SCU Hardware 1638 * returns first word in the frame_header and the rest 1639 * of the data is in the frame buffer so we need to 1640 * back up one dword 1641 */ 1642 1643 /* transfer_count: first 16bits in the 4th dword */ 1644 stp_req->pio_len = frame_buffer[3] & 0xffff; 1645 1646 /* status: 4th byte in the 3rd dword */ 1647 stp_req->status = (frame_buffer[2] >> 24) & 0xff; 1648 1649 sci_controller_copy_sata_response(&ireq->stp.rsp, 1650 frame_header, 1651 frame_buffer); 1652 1653 ireq->stp.rsp.status = stp_req->status; 1654 1655 /* The next state is dependent on whether the 1656 * request was PIO Data-in or Data out 1657 */ 1658 if (task->data_dir == DMA_FROM_DEVICE) { 1659 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); 1660 } else if (task->data_dir == DMA_TO_DEVICE) { 1661 /* Transmit data */ 1662 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1663 if (status != SCI_SUCCESS) 1664 break; 1665 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); 1666 } 1667 break; 1668 1669 case FIS_SETDEVBITS: 1670 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1671 break; 1672 1673 case FIS_REGD2H: 1674 if (frame_header->status & ATA_BUSY) { 1675 /* 1676 * Now why is the drive sending a D2H Register 1677 * FIS when it is still busy? Do nothing since 1678 * we are still in the right state. 1679 */ 1680 dev_dbg(&ihost->pdev->dev, 1681 "%s: SCIC PIO Request 0x%p received " 1682 "D2H Register FIS with BSY status " 1683 "0x%x\n", 1684 __func__, 1685 stp_req, 1686 frame_header->status); 1687 break; 1688 } 1689 1690 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1691 frame_index, 1692 (void **)&frame_buffer); 1693 1694 sci_controller_copy_sata_response(&ireq->stp.req, 1695 frame_header, 1696 frame_buffer); 1697 1698 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1699 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1700 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1701 break; 1702 1703 default: 1704 /* FIXME: what do we do here? */ 1705 break; 1706 } 1707 1708 /* Frame is decoded return it to the controller */ 1709 sci_controller_release_frame(ihost, frame_index); 1710 1711 return status; 1712 } 1713 1714 case SCI_REQ_STP_PIO_DATA_IN: { 1715 struct dev_to_host_fis *frame_header; 1716 struct sata_fis_data *frame_buffer; 1717 1718 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1719 frame_index, 1720 (void **)&frame_header); 1721 1722 if (status != SCI_SUCCESS) { 1723 dev_err(&ihost->pdev->dev, 1724 "%s: SCIC IO Request 0x%p could not get frame " 1725 "header for frame index %d, status %x\n", 1726 __func__, 1727 stp_req, 1728 frame_index, 1729 status); 1730 return status; 1731 } 1732 1733 if (frame_header->fis_type != FIS_DATA) { 1734 dev_err(&ihost->pdev->dev, 1735 "%s: SCIC PIO Request 0x%p received frame %d " 1736 "with fis type 0x%02x when expecting a data " 1737 "fis.\n", 1738 __func__, 1739 stp_req, 1740 frame_index, 1741 frame_header->fis_type); 1742 1743 ireq->scu_status = SCU_TASK_DONE_GOOD; 1744 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; 1745 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1746 1747 /* Frame is decoded return it to the controller */ 1748 sci_controller_release_frame(ihost, frame_index); 1749 return status; 1750 } 1751 1752 if (stp_req->sgl.index < 0) { 1753 ireq->saved_rx_frame_index = frame_index; 1754 stp_req->pio_len = 0; 1755 } else { 1756 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1757 frame_index, 1758 (void **)&frame_buffer); 1759 1760 status = sci_stp_request_pio_data_in_copy_data(stp_req, 1761 (u8 *)frame_buffer); 1762 1763 /* Frame is decoded return it to the controller */ 1764 sci_controller_release_frame(ihost, frame_index); 1765 } 1766 1767 /* Check for the end of the transfer, are there more 1768 * bytes remaining for this data transfer 1769 */ 1770 if (status != SCI_SUCCESS || stp_req->pio_len != 0) 1771 return status; 1772 1773 if ((stp_req->status & ATA_BUSY) == 0) { 1774 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1775 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1776 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1777 } else { 1778 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1779 } 1780 return status; 1781 } 1782 1783 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: { 1784 struct dev_to_host_fis *frame_header; 1785 u32 *frame_buffer; 1786 1787 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1788 frame_index, 1789 (void **)&frame_header); 1790 if (status != SCI_SUCCESS) { 1791 dev_err(&ihost->pdev->dev, 1792 "%s: SCIC IO Request 0x%p could not get frame " 1793 "header for frame index %d, status %x\n", 1794 __func__, 1795 stp_req, 1796 frame_index, 1797 status); 1798 return status; 1799 } 1800 1801 switch (frame_header->fis_type) { 1802 case FIS_REGD2H: 1803 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1804 frame_index, 1805 (void **)&frame_buffer); 1806 1807 sci_controller_copy_sata_response(&ireq->stp.rsp, 1808 frame_header, 1809 frame_buffer); 1810 1811 /* The command has completed with error */ 1812 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1813 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1814 break; 1815 1816 default: 1817 dev_warn(&ihost->pdev->dev, 1818 "%s: IO Request:0x%p Frame Id:%d protocol " 1819 "violation occurred\n", 1820 __func__, 1821 stp_req, 1822 frame_index); 1823 1824 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; 1825 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; 1826 break; 1827 } 1828 1829 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1830 1831 /* Frame has been decoded return it to the controller */ 1832 sci_controller_release_frame(ihost, frame_index); 1833 1834 return status; 1835 } 1836 case SCI_REQ_ABORTING: 1837 /* 1838 * TODO: Is it even possible to get an unsolicited frame in the 1839 * aborting state? 1840 */ 1841 sci_controller_release_frame(ihost, frame_index); 1842 return SCI_SUCCESS; 1843 1844 default: 1845 dev_warn(&ihost->pdev->dev, 1846 "%s: SCIC IO Request given unexpected frame %x while " 1847 "in state %d\n", 1848 __func__, 1849 frame_index, 1850 state); 1851 1852 sci_controller_release_frame(ihost, frame_index); 1853 return SCI_FAILURE_INVALID_STATE; 1854 } 1855 } 1856 1857 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, 1858 u32 completion_code) 1859 { 1860 enum sci_status status = SCI_SUCCESS; 1861 1862 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1863 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1864 ireq->scu_status = SCU_TASK_DONE_GOOD; 1865 ireq->sci_status = SCI_SUCCESS; 1866 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1867 break; 1868 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): 1869 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 1870 /* We must check ther response buffer to see if the D2H 1871 * Register FIS was received before we got the TC 1872 * completion. 1873 */ 1874 if (ireq->stp.rsp.fis_type == FIS_REGD2H) { 1875 sci_remote_device_suspend(ireq->target_device, 1876 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 1877 1878 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1879 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1880 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1881 } else { 1882 /* If we have an error completion status for the 1883 * TC then we can expect a D2H register FIS from 1884 * the device so we must change state to wait 1885 * for it 1886 */ 1887 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); 1888 } 1889 break; 1890 1891 /* TODO Check to see if any of these completion status need to 1892 * wait for the device to host register fis. 1893 */ 1894 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR 1895 * - this comes only for B0 1896 */ 1897 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): 1898 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 1899 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): 1900 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): 1901 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): 1902 sci_remote_device_suspend(ireq->target_device, 1903 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 1904 /* Fall through to the default case */ 1905 default: 1906 /* All other completion status cause the IO to be complete. */ 1907 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1908 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1909 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1910 break; 1911 } 1912 1913 return status; 1914 } 1915 1916 static enum sci_status 1917 stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, 1918 u32 completion_code) 1919 { 1920 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1921 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1922 ireq->scu_status = SCU_TASK_DONE_GOOD; 1923 ireq->sci_status = SCI_SUCCESS; 1924 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); 1925 break; 1926 1927 default: 1928 /* 1929 * All other completion status cause the IO to be complete. 1930 * If a NAK was received, then it is up to the user to retry 1931 * the request. 1932 */ 1933 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1934 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1935 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1936 break; 1937 } 1938 1939 return SCI_SUCCESS; 1940 } 1941 1942 static enum sci_status 1943 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, 1944 u32 completion_code) 1945 { 1946 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1947 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1948 ireq->scu_status = SCU_TASK_DONE_GOOD; 1949 ireq->sci_status = SCI_SUCCESS; 1950 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); 1951 break; 1952 1953 default: 1954 /* All other completion status cause the IO to be complete. If 1955 * a NAK was received, then it is up to the user to retry the 1956 * request. 1957 */ 1958 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1959 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1960 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1961 break; 1962 } 1963 1964 return SCI_SUCCESS; 1965 } 1966 1967 enum sci_status 1968 sci_io_request_tc_completion(struct isci_request *ireq, 1969 u32 completion_code) 1970 { 1971 enum sci_base_request_states state; 1972 struct isci_host *ihost = ireq->owning_controller; 1973 1974 state = ireq->sm.current_state_id; 1975 1976 switch (state) { 1977 case SCI_REQ_STARTED: 1978 return request_started_state_tc_event(ireq, completion_code); 1979 1980 case SCI_REQ_TASK_WAIT_TC_COMP: 1981 return ssp_task_request_await_tc_event(ireq, 1982 completion_code); 1983 1984 case SCI_REQ_SMP_WAIT_RESP: 1985 return smp_request_await_response_tc_event(ireq, 1986 completion_code); 1987 1988 case SCI_REQ_SMP_WAIT_TC_COMP: 1989 return smp_request_await_tc_event(ireq, completion_code); 1990 1991 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1992 return stp_request_udma_await_tc_event(ireq, 1993 completion_code); 1994 1995 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 1996 return stp_request_non_data_await_h2d_tc_event(ireq, 1997 completion_code); 1998 1999 case SCI_REQ_STP_PIO_WAIT_H2D: 2000 return stp_request_pio_await_h2d_completion_tc_event(ireq, 2001 completion_code); 2002 2003 case SCI_REQ_STP_PIO_DATA_OUT: 2004 return pio_data_out_tx_done_tc_event(ireq, completion_code); 2005 2006 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: 2007 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq, 2008 completion_code); 2009 2010 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: 2011 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq, 2012 completion_code); 2013 2014 case SCI_REQ_ABORTING: 2015 return request_aborting_state_tc_event(ireq, 2016 completion_code); 2017 2018 default: 2019 dev_warn(&ihost->pdev->dev, 2020 "%s: SCIC IO Request given task completion " 2021 "notification %x while in wrong state %d\n", 2022 __func__, 2023 completion_code, 2024 state); 2025 return SCI_FAILURE_INVALID_STATE; 2026 } 2027 } 2028 2029 /** 2030 * isci_request_process_response_iu() - This function sets the status and 2031 * response iu, in the task struct, from the request object for the upper 2032 * layer driver. 2033 * @sas_task: This parameter is the task struct from the upper layer driver. 2034 * @resp_iu: This parameter points to the response iu of the completed request. 2035 * @dev: This parameter specifies the linux device struct. 2036 * 2037 * none. 2038 */ 2039 static void isci_request_process_response_iu( 2040 struct sas_task *task, 2041 struct ssp_response_iu *resp_iu, 2042 struct device *dev) 2043 { 2044 dev_dbg(dev, 2045 "%s: resp_iu = %p " 2046 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 2047 "resp_iu->response_data_len = %x, " 2048 "resp_iu->sense_data_len = %x\nrepsonse data: ", 2049 __func__, 2050 resp_iu, 2051 resp_iu->status, 2052 resp_iu->datapres, 2053 resp_iu->response_data_len, 2054 resp_iu->sense_data_len); 2055 2056 task->task_status.stat = resp_iu->status; 2057 2058 /* libsas updates the task status fields based on the response iu. */ 2059 sas_ssp_task_response(dev, task, resp_iu); 2060 } 2061 2062 /** 2063 * isci_request_set_open_reject_status() - This function prepares the I/O 2064 * completion for OPEN_REJECT conditions. 2065 * @request: This parameter is the completed isci_request object. 2066 * @response_ptr: This parameter specifies the service response for the I/O. 2067 * @status_ptr: This parameter specifies the exec status for the I/O. 2068 * @complete_to_host_ptr: This parameter specifies the action to be taken by 2069 * the LLDD with respect to completing this request or forcing an abort 2070 * condition on the I/O. 2071 * @open_rej_reason: This parameter specifies the encoded reason for the 2072 * abandon-class reject. 2073 * 2074 * none. 2075 */ 2076 static void isci_request_set_open_reject_status( 2077 struct isci_request *request, 2078 struct sas_task *task, 2079 enum service_response *response_ptr, 2080 enum exec_status *status_ptr, 2081 enum isci_completion_selection *complete_to_host_ptr, 2082 enum sas_open_rej_reason open_rej_reason) 2083 { 2084 /* Task in the target is done. */ 2085 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2086 *response_ptr = SAS_TASK_UNDELIVERED; 2087 *status_ptr = SAS_OPEN_REJECT; 2088 *complete_to_host_ptr = isci_perform_normal_io_completion; 2089 task->task_status.open_rej_reason = open_rej_reason; 2090 } 2091 2092 /** 2093 * isci_request_handle_controller_specific_errors() - This function decodes 2094 * controller-specific I/O completion error conditions. 2095 * @request: This parameter is the completed isci_request object. 2096 * @response_ptr: This parameter specifies the service response for the I/O. 2097 * @status_ptr: This parameter specifies the exec status for the I/O. 2098 * @complete_to_host_ptr: This parameter specifies the action to be taken by 2099 * the LLDD with respect to completing this request or forcing an abort 2100 * condition on the I/O. 2101 * 2102 * none. 2103 */ 2104 static void isci_request_handle_controller_specific_errors( 2105 struct isci_remote_device *idev, 2106 struct isci_request *request, 2107 struct sas_task *task, 2108 enum service_response *response_ptr, 2109 enum exec_status *status_ptr, 2110 enum isci_completion_selection *complete_to_host_ptr) 2111 { 2112 unsigned int cstatus; 2113 2114 cstatus = request->scu_status; 2115 2116 dev_dbg(&request->isci_host->pdev->dev, 2117 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 2118 "- controller status = 0x%x\n", 2119 __func__, request, cstatus); 2120 2121 /* Decode the controller-specific errors; most 2122 * important is to recognize those conditions in which 2123 * the target may still have a task outstanding that 2124 * must be aborted. 2125 * 2126 * Note that there are SCU completion codes being 2127 * named in the decode below for which SCIC has already 2128 * done work to handle them in a way other than as 2129 * a controller-specific completion code; these are left 2130 * in the decode below for completeness sake. 2131 */ 2132 switch (cstatus) { 2133 case SCU_TASK_DONE_DMASETUP_DIRERR: 2134 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ 2135 case SCU_TASK_DONE_XFERCNT_ERR: 2136 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ 2137 if (task->task_proto == SAS_PROTOCOL_SMP) { 2138 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ 2139 *response_ptr = SAS_TASK_COMPLETE; 2140 2141 /* See if the device has been/is being stopped. Note 2142 * that we ignore the quiesce state, since we are 2143 * concerned about the actual device state. 2144 */ 2145 if (!idev) 2146 *status_ptr = SAS_DEVICE_UNKNOWN; 2147 else 2148 *status_ptr = SAS_ABORTED_TASK; 2149 2150 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2151 2152 *complete_to_host_ptr = 2153 isci_perform_normal_io_completion; 2154 } else { 2155 /* Task in the target is not done. */ 2156 *response_ptr = SAS_TASK_UNDELIVERED; 2157 2158 if (!idev) 2159 *status_ptr = SAS_DEVICE_UNKNOWN; 2160 else 2161 *status_ptr = SAM_STAT_TASK_ABORTED; 2162 2163 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2164 2165 *complete_to_host_ptr = 2166 isci_perform_error_io_completion; 2167 } 2168 2169 break; 2170 2171 case SCU_TASK_DONE_CRC_ERR: 2172 case SCU_TASK_DONE_NAK_CMD_ERR: 2173 case SCU_TASK_DONE_EXCESS_DATA: 2174 case SCU_TASK_DONE_UNEXP_FIS: 2175 /* Also SCU_TASK_DONE_UNEXP_RESP: */ 2176 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ 2177 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ 2178 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ 2179 /* These are conditions in which the target 2180 * has completed the task, so that no cleanup 2181 * is necessary. 2182 */ 2183 *response_ptr = SAS_TASK_COMPLETE; 2184 2185 /* See if the device has been/is being stopped. Note 2186 * that we ignore the quiesce state, since we are 2187 * concerned about the actual device state. 2188 */ 2189 if (!idev) 2190 *status_ptr = SAS_DEVICE_UNKNOWN; 2191 else 2192 *status_ptr = SAS_ABORTED_TASK; 2193 2194 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2195 2196 *complete_to_host_ptr = isci_perform_normal_io_completion; 2197 break; 2198 2199 2200 /* Note that the only open reject completion codes seen here will be 2201 * abandon-class codes; all others are automatically retried in the SCU. 2202 */ 2203 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2204 2205 isci_request_set_open_reject_status( 2206 request, task, response_ptr, status_ptr, 2207 complete_to_host_ptr, SAS_OREJ_WRONG_DEST); 2208 break; 2209 2210 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2211 2212 /* Note - the return of AB0 will change when 2213 * libsas implements detection of zone violations. 2214 */ 2215 isci_request_set_open_reject_status( 2216 request, task, response_ptr, status_ptr, 2217 complete_to_host_ptr, SAS_OREJ_RESV_AB0); 2218 break; 2219 2220 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2221 2222 isci_request_set_open_reject_status( 2223 request, task, response_ptr, status_ptr, 2224 complete_to_host_ptr, SAS_OREJ_RESV_AB1); 2225 break; 2226 2227 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2228 2229 isci_request_set_open_reject_status( 2230 request, task, response_ptr, status_ptr, 2231 complete_to_host_ptr, SAS_OREJ_RESV_AB2); 2232 break; 2233 2234 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2235 2236 isci_request_set_open_reject_status( 2237 request, task, response_ptr, status_ptr, 2238 complete_to_host_ptr, SAS_OREJ_RESV_AB3); 2239 break; 2240 2241 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2242 2243 isci_request_set_open_reject_status( 2244 request, task, response_ptr, status_ptr, 2245 complete_to_host_ptr, SAS_OREJ_BAD_DEST); 2246 break; 2247 2248 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2249 2250 isci_request_set_open_reject_status( 2251 request, task, response_ptr, status_ptr, 2252 complete_to_host_ptr, SAS_OREJ_STP_NORES); 2253 break; 2254 2255 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2256 2257 isci_request_set_open_reject_status( 2258 request, task, response_ptr, status_ptr, 2259 complete_to_host_ptr, SAS_OREJ_EPROTO); 2260 break; 2261 2262 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2263 2264 isci_request_set_open_reject_status( 2265 request, task, response_ptr, status_ptr, 2266 complete_to_host_ptr, SAS_OREJ_CONN_RATE); 2267 break; 2268 2269 case SCU_TASK_DONE_LL_R_ERR: 2270 /* Also SCU_TASK_DONE_ACK_NAK_TO: */ 2271 case SCU_TASK_DONE_LL_PERR: 2272 case SCU_TASK_DONE_LL_SY_TERM: 2273 /* Also SCU_TASK_DONE_NAK_ERR:*/ 2274 case SCU_TASK_DONE_LL_LF_TERM: 2275 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ 2276 case SCU_TASK_DONE_LL_ABORT_ERR: 2277 case SCU_TASK_DONE_SEQ_INV_TYPE: 2278 /* Also SCU_TASK_DONE_UNEXP_XR: */ 2279 case SCU_TASK_DONE_XR_IU_LEN_ERR: 2280 case SCU_TASK_DONE_INV_FIS_LEN: 2281 /* Also SCU_TASK_DONE_XR_WD_LEN: */ 2282 case SCU_TASK_DONE_SDMA_ERR: 2283 case SCU_TASK_DONE_OFFSET_ERR: 2284 case SCU_TASK_DONE_MAX_PLD_ERR: 2285 case SCU_TASK_DONE_LF_ERR: 2286 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ 2287 case SCU_TASK_DONE_SMP_LL_RX_ERR: 2288 case SCU_TASK_DONE_UNEXP_DATA: 2289 case SCU_TASK_DONE_UNEXP_SDBFIS: 2290 case SCU_TASK_DONE_REG_ERR: 2291 case SCU_TASK_DONE_SDB_ERR: 2292 case SCU_TASK_DONE_TASK_ABORT: 2293 default: 2294 /* Task in the target is not done. */ 2295 *response_ptr = SAS_TASK_UNDELIVERED; 2296 *status_ptr = SAM_STAT_TASK_ABORTED; 2297 2298 if (task->task_proto == SAS_PROTOCOL_SMP) { 2299 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2300 2301 *complete_to_host_ptr = isci_perform_normal_io_completion; 2302 } else { 2303 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2304 2305 *complete_to_host_ptr = isci_perform_error_io_completion; 2306 } 2307 break; 2308 } 2309 } 2310 2311 /** 2312 * isci_task_save_for_upper_layer_completion() - This function saves the 2313 * request for later completion to the upper layer driver. 2314 * @host: This parameter is a pointer to the host on which the the request 2315 * should be queued (either as an error or success). 2316 * @request: This parameter is the completed request. 2317 * @response: This parameter is the response code for the completed task. 2318 * @status: This parameter is the status code for the completed task. 2319 * 2320 * none. 2321 */ 2322 static void isci_task_save_for_upper_layer_completion( 2323 struct isci_host *host, 2324 struct isci_request *request, 2325 enum service_response response, 2326 enum exec_status status, 2327 enum isci_completion_selection task_notification_selection) 2328 { 2329 struct sas_task *task = isci_request_access_task(request); 2330 2331 task_notification_selection 2332 = isci_task_set_completion_status(task, response, status, 2333 task_notification_selection); 2334 2335 /* Tasks aborted specifically by a call to the lldd_abort_task 2336 * function should not be completed to the host in the regular path. 2337 */ 2338 switch (task_notification_selection) { 2339 2340 case isci_perform_normal_io_completion: 2341 2342 /* Normal notification (task_done) */ 2343 dev_dbg(&host->pdev->dev, 2344 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", 2345 __func__, 2346 task, 2347 task->task_status.resp, response, 2348 task->task_status.stat, status); 2349 /* Add to the completed list. */ 2350 list_add(&request->completed_node, 2351 &host->requests_to_complete); 2352 2353 /* Take the request off the device's pending request list. */ 2354 list_del_init(&request->dev_node); 2355 break; 2356 2357 case isci_perform_aborted_io_completion: 2358 /* No notification to libsas because this request is 2359 * already in the abort path. 2360 */ 2361 dev_dbg(&host->pdev->dev, 2362 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", 2363 __func__, 2364 task, 2365 task->task_status.resp, response, 2366 task->task_status.stat, status); 2367 2368 /* Wake up whatever process was waiting for this 2369 * request to complete. 2370 */ 2371 WARN_ON(request->io_request_completion == NULL); 2372 2373 if (request->io_request_completion != NULL) { 2374 2375 /* Signal whoever is waiting that this 2376 * request is complete. 2377 */ 2378 complete(request->io_request_completion); 2379 } 2380 break; 2381 2382 case isci_perform_error_io_completion: 2383 /* Use sas_task_abort */ 2384 dev_dbg(&host->pdev->dev, 2385 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", 2386 __func__, 2387 task, 2388 task->task_status.resp, response, 2389 task->task_status.stat, status); 2390 /* Add to the aborted list. */ 2391 list_add(&request->completed_node, 2392 &host->requests_to_errorback); 2393 break; 2394 2395 default: 2396 dev_dbg(&host->pdev->dev, 2397 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", 2398 __func__, 2399 task, 2400 task->task_status.resp, response, 2401 task->task_status.stat, status); 2402 2403 /* Add to the error to libsas list. */ 2404 list_add(&request->completed_node, 2405 &host->requests_to_errorback); 2406 break; 2407 } 2408 } 2409 2410 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2411 { 2412 struct task_status_struct *ts = &task->task_status; 2413 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2414 2415 resp->frame_len = sizeof(*fis); 2416 memcpy(resp->ending_fis, fis, sizeof(*fis)); 2417 ts->buf_valid_size = sizeof(*resp); 2418 2419 /* If the device fault bit is set in the status register, then 2420 * set the sense data and return. 2421 */ 2422 if (fis->status & ATA_DF) 2423 ts->stat = SAS_PROTO_RESPONSE; 2424 else 2425 ts->stat = SAM_STAT_GOOD; 2426 2427 ts->resp = SAS_TASK_COMPLETE; 2428 } 2429 2430 static void isci_request_io_request_complete(struct isci_host *ihost, 2431 struct isci_request *request, 2432 enum sci_io_status completion_status) 2433 { 2434 struct sas_task *task = isci_request_access_task(request); 2435 struct ssp_response_iu *resp_iu; 2436 unsigned long task_flags; 2437 struct isci_remote_device *idev = isci_lookup_device(task->dev); 2438 enum service_response response = SAS_TASK_UNDELIVERED; 2439 enum exec_status status = SAS_ABORTED_TASK; 2440 enum isci_request_status request_status; 2441 enum isci_completion_selection complete_to_host 2442 = isci_perform_normal_io_completion; 2443 2444 dev_dbg(&ihost->pdev->dev, 2445 "%s: request = %p, task = %p,\n" 2446 "task->data_dir = %d completion_status = 0x%x\n", 2447 __func__, 2448 request, 2449 task, 2450 task->data_dir, 2451 completion_status); 2452 2453 spin_lock(&request->state_lock); 2454 request_status = request->status; 2455 2456 /* Decode the request status. Note that if the request has been 2457 * aborted by a task management function, we don't care 2458 * what the status is. 2459 */ 2460 switch (request_status) { 2461 2462 case aborted: 2463 /* "aborted" indicates that the request was aborted by a task 2464 * management function, since once a task management request is 2465 * perfomed by the device, the request only completes because 2466 * of the subsequent driver terminate. 2467 * 2468 * Aborted also means an external thread is explicitly managing 2469 * this request, so that we do not complete it up the stack. 2470 * 2471 * The target is still there (since the TMF was successful). 2472 */ 2473 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2474 response = SAS_TASK_COMPLETE; 2475 2476 /* See if the device has been/is being stopped. Note 2477 * that we ignore the quiesce state, since we are 2478 * concerned about the actual device state. 2479 */ 2480 if (!idev) 2481 status = SAS_DEVICE_UNKNOWN; 2482 else 2483 status = SAS_ABORTED_TASK; 2484 2485 complete_to_host = isci_perform_aborted_io_completion; 2486 /* This was an aborted request. */ 2487 2488 spin_unlock(&request->state_lock); 2489 break; 2490 2491 case aborting: 2492 /* aborting means that the task management function tried and 2493 * failed to abort the request. We need to note the request 2494 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the 2495 * target as down. 2496 * 2497 * Aborting also means an external thread is explicitly managing 2498 * this request, so that we do not complete it up the stack. 2499 */ 2500 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2501 response = SAS_TASK_UNDELIVERED; 2502 2503 if (!idev) 2504 /* The device has been /is being stopped. Note that 2505 * we ignore the quiesce state, since we are 2506 * concerned about the actual device state. 2507 */ 2508 status = SAS_DEVICE_UNKNOWN; 2509 else 2510 status = SAS_PHY_DOWN; 2511 2512 complete_to_host = isci_perform_aborted_io_completion; 2513 2514 /* This was an aborted request. */ 2515 2516 spin_unlock(&request->state_lock); 2517 break; 2518 2519 case terminating: 2520 2521 /* This was an terminated request. This happens when 2522 * the I/O is being terminated because of an action on 2523 * the device (reset, tear down, etc.), and the I/O needs 2524 * to be completed up the stack. 2525 */ 2526 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2527 response = SAS_TASK_UNDELIVERED; 2528 2529 /* See if the device has been/is being stopped. Note 2530 * that we ignore the quiesce state, since we are 2531 * concerned about the actual device state. 2532 */ 2533 if (!idev) 2534 status = SAS_DEVICE_UNKNOWN; 2535 else 2536 status = SAS_ABORTED_TASK; 2537 2538 complete_to_host = isci_perform_aborted_io_completion; 2539 2540 /* This was a terminated request. */ 2541 2542 spin_unlock(&request->state_lock); 2543 break; 2544 2545 case dead: 2546 /* This was a terminated request that timed-out during the 2547 * termination process. There is no task to complete to 2548 * libsas. 2549 */ 2550 complete_to_host = isci_perform_normal_io_completion; 2551 spin_unlock(&request->state_lock); 2552 break; 2553 2554 default: 2555 2556 /* The request is done from an SCU HW perspective. */ 2557 request->status = completed; 2558 2559 spin_unlock(&request->state_lock); 2560 2561 /* This is an active request being completed from the core. */ 2562 switch (completion_status) { 2563 2564 case SCI_IO_FAILURE_RESPONSE_VALID: 2565 dev_dbg(&ihost->pdev->dev, 2566 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", 2567 __func__, 2568 request, 2569 task); 2570 2571 if (sas_protocol_ata(task->task_proto)) { 2572 isci_process_stp_response(task, &request->stp.rsp); 2573 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2574 2575 /* crack the iu response buffer. */ 2576 resp_iu = &request->ssp.rsp; 2577 isci_request_process_response_iu(task, resp_iu, 2578 &ihost->pdev->dev); 2579 2580 } else if (SAS_PROTOCOL_SMP == task->task_proto) { 2581 2582 dev_err(&ihost->pdev->dev, 2583 "%s: SCI_IO_FAILURE_RESPONSE_VALID: " 2584 "SAS_PROTOCOL_SMP protocol\n", 2585 __func__); 2586 2587 } else 2588 dev_err(&ihost->pdev->dev, 2589 "%s: unknown protocol\n", __func__); 2590 2591 /* use the task status set in the task struct by the 2592 * isci_request_process_response_iu call. 2593 */ 2594 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2595 response = task->task_status.resp; 2596 status = task->task_status.stat; 2597 break; 2598 2599 case SCI_IO_SUCCESS: 2600 case SCI_IO_SUCCESS_IO_DONE_EARLY: 2601 2602 response = SAS_TASK_COMPLETE; 2603 status = SAM_STAT_GOOD; 2604 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2605 2606 if (task->task_proto == SAS_PROTOCOL_SMP) { 2607 void *rsp = &request->smp.rsp; 2608 2609 dev_dbg(&ihost->pdev->dev, 2610 "%s: SMP protocol completion\n", 2611 __func__); 2612 2613 sg_copy_from_buffer( 2614 &task->smp_task.smp_resp, 1, 2615 rsp, sizeof(struct smp_resp)); 2616 } else if (completion_status 2617 == SCI_IO_SUCCESS_IO_DONE_EARLY) { 2618 2619 /* This was an SSP / STP / SATA transfer. 2620 * There is a possibility that less data than 2621 * the maximum was transferred. 2622 */ 2623 u32 transferred_length = sci_req_tx_bytes(request); 2624 2625 task->task_status.residual 2626 = task->total_xfer_len - transferred_length; 2627 2628 /* If there were residual bytes, call this an 2629 * underrun. 2630 */ 2631 if (task->task_status.residual != 0) 2632 status = SAS_DATA_UNDERRUN; 2633 2634 dev_dbg(&ihost->pdev->dev, 2635 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 2636 __func__, 2637 status); 2638 2639 } else 2640 dev_dbg(&ihost->pdev->dev, 2641 "%s: SCI_IO_SUCCESS\n", 2642 __func__); 2643 2644 break; 2645 2646 case SCI_IO_FAILURE_TERMINATED: 2647 dev_dbg(&ihost->pdev->dev, 2648 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 2649 __func__, 2650 request, 2651 task); 2652 2653 /* The request was terminated explicitly. No handling 2654 * is needed in the SCSI error handler path. 2655 */ 2656 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2657 response = SAS_TASK_UNDELIVERED; 2658 2659 /* See if the device has been/is being stopped. Note 2660 * that we ignore the quiesce state, since we are 2661 * concerned about the actual device state. 2662 */ 2663 if (!idev) 2664 status = SAS_DEVICE_UNKNOWN; 2665 else 2666 status = SAS_ABORTED_TASK; 2667 2668 complete_to_host = isci_perform_normal_io_completion; 2669 break; 2670 2671 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 2672 2673 isci_request_handle_controller_specific_errors( 2674 idev, request, task, &response, &status, 2675 &complete_to_host); 2676 2677 break; 2678 2679 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 2680 /* This is a special case, in that the I/O completion 2681 * is telling us that the device needs a reset. 2682 * In order for the device reset condition to be 2683 * noticed, the I/O has to be handled in the error 2684 * handler. Set the reset flag and cause the 2685 * SCSI error thread to be scheduled. 2686 */ 2687 spin_lock_irqsave(&task->task_state_lock, task_flags); 2688 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 2689 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 2690 2691 /* Fail the I/O. */ 2692 response = SAS_TASK_UNDELIVERED; 2693 status = SAM_STAT_TASK_ABORTED; 2694 2695 complete_to_host = isci_perform_error_io_completion; 2696 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2697 break; 2698 2699 case SCI_FAILURE_RETRY_REQUIRED: 2700 2701 /* Fail the I/O so it can be retried. */ 2702 response = SAS_TASK_UNDELIVERED; 2703 if (!idev) 2704 status = SAS_DEVICE_UNKNOWN; 2705 else 2706 status = SAS_ABORTED_TASK; 2707 2708 complete_to_host = isci_perform_normal_io_completion; 2709 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2710 break; 2711 2712 2713 default: 2714 /* Catch any otherwise unhandled error codes here. */ 2715 dev_dbg(&ihost->pdev->dev, 2716 "%s: invalid completion code: 0x%x - " 2717 "isci_request = %p\n", 2718 __func__, completion_status, request); 2719 2720 response = SAS_TASK_UNDELIVERED; 2721 2722 /* See if the device has been/is being stopped. Note 2723 * that we ignore the quiesce state, since we are 2724 * concerned about the actual device state. 2725 */ 2726 if (!idev) 2727 status = SAS_DEVICE_UNKNOWN; 2728 else 2729 status = SAS_ABORTED_TASK; 2730 2731 if (SAS_PROTOCOL_SMP == task->task_proto) { 2732 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2733 complete_to_host = isci_perform_normal_io_completion; 2734 } else { 2735 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2736 complete_to_host = isci_perform_error_io_completion; 2737 } 2738 break; 2739 } 2740 break; 2741 } 2742 2743 switch (task->task_proto) { 2744 case SAS_PROTOCOL_SSP: 2745 if (task->data_dir == DMA_NONE) 2746 break; 2747 if (task->num_scatter == 0) 2748 /* 0 indicates a single dma address */ 2749 dma_unmap_single(&ihost->pdev->dev, 2750 request->zero_scatter_daddr, 2751 task->total_xfer_len, task->data_dir); 2752 else /* unmap the sgl dma addresses */ 2753 dma_unmap_sg(&ihost->pdev->dev, task->scatter, 2754 request->num_sg_entries, task->data_dir); 2755 break; 2756 case SAS_PROTOCOL_SMP: { 2757 struct scatterlist *sg = &task->smp_task.smp_req; 2758 struct smp_req *smp_req; 2759 void *kaddr; 2760 2761 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); 2762 2763 /* need to swab it back in case the command buffer is re-used */ 2764 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 2765 smp_req = kaddr + sg->offset; 2766 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 2767 kunmap_atomic(kaddr, KM_IRQ0); 2768 break; 2769 } 2770 default: 2771 break; 2772 } 2773 2774 /* Put the completed request on the correct list */ 2775 isci_task_save_for_upper_layer_completion(ihost, request, response, 2776 status, complete_to_host 2777 ); 2778 2779 /* complete the io request to the core. */ 2780 sci_controller_complete_io(ihost, request->target_device, request); 2781 isci_put_device(idev); 2782 2783 /* set terminated handle so it cannot be completed or 2784 * terminated again, and to cause any calls into abort 2785 * task to recognize the already completed case. 2786 */ 2787 set_bit(IREQ_TERMINATED, &request->flags); 2788 } 2789 2790 static void sci_request_started_state_enter(struct sci_base_state_machine *sm) 2791 { 2792 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2793 struct domain_device *dev = ireq->target_device->domain_dev; 2794 struct sas_task *task; 2795 2796 /* XXX as hch said always creating an internal sas_task for tmf 2797 * requests would simplify the driver 2798 */ 2799 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL; 2800 2801 /* all unaccelerated request types (non ssp or ncq) handled with 2802 * substates 2803 */ 2804 if (!task && dev->dev_type == SAS_END_DEV) { 2805 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP); 2806 } else if (!task && 2807 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high || 2808 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) { 2809 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED); 2810 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 2811 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP); 2812 } else if (task && sas_protocol_ata(task->task_proto) && 2813 !task->ata_task.use_ncq) { 2814 u32 state; 2815 2816 if (task->data_dir == DMA_NONE) 2817 state = SCI_REQ_STP_NON_DATA_WAIT_H2D; 2818 else if (task->ata_task.dma_xfer) 2819 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; 2820 else /* PIO */ 2821 state = SCI_REQ_STP_PIO_WAIT_H2D; 2822 2823 sci_change_state(sm, state); 2824 } 2825 } 2826 2827 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) 2828 { 2829 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2830 struct isci_host *ihost = ireq->owning_controller; 2831 2832 /* Tell the SCI_USER that the IO request is complete */ 2833 if (!test_bit(IREQ_TMF, &ireq->flags)) 2834 isci_request_io_request_complete(ihost, ireq, 2835 ireq->sci_status); 2836 else 2837 isci_task_request_complete(ihost, ireq, ireq->sci_status); 2838 } 2839 2840 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) 2841 { 2842 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2843 2844 /* Setting the abort bit in the Task Context is required by the silicon. */ 2845 ireq->tc->abort = 1; 2846 } 2847 2848 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 2849 { 2850 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2851 2852 ireq->target_device->working_request = ireq; 2853 } 2854 2855 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 2856 { 2857 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2858 2859 ireq->target_device->working_request = ireq; 2860 } 2861 2862 static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) 2863 { 2864 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2865 2866 ireq->target_device->working_request = ireq; 2867 } 2868 2869 static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) 2870 { 2871 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2872 struct scu_task_context *tc = ireq->tc; 2873 struct host_to_dev_fis *h2d_fis; 2874 enum sci_status status; 2875 2876 /* Clear the SRST bit */ 2877 h2d_fis = &ireq->stp.cmd; 2878 h2d_fis->control = 0; 2879 2880 /* Clear the TC control bit */ 2881 tc->control_frame = 0; 2882 2883 status = sci_controller_continue_io(ireq); 2884 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); 2885 } 2886 2887 static const struct sci_base_state sci_request_state_table[] = { 2888 [SCI_REQ_INIT] = { }, 2889 [SCI_REQ_CONSTRUCTED] = { }, 2890 [SCI_REQ_STARTED] = { 2891 .enter_state = sci_request_started_state_enter, 2892 }, 2893 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { 2894 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, 2895 }, 2896 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, 2897 [SCI_REQ_STP_PIO_WAIT_H2D] = { 2898 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, 2899 }, 2900 [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, 2901 [SCI_REQ_STP_PIO_DATA_IN] = { }, 2902 [SCI_REQ_STP_PIO_DATA_OUT] = { }, 2903 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 2904 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 2905 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { 2906 .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, 2907 }, 2908 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { 2909 .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, 2910 }, 2911 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, 2912 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 2913 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 2914 [SCI_REQ_SMP_WAIT_RESP] = { }, 2915 [SCI_REQ_SMP_WAIT_TC_COMP] = { }, 2916 [SCI_REQ_COMPLETED] = { 2917 .enter_state = sci_request_completed_state_enter, 2918 }, 2919 [SCI_REQ_ABORTING] = { 2920 .enter_state = sci_request_aborting_state_enter, 2921 }, 2922 [SCI_REQ_FINAL] = { }, 2923 }; 2924 2925 static void 2926 sci_general_request_construct(struct isci_host *ihost, 2927 struct isci_remote_device *idev, 2928 struct isci_request *ireq) 2929 { 2930 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); 2931 2932 ireq->target_device = idev; 2933 ireq->protocol = SCIC_NO_PROTOCOL; 2934 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 2935 2936 ireq->sci_status = SCI_SUCCESS; 2937 ireq->scu_status = 0; 2938 ireq->post_context = 0xFFFFFFFF; 2939 } 2940 2941 static enum sci_status 2942 sci_io_request_construct(struct isci_host *ihost, 2943 struct isci_remote_device *idev, 2944 struct isci_request *ireq) 2945 { 2946 struct domain_device *dev = idev->domain_dev; 2947 enum sci_status status = SCI_SUCCESS; 2948 2949 /* Build the common part of the request */ 2950 sci_general_request_construct(ihost, idev, ireq); 2951 2952 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 2953 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 2954 2955 if (dev->dev_type == SAS_END_DEV) 2956 /* pass */; 2957 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) 2958 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 2959 else if (dev_is_expander(dev)) 2960 /* pass */; 2961 else 2962 return SCI_FAILURE_UNSUPPORTED_PROTOCOL; 2963 2964 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); 2965 2966 return status; 2967 } 2968 2969 enum sci_status sci_task_request_construct(struct isci_host *ihost, 2970 struct isci_remote_device *idev, 2971 u16 io_tag, struct isci_request *ireq) 2972 { 2973 struct domain_device *dev = idev->domain_dev; 2974 enum sci_status status = SCI_SUCCESS; 2975 2976 /* Build the common part of the request */ 2977 sci_general_request_construct(ihost, idev, ireq); 2978 2979 if (dev->dev_type == SAS_END_DEV || 2980 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 2981 set_bit(IREQ_TMF, &ireq->flags); 2982 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 2983 } else 2984 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 2985 2986 return status; 2987 } 2988 2989 static enum sci_status isci_request_ssp_request_construct( 2990 struct isci_request *request) 2991 { 2992 enum sci_status status; 2993 2994 dev_dbg(&request->isci_host->pdev->dev, 2995 "%s: request = %p\n", 2996 __func__, 2997 request); 2998 status = sci_io_request_construct_basic_ssp(request); 2999 return status; 3000 } 3001 3002 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) 3003 { 3004 struct sas_task *task = isci_request_access_task(ireq); 3005 struct host_to_dev_fis *fis = &ireq->stp.cmd; 3006 struct ata_queued_cmd *qc = task->uldd_task; 3007 enum sci_status status; 3008 3009 dev_dbg(&ireq->isci_host->pdev->dev, 3010 "%s: ireq = %p\n", 3011 __func__, 3012 ireq); 3013 3014 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 3015 if (!task->ata_task.device_control_reg_update) 3016 fis->flags |= 0x80; 3017 fis->flags &= 0xF0; 3018 3019 status = sci_io_request_construct_basic_sata(ireq); 3020 3021 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || 3022 qc->tf.command == ATA_CMD_FPDMA_READ)) { 3023 fis->sector_count = qc->tag << 3; 3024 ireq->tc->type.stp.ncq_tag = qc->tag; 3025 } 3026 3027 return status; 3028 } 3029 3030 static enum sci_status 3031 sci_io_request_construct_smp(struct device *dev, 3032 struct isci_request *ireq, 3033 struct sas_task *task) 3034 { 3035 struct scatterlist *sg = &task->smp_task.smp_req; 3036 struct isci_remote_device *idev; 3037 struct scu_task_context *task_context; 3038 struct isci_port *iport; 3039 struct smp_req *smp_req; 3040 void *kaddr; 3041 u8 req_len; 3042 u32 cmd; 3043 3044 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 3045 smp_req = kaddr + sg->offset; 3046 /* 3047 * Look at the SMP requests' header fields; for certain SAS 1.x SMP 3048 * functions under SAS 2.0, a zero request length really indicates 3049 * a non-zero default length. 3050 */ 3051 if (smp_req->req_len == 0) { 3052 switch (smp_req->func) { 3053 case SMP_DISCOVER: 3054 case SMP_REPORT_PHY_ERR_LOG: 3055 case SMP_REPORT_PHY_SATA: 3056 case SMP_REPORT_ROUTE_INFO: 3057 smp_req->req_len = 2; 3058 break; 3059 case SMP_CONF_ROUTE_INFO: 3060 case SMP_PHY_CONTROL: 3061 case SMP_PHY_TEST_FUNCTION: 3062 smp_req->req_len = 9; 3063 break; 3064 /* Default - zero is a valid default for 2.0. */ 3065 } 3066 } 3067 req_len = smp_req->req_len; 3068 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3069 cmd = *(u32 *) smp_req; 3070 kunmap_atomic(kaddr, KM_IRQ0); 3071 3072 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3073 return SCI_FAILURE; 3074 3075 ireq->protocol = SCIC_SMP_PROTOCOL; 3076 3077 /* byte swap the smp request. */ 3078 3079 task_context = ireq->tc; 3080 3081 idev = ireq->target_device; 3082 iport = idev->owning_port; 3083 3084 /* 3085 * Fill in the TC with the its required data 3086 * 00h 3087 */ 3088 task_context->priority = 0; 3089 task_context->initiator_request = 1; 3090 task_context->connection_rate = idev->connection_rate; 3091 task_context->protocol_engine_index = ISCI_PEG; 3092 task_context->logical_port_index = iport->physical_port_index; 3093 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; 3094 task_context->abort = 0; 3095 task_context->valid = SCU_TASK_CONTEXT_VALID; 3096 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 3097 3098 /* 04h */ 3099 task_context->remote_node_index = idev->rnc.remote_node_index; 3100 task_context->command_code = 0; 3101 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; 3102 3103 /* 08h */ 3104 task_context->link_layer_control = 0; 3105 task_context->do_not_dma_ssp_good_response = 1; 3106 task_context->strict_ordering = 0; 3107 task_context->control_frame = 1; 3108 task_context->timeout_enable = 0; 3109 task_context->block_guard_enable = 0; 3110 3111 /* 0ch */ 3112 task_context->address_modifier = 0; 3113 3114 /* 10h */ 3115 task_context->ssp_command_iu_length = req_len; 3116 3117 /* 14h */ 3118 task_context->transfer_length_bytes = 0; 3119 3120 /* 3121 * 18h ~ 30h, protocol specific 3122 * since commandIU has been build by framework at this point, we just 3123 * copy the frist DWord from command IU to this location. */ 3124 memcpy(&task_context->type.smp, &cmd, sizeof(u32)); 3125 3126 /* 3127 * 40h 3128 * "For SMP you could program it to zero. We would prefer that way 3129 * so that done code will be consistent." - Venki 3130 */ 3131 task_context->task_phase = 0; 3132 3133 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3134 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3135 (iport->physical_port_index << 3136 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 3137 ISCI_TAG_TCI(ireq->io_tag)); 3138 /* 3139 * Copy the physical address for the command buffer to the SCU Task 3140 * Context command buffer should not contain command header. 3141 */ 3142 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); 3143 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); 3144 3145 /* SMP response comes as UF, so no need to set response IU address. */ 3146 task_context->response_iu_upper = 0; 3147 task_context->response_iu_lower = 0; 3148 3149 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 3150 3151 return SCI_SUCCESS; 3152 } 3153 3154 /* 3155 * isci_smp_request_build() - This function builds the smp request. 3156 * @ireq: This parameter points to the isci_request allocated in the 3157 * request construct function. 3158 * 3159 * SCI_SUCCESS on successfull completion, or specific failure code. 3160 */ 3161 static enum sci_status isci_smp_request_build(struct isci_request *ireq) 3162 { 3163 struct sas_task *task = isci_request_access_task(ireq); 3164 struct device *dev = &ireq->isci_host->pdev->dev; 3165 enum sci_status status = SCI_FAILURE; 3166 3167 status = sci_io_request_construct_smp(dev, ireq, task); 3168 if (status != SCI_SUCCESS) 3169 dev_dbg(&ireq->isci_host->pdev->dev, 3170 "%s: failed with status = %d\n", 3171 __func__, 3172 status); 3173 3174 return status; 3175 } 3176 3177 /** 3178 * isci_io_request_build() - This function builds the io request object. 3179 * @ihost: This parameter specifies the ISCI host object 3180 * @request: This parameter points to the isci_request object allocated in the 3181 * request construct function. 3182 * @sci_device: This parameter is the handle for the sci core's remote device 3183 * object that is the destination for this request. 3184 * 3185 * SCI_SUCCESS on successfull completion, or specific failure code. 3186 */ 3187 static enum sci_status isci_io_request_build(struct isci_host *ihost, 3188 struct isci_request *request, 3189 struct isci_remote_device *idev) 3190 { 3191 enum sci_status status = SCI_SUCCESS; 3192 struct sas_task *task = isci_request_access_task(request); 3193 3194 dev_dbg(&ihost->pdev->dev, 3195 "%s: idev = 0x%p; request = %p, " 3196 "num_scatter = %d\n", 3197 __func__, 3198 idev, 3199 request, 3200 task->num_scatter); 3201 3202 /* map the sgl addresses, if present. 3203 * libata does the mapping for sata devices 3204 * before we get the request. 3205 */ 3206 if (task->num_scatter && 3207 !sas_protocol_ata(task->task_proto) && 3208 !(SAS_PROTOCOL_SMP & task->task_proto)) { 3209 3210 request->num_sg_entries = dma_map_sg( 3211 &ihost->pdev->dev, 3212 task->scatter, 3213 task->num_scatter, 3214 task->data_dir 3215 ); 3216 3217 if (request->num_sg_entries == 0) 3218 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 3219 } 3220 3221 status = sci_io_request_construct(ihost, idev, request); 3222 3223 if (status != SCI_SUCCESS) { 3224 dev_dbg(&ihost->pdev->dev, 3225 "%s: failed request construct\n", 3226 __func__); 3227 return SCI_FAILURE; 3228 } 3229 3230 switch (task->task_proto) { 3231 case SAS_PROTOCOL_SMP: 3232 status = isci_smp_request_build(request); 3233 break; 3234 case SAS_PROTOCOL_SSP: 3235 status = isci_request_ssp_request_construct(request); 3236 break; 3237 case SAS_PROTOCOL_SATA: 3238 case SAS_PROTOCOL_STP: 3239 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 3240 status = isci_request_stp_request_construct(request); 3241 break; 3242 default: 3243 dev_dbg(&ihost->pdev->dev, 3244 "%s: unknown protocol\n", __func__); 3245 return SCI_FAILURE; 3246 } 3247 3248 return SCI_SUCCESS; 3249 } 3250 3251 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) 3252 { 3253 struct isci_request *ireq; 3254 3255 ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; 3256 ireq->io_tag = tag; 3257 ireq->io_request_completion = NULL; 3258 ireq->flags = 0; 3259 ireq->num_sg_entries = 0; 3260 INIT_LIST_HEAD(&ireq->completed_node); 3261 INIT_LIST_HEAD(&ireq->dev_node); 3262 isci_request_change_state(ireq, allocated); 3263 3264 return ireq; 3265 } 3266 3267 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, 3268 struct sas_task *task, 3269 u16 tag) 3270 { 3271 struct isci_request *ireq; 3272 3273 ireq = isci_request_from_tag(ihost, tag); 3274 ireq->ttype_ptr.io_task_ptr = task; 3275 ireq->ttype = io_task; 3276 task->lldd_task = ireq; 3277 3278 return ireq; 3279 } 3280 3281 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, 3282 struct isci_tmf *isci_tmf, 3283 u16 tag) 3284 { 3285 struct isci_request *ireq; 3286 3287 ireq = isci_request_from_tag(ihost, tag); 3288 ireq->ttype_ptr.tmf_task_ptr = isci_tmf; 3289 ireq->ttype = tmf_task; 3290 3291 return ireq; 3292 } 3293 3294 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, 3295 struct sas_task *task, u16 tag) 3296 { 3297 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3298 struct isci_request *ireq; 3299 unsigned long flags; 3300 int ret = 0; 3301 3302 /* do common allocation and init of request object. */ 3303 ireq = isci_io_request_from_tag(ihost, task, tag); 3304 3305 status = isci_io_request_build(ihost, ireq, idev); 3306 if (status != SCI_SUCCESS) { 3307 dev_dbg(&ihost->pdev->dev, 3308 "%s: request_construct failed - status = 0x%x\n", 3309 __func__, 3310 status); 3311 return status; 3312 } 3313 3314 spin_lock_irqsave(&ihost->scic_lock, flags); 3315 3316 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { 3317 3318 if (isci_task_is_ncq_recovery(task)) { 3319 3320 /* The device is in an NCQ recovery state. Issue the 3321 * request on the task side. Note that it will 3322 * complete on the I/O request side because the 3323 * request was built that way (ie. 3324 * ireq->is_task_management_request is false). 3325 */ 3326 status = sci_controller_start_task(ihost, 3327 idev, 3328 ireq); 3329 } else { 3330 status = SCI_FAILURE; 3331 } 3332 } else { 3333 /* send the request, let the core assign the IO TAG. */ 3334 status = sci_controller_start_io(ihost, idev, 3335 ireq); 3336 } 3337 3338 if (status != SCI_SUCCESS && 3339 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3340 dev_dbg(&ihost->pdev->dev, 3341 "%s: failed request start (0x%x)\n", 3342 __func__, status); 3343 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3344 return status; 3345 } 3346 3347 /* Either I/O started OK, or the core has signaled that 3348 * the device needs a target reset. 3349 * 3350 * In either case, hold onto the I/O for later. 3351 * 3352 * Update it's status and add it to the list in the 3353 * remote device object. 3354 */ 3355 list_add(&ireq->dev_node, &idev->reqs_in_process); 3356 3357 if (status == SCI_SUCCESS) { 3358 isci_request_change_state(ireq, started); 3359 } else { 3360 /* The request did not really start in the 3361 * hardware, so clear the request handle 3362 * here so no terminations will be done. 3363 */ 3364 set_bit(IREQ_TERMINATED, &ireq->flags); 3365 isci_request_change_state(ireq, completed); 3366 } 3367 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3368 3369 if (status == 3370 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3371 /* Signal libsas that we need the SCSI error 3372 * handler thread to work on this I/O and that 3373 * we want a device reset. 3374 */ 3375 spin_lock_irqsave(&task->task_state_lock, flags); 3376 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 3377 spin_unlock_irqrestore(&task->task_state_lock, flags); 3378 3379 /* Cause this task to be scheduled in the SCSI error 3380 * handler thread. 3381 */ 3382 isci_execpath_callback(ihost, task, 3383 sas_task_abort); 3384 3385 /* Change the status, since we are holding 3386 * the I/O until it is managed by the SCSI 3387 * error handler. 3388 */ 3389 status = SCI_SUCCESS; 3390 } 3391 3392 return ret; 3393 } 3394