/*
 * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
#include "pm80xx_tracepoints.h"

#define SMP_DIRECT 1
#define SMP_INDIRECT 2

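/**
 * pm80xx_bar4_shift - program the MEMBASE II shift register and poll until
 * the written shift value is reflected on read back (1 second timeout).
 * @pm8001_ha: our hba card information
 * @shift_value: shift value for the BAR4 (MEMBASE II) window
 */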
int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
{
	u32 reg_val;
	unsigned long start;
	pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value);
	/* confirm the setting is written */
	start = jiffies + HZ; /* 1 sec */
	do {
		reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
	} while ((reg_val != shift_value) && time_before(jiffies, start));
	if (reg_val != shift_value) {
		pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MEMBASE_II_SHIFT_REGISTER = 0x%x\n",
			   reg_val);
		return -1;
	}
	return 0;
}

static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
				const void *destination,
				u32 dw_count, u32 bus_base_number)
{
	u32 index, value, offset;
	u32 *destination1;
	destination1 = (u32 *)destination;

	for (index = 0; index < dw_count; index += 4, destination1++) {
		offset = (soffset + index);
		if (offset < (64 * 1024)) {
			value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
			*destination1 = cpu_to_le32(value);
		}
	}
	return;
}

ssize_t pm80xx_get_fatal_dump(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
	u32 accum_len, reg_val, index, *temp;
	u32 status = 1;
	unsigned long start;
	u8 *direct_data;
	char *fatal_error_data = buf;
	u32 length_to_read;
	u32 offset;

	pm8001_ha->forensic_info.data_buf.direct_data = buf;
	if (pm8001_ha->chip_id == chip_8001) {
		pm8001_ha->forensic_info.data_buf.direct_data +=
			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
			"Not supported for SPC controller");
		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
			(char *)buf;
	}
	/* initialize variables for very first call from host application */
	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
		pm8001_dbg(pm8001_ha, IO,
			   "forensic_info TYPE_NON_FATAL..............\n");
		direct_data = (u8 *)fatal_error_data;
		pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
		pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
		pm8001_ha->forensic_info.data_buf.direct_offset = 0;
		pm8001_ha->forensic_info.data_buf.read_len = 0;
		pm8001_ha->forensic_preserved_accumulated_transfer = 0;

		/* Write signature to fatal dump table */
		pm8001_mw32(fatal_table_address,
			    MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);

		pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
		pm8001_dbg(pm8001_ha, IO, "ossaHwCB: status1 %d\n", status);
		pm8001_dbg(pm8001_ha, IO, "ossaHwCB: read_len 0x%x\n",
			   pm8001_ha->forensic_info.data_buf.read_len);
		pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_len 0x%x\n",
			   pm8001_ha->forensic_info.data_buf.direct_len);
		pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_offset 0x%x\n",
			   pm8001_ha->forensic_info.data_buf.direct_offset);
	}
	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
		/* start to get data */
		/* Program the MEMBASE II Shifting Register with 0x00.*/
		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
			pm8001_ha->fatal_forensic_shift_offset);
		pm8001_ha->forensic_last_offset = 0;
		pm8001_ha->forensic_fatal_step = 0;
		pm8001_ha->fatal_bar_loc = 0;
	}

	/* Read until accum_len is retrieved */
	accum_len = pm8001_mr32(fatal_table_address,
				MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
	/* Determine length of data between previously stored transfer length
	 * and current accumulated transfer length
	 */
	length_to_read =
		accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
	pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: accum_len 0x%x\n",
		   accum_len);
	pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: length_to_read 0x%x\n",
		   length_to_read);
	pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: last_offset 0x%x\n",
		   pm8001_ha->forensic_last_offset);
	pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: read_len 0x%x\n",
		   pm8001_ha->forensic_info.data_buf.read_len);
	pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_len 0x%x\n",
		   pm8001_ha->forensic_info.data_buf.direct_len);
	pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_offset 0x%x\n",
		   pm8001_ha->forensic_info.data_buf.direct_offset);

	/* If accumulated length failed to read correctly fail the attempt.*/
	if (accum_len == 0xFFFFFFFF) {
		pm8001_dbg(pm8001_ha, IO,
			   "Possible PCI issue 0x%x not expected\n",
			   accum_len);
		return status;
	}
	/* If accumulated length is zero fail the attempt */
	if (accum_len == 0) {
		pm8001_ha->forensic_info.data_buf.direct_data +=
			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
			"%08x ", 0xFFFFFFFF);
		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
			(char *)buf;
	}
	/* Accumulated length is good so start capturing the first data */
	temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
	if (pm8001_ha->forensic_fatal_step == 0) {
moreData:
		/* If data to read is less than SYSFS_OFFSET then reduce the
		 * length of dataLen
		 */
		if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET
				> length_to_read) {
			pm8001_ha->forensic_info.data_buf.direct_len =
				length_to_read -
				pm8001_ha->forensic_last_offset;
		} else {
			pm8001_ha->forensic_info.data_buf.direct_len =
				SYSFS_OFFSET;
		}
		if (pm8001_ha->forensic_info.data_buf.direct_data) {
			/* Data is in bar, copy to host memory */
			pm80xx_pci_mem_copy(pm8001_ha,
				pm8001_ha->fatal_bar_loc,
				pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
				pm8001_ha->forensic_info.data_buf.direct_len, 1);
		}
		pm8001_ha->fatal_bar_loc +=
			pm8001_ha->forensic_info.data_buf.direct_len;
		pm8001_ha->forensic_info.data_buf.direct_offset +=
			pm8001_ha->forensic_info.data_buf.direct_len;
		pm8001_ha->forensic_last_offset +=
			pm8001_ha->forensic_info.data_buf.direct_len;
		pm8001_ha->forensic_info.data_buf.read_len =
			pm8001_ha->forensic_info.data_buf.direct_len;

		if (pm8001_ha->forensic_last_offset >= length_to_read) {
			pm8001_ha->forensic_info.data_buf.direct_data +=
				sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
					"%08x ", 3);
			for (index = 0; index <
				(pm8001_ha->forensic_info.data_buf.direct_len
				 / 4); index++) {
				pm8001_ha->forensic_info.data_buf.direct_data +=
					sprintf(
					pm8001_ha->forensic_info.data_buf.direct_data,
					"%08x ", *(temp + index));
			}

			pm8001_ha->fatal_bar_loc = 0;
			pm8001_ha->forensic_fatal_step = 1;
			pm8001_ha->fatal_forensic_shift_offset = 0;
			pm8001_ha->forensic_last_offset = 0;
			status = 0;
			offset = (int)
				((char *)pm8001_ha->forensic_info.data_buf.direct_data
				- (char *)buf);
			pm8001_dbg(pm8001_ha, IO,
				   "get_fatal_spcv:return1 0x%x\n", offset);
			return (char *)pm8001_ha->
				forensic_info.data_buf.direct_data -
				(char *)buf;
		}
		if (pm8001_ha->fatal_bar_loc < (64 * 1024)) {
			pm8001_ha->forensic_info.data_buf.direct_data +=
				sprintf(pm8001_ha->
					forensic_info.data_buf.direct_data,
					"%08x ", 2);
			for (index = 0; index <
				(pm8001_ha->forensic_info.data_buf.direct_len
				 / 4); index++) {
				pm8001_ha->forensic_info.data_buf.direct_data
					+= sprintf(pm8001_ha->
					forensic_info.data_buf.direct_data,
					"%08x ", *(temp + index));
			}
			status = 0;
			offset = (int)
				((char *)pm8001_ha->forensic_info.data_buf.direct_data
				- (char *)buf);
			pm8001_dbg(pm8001_ha, IO,
				   "get_fatal_spcv:return2 0x%x\n", offset);
			return (char *)pm8001_ha->
				forensic_info.data_buf.direct_data -
				(char *)buf;
		}

		/* Increment the MEMBASE II Shifting Register value by 0x100.*/
		pm8001_ha->forensic_info.data_buf.direct_data +=
			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
				"%08x ", 2);
		for (index = 0; index <
			(pm8001_ha->forensic_info.data_buf.direct_len
			 / 4) ; index++) {
			pm8001_ha->forensic_info.data_buf.direct_data +=
				sprintf(pm8001_ha->
				forensic_info.data_buf.direct_data,
				"%08x ", *(temp + index));
		}
		pm8001_ha->fatal_forensic_shift_offset += 0x100;
		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
			pm8001_ha->fatal_forensic_shift_offset);
		pm8001_ha->fatal_bar_loc = 0;
		status = 0;
		offset = (int)
			((char *)pm8001_ha->forensic_info.data_buf.direct_data
			- (char *)buf);
		pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return3 0x%x\n",
			   offset);
		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
			(char *)buf;
	}
	if (pm8001_ha->forensic_fatal_step == 1) {
		/* store previous accumulated length before triggering next
		 * accumulated length update
		 */
		pm8001_ha->forensic_preserved_accumulated_transfer =
			pm8001_mr32(fatal_table_address,
				    MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);

		/* continue capturing the fatal log until Dump status is 0x3 */
		if (pm8001_mr32(fatal_table_address,
				MPI_FATAL_EDUMP_TABLE_STATUS) <
				MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {

			/* reset fddstat bit by writing to zero*/
			pm8001_mw32(fatal_table_address,
				    MPI_FATAL_EDUMP_TABLE_STATUS, 0x0);

			/* set dump control value to '1' so that new data will
			 * be transferred to shared memory
			 */
			pm8001_mw32(fatal_table_address,
				    MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
				    MPI_FATAL_EDUMP_HANDSHAKE_RDY);

			/*Poll FDDHSHK until clear */
			start = jiffies + (2 * HZ); /* 2 sec */

			do {
				reg_val = pm8001_mr32(fatal_table_address,
					MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
			} while ((reg_val) && time_before(jiffies, start));

			if (reg_val != 0) {
				pm8001_dbg(pm8001_ha, FAIL,
					   "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
					   reg_val);
				/* Fail the dump if a timeout occurs */
				pm8001_ha->forensic_info.data_buf.direct_data +=
					sprintf(
					pm8001_ha->forensic_info.data_buf.direct_data,
					"%08x ", 0xFFFFFFFF);
				return((char *)
					pm8001_ha->forensic_info.data_buf.direct_data
					- (char *)buf);
			}
			/* Poll status register until set to 2 or
			 * 3 for up to 2 seconds
			 */
			start = jiffies + (2 * HZ); /* 2 sec */

			do {
				reg_val = pm8001_mr32(fatal_table_address,
					MPI_FATAL_EDUMP_TABLE_STATUS);
			} while (((reg_val != 2) && (reg_val != 3)) &&
					time_before(jiffies, start));

			if (reg_val < 2) {
				pm8001_dbg(pm8001_ha, FAIL,
					   "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
					   reg_val);
				/* Fail the dump if a timeout occurs */
				pm8001_ha->forensic_info.data_buf.direct_data +=
					sprintf(
					pm8001_ha->forensic_info.data_buf.direct_data,
					"%08x ", 0xFFFFFFFF);
				return((char *)pm8001_ha->forensic_info.data_buf.direct_data -
					(char *)buf);
			}
			/* reset fatal_forensic_shift_offset back to zero and
			 * reset MEMBASE 2 register to zero
			 */
			pm8001_ha->fatal_forensic_shift_offset = 0; /* location in 64k region */
			pm8001_cw32(pm8001_ha, 0,
				    MEMBASE_II_SHIFT_REGISTER,
				    pm8001_ha->fatal_forensic_shift_offset);
		}
		/* Read the next block of the debug data.*/
		length_to_read = pm8001_mr32(fatal_table_address,
				MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
			pm8001_ha->forensic_preserved_accumulated_transfer;
		if (length_to_read != 0x0) {
			pm8001_ha->forensic_fatal_step = 0;
			goto moreData;
		} else {
			pm8001_ha->forensic_info.data_buf.direct_data +=
				sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
					"%08x ", 4);
			pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
			pm8001_ha->forensic_info.data_buf.direct_len = 0;
			pm8001_ha->forensic_info.data_buf.direct_offset = 0;
			pm8001_ha->forensic_info.data_buf.read_len = 0;
		}
	}
	offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
			- (char *)buf);
	pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return4 0x%x\n", offset);
	return ((char *)pm8001_ha->forensic_info.data_buf.direct_data -
		(char *)buf);
}

/* pm80xx_get_non_fatal_dump - dump the non-fatal data from the DMA
 * location written by the firmware.
 */
ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	void __iomem *nonfatal_table_address = pm8001_ha->fatal_tbl_addr;
	u32 accum_len = 0;
	u32 total_len = 0;
	u32 reg_val = 0;
	u32 *temp = NULL;
	u32 index = 0;
	u32 output_length;
	unsigned long start = 0;
	char *buf_copy = buf;

	temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
	if (++pm8001_ha->non_fatal_count == 1) {
		if (pm8001_ha->chip_id == chip_8001) {
			snprintf(pm8001_ha->forensic_info.data_buf.direct_data,
				PAGE_SIZE, "Not supported for SPC controller");
			return 0;
		}
		pm8001_dbg(pm8001_ha, IO, "forensic_info TYPE_NON_FATAL...\n");
		/*
		 * Step 1: Write the host buffer parameters in the MPI Fatal and
		 * Non-Fatal Error Dump Capture Table. This is the buffer
		 * where debug data will be DMAed to.
		 */
		pm8001_mw32(nonfatal_table_address,
			    MPI_FATAL_EDUMP_TABLE_LO_OFFSET,
			pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_lo);

		pm8001_mw32(nonfatal_table_address,
			    MPI_FATAL_EDUMP_TABLE_HI_OFFSET,
			pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_hi);

		pm8001_mw32(nonfatal_table_address,
			    MPI_FATAL_EDUMP_TABLE_LENGTH, SYSFS_OFFSET);

		/* Optionally, set the DUMPCTRL bit to 1 if the host
		 * keeps sending active I/Os while capturing the non-fatal
		 * debug data. Otherwise, leave this bit set to zero
		 */
		pm8001_mw32(nonfatal_table_address,
			MPI_FATAL_EDUMP_TABLE_HANDSHAKE, MPI_FATAL_EDUMP_HANDSHAKE_RDY);

		/*
		 * Step 2: Clear Accumulative Length of Debug Data Transferred
		 * [ACCDDLEN] field in the MPI Fatal and Non-Fatal Error Dump
		 * Capture Table to zero.
		 */
		pm8001_mw32(nonfatal_table_address,
			    MPI_FATAL_EDUMP_TABLE_ACCUM_LEN, 0);

		/* initialize previous accumulated length to 0 */
		pm8001_ha->forensic_preserved_accumulated_transfer = 0;
		pm8001_ha->non_fatal_read_length = 0;
	}

	total_len = pm8001_mr32(nonfatal_table_address,
			MPI_FATAL_EDUMP_TABLE_TOTAL_LEN);
	/*
	 * Step 3: Clear Fatal/Non-Fatal Debug Data Transfer Status [FDDTSTAT]
	 * field and then request that the SPCv controller transfer the debug
	 * data by setting bit 7 of the Inbound Doorbell Set Register.
	 */
	pm8001_mw32(nonfatal_table_address, MPI_FATAL_EDUMP_TABLE_STATUS, 0);
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET,
		    SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP);

	/*
	 * Step 4.1: Read back the Inbound Doorbell Set Register (by polling for
	 * 2 seconds) until register bit 7 is cleared.
	 * This step only indicates the request is accepted by the controller.
	 */
	start = jiffies + (2 * HZ); /* 2 sec */
	do {
		reg_val = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) &
			SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP;
	} while ((reg_val != 0) && time_before(jiffies, start));

	/* Step 4.2: To check the completion of the transfer, poll the Fatal/Non
	 * Fatal Debug Data Transfer Status [FDDTSTAT] field for 2 seconds in
	 * the MPI Fatal and Non-Fatal Error Dump Capture Table.
	 */
	start = jiffies + (2 * HZ); /* 2 sec */
	do {
		reg_val = pm8001_mr32(nonfatal_table_address,
					MPI_FATAL_EDUMP_TABLE_STATUS);
	} while ((!reg_val) && time_before(jiffies, start));

	if ((reg_val == 0x00) ||
		(reg_val == MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED) ||
		(reg_val > MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE)) {
		pm8001_ha->non_fatal_read_length = 0;
		buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 0xFFFFFFFF);
		pm8001_ha->non_fatal_count = 0;
		return (buf_copy - buf);
	} else if (reg_val ==
			MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA) {
		buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 2);
	} else if ((reg_val == MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) ||
		(pm8001_ha->non_fatal_read_length >= total_len)) {
		pm8001_ha->non_fatal_read_length = 0;
		buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 4);
		pm8001_ha->non_fatal_count = 0;
	}
	accum_len = pm8001_mr32(nonfatal_table_address,
			MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
	output_length = accum_len -
		pm8001_ha->forensic_preserved_accumulated_transfer;

	for (index = 0; index < output_length/4; index++)
		buf_copy += snprintf(buf_copy, PAGE_SIZE,
				"%08x ", *(temp+index));

	pm8001_ha->non_fatal_read_length += output_length;

	/* store current accumulated length to use in next iteration as
	 * the previous accumulated length
	 */
	pm8001_ha->forensic_preserved_accumulated_transfer = accum_len;
	return (buf_copy - buf);
}

/**
 * read_main_config_table - read the configuration table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

	pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature =
		pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
		pm8001_mr32(address, MAIN_INTERFACE_REVISION);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
		pm8001_mr32(address, MAIN_FW_REVISION);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io =
		pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl =
		pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
		pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset =
		pm8001_mr32(address, MAIN_GST_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
		pm8001_mr32(address, MAIN_IBQ_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
		pm8001_mr32(address, MAIN_OBQ_OFFSET);

	/* read Error Dump Offset and Length */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);

	/* read GPIO LED settings from the configuration table */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
		pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);

	/* read analog Setting offset from the configuration table */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
		pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);

	pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
		pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
		pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
	/* read port recovery and reset timeout */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
		pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
	/* read ILA and inactive firmware version */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version =
		pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
		pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);

	pm8001_dbg(pm8001_ha, DEV,
		   "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev);

	pm8001_dbg(pm8001_ha, DEV,
		   "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset);

	pm8001_dbg(pm8001_ha, DEV,
		   "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
		   pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version);
}

/**
 * read_general_status_table - read the general status table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->general_stat_tbl_addr;
	pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate =
		pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
	pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 =
		pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
	pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 =
		pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
	pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt =
		pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
	pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt =
		pm8001_mr32(address, GST_IOPTCNT_OFFSET);
	pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val =
		pm8001_mr32(address, GST_GPIO_INPUT_VAL);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET0);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET1);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET2);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET3);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET4);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET5);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET6);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET7);
}
/**
 * read_phy_attr_table - read the phy attribute table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
	pm8001_ha->phy_attr_table.phystart1_16[0] =
		pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[1] =
		pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[2] =
		pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[3] =
		pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[4] =
		pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[5] =
		pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[6] =
		pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[7] =
		pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[8] =
		pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[9] =
		pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[10] =
		pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[11] =
		pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[12] =
		pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[13] =
		pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[14] =
		pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[15] =
		pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);

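	/* outbound hardware event port IDs for PHYs 0-15 */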
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);

}

/**
 * read_inbnd_queue_table - read the inbound queue table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
		u32 offset = i * 0x20;
		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
			get_pci_bar_index(pm8001_mr32(address,
				(offset + IB_PIPCI_BAR)));
		pm8001_ha->inbnd_q_tbl[i].pi_offset =
			pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
	}
}

/**
 * read_outbnd_queue_table - read the outbound queue table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
		u32 offset = i * 0x24;
		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
			get_pci_bar_index(pm8001_mr32(address,
				(offset + OB_CIPCI_BAR)));
		pm8001_ha->outbnd_q_tbl[i].ci_offset =
			pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
	}
}

/**
 * init_default_table_values - init the default table.
 * @pm8001_ha: our hba card information
 */
static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	u32 offsetib, offsetob;
	void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
	void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
	u32 ib_offset = pm8001_ha->ib_offset;
	u32 ob_offset = pm8001_ha->ob_offset;
	u32 ci_offset = pm8001_ha->ci_offset;
	u32 pi_offset = pm8001_ha->pi_offset;

	pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr =
		pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr =
		pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size =
		PM8001_EVENT_LOG_SIZE;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr =
		pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr =
		pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size =
		PM8001_EVENT_LOG_SIZE;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;

	/* Disable end to end CRC checking */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);

	for (i = 0; i < pm8001_ha->max_q_num; i++) {
		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
		pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
			pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi;
		pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
			pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo;
		pm8001_ha->inbnd_q_tbl[i].base_virt =
			(u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr;
		pm8001_ha->inbnd_q_tbl[i].total_length =
			pm8001_ha->memoryMap.region[ib_offset + i].total_len;
		pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
			pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi;
		pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
			pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
		pm8001_ha->inbnd_q_tbl[i].ci_virt =
			pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
		pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0);
		offsetib = i * 0x20;
		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
			get_pci_bar_index(pm8001_mr32(addressib,
				(offsetib + 0x14)));
		pm8001_ha->inbnd_q_tbl[i].pi_offset =
			pm8001_mr32(addressib, (offsetib + 0x18));
		pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
		pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;

		pm8001_dbg(pm8001_ha, DEV,
			   "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
			   pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
			   pm8001_ha->inbnd_q_tbl[i].pi_offset);
	}
	for (i = 0; i < pm8001_ha->max_q_num; i++) {
		pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
		pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
			pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi;
		pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
			pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo;
		pm8001_ha->outbnd_q_tbl[i].base_virt =
			(u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr;
		pm8001_ha->outbnd_q_tbl[i].total_length =
			pm8001_ha->memoryMap.region[ob_offset + i].total_len;
		pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
			pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi;
		pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
			pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo;
		/* interrupt vector based on oq */
		pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
		pm8001_ha->outbnd_q_tbl[i].pi_virt =
			pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
		pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0);
		offsetob = i * 0x24;
		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
			get_pci_bar_index(pm8001_mr32(addressob,
				offsetob + 0x14));
		pm8001_ha->outbnd_q_tbl[i].ci_offset =
			pm8001_mr32(addressob, (offsetob + 0x18));
		pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
		pm8001_ha->outbnd_q_tbl[i].producer_index = 0;

		pm8001_dbg(pm8001_ha, DEV,
			   "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
			   pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
			   pm8001_ha->outbnd_q_tbl[i].ci_offset);
	}
}

/**
 * update_main_config_table - update the main default table to the HBA.
 * @pm8001_ha: our hba card information
 */
static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
	pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
	pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
	pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
	pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
	pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
	/* Update Fatal error interrupt vector */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
		((pm8001_ha->max_q_num - 1) << 8);
	pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
	pm8001_dbg(pm8001_ha, DEV,
		   "Updated Fatal error interrupt vector 0x%x\n",
		   pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT));

	pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);

	/* SPCv specific */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
	/* Set GPIOLED to 0x2 for LED indicator */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
	pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
	pm8001_dbg(pm8001_ha, DEV,
		   "Programming DW 0x21 in main cfg table with 0x%x\n",
		   pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET));

	pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
	pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);

	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
		PORT_RECOVERY_TIMEOUT;
	if (pm8001_ha->chip_id == chip_8006) {
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
			0x0000ffff;
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
			CHIP_8006_PORT_RECOVERY_TIMEOUT;
	}
	pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
		    pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
}

/**
 * update_inbnd_queue_table - update the inbound queue table to the HBA.
 * @pm8001_ha: our hba card information
 * @number: entry in the queue
 */
static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
				     int number)
{
	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
	u16 offset = number * 0x20;
	pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
		    pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
	pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
		    pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
	pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
		    pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
	pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
		    pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
	pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
		    pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);

	pm8001_dbg(pm8001_ha, DEV,
		   "IQ %d: Element pri size 0x%x\n",
		   number,
		   pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);

	pm8001_dbg(pm8001_ha, DEV,
		   "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
		   pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
		   pm8001_ha->inbnd_q_tbl[number].lower_base_addr);

	pm8001_dbg(pm8001_ha, DEV,
		   "CI upper base addr 0x%x CI lower base addr 0x%x\n",
		   pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
		   pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
}

/**
 * update_outbnd_queue_table - update the outbound queue table to the HBA.
 * @pm8001_ha: our hba card information
 * @number: entry in the queue
 */
static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
				      int number)
{
	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
	u16 offset = number * 0x24;
	pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
		    pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
	pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
		    pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
	pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
		    pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
	pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
		    pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
	pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
		    pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
	pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
		    pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);

	pm8001_dbg(pm8001_ha, DEV,
		   "OQ %d: Element pri size 0x%x\n",
		   number,
		   pm8001_ha->outbnd_q_tbl[number].element_size_cnt);

	pm8001_dbg(pm8001_ha, DEV,
		   "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
		   pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
		   pm8001_ha->outbnd_q_tbl[number].lower_base_addr);

	pm8001_dbg(pm8001_ha, DEV,
		   "PI upper base addr 0x%x PI lower base addr 0x%x\n",
		   pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
		   pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
}

/**
 * mpi_init_check - check firmware initialization status.
 * @pm8001_ha: our hba card information
 */
static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 max_wait_count;
	u32 value;
	u32 gst_len_mpistate;

	/* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
	table is updated */
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
	/* wait until Inbound DoorBell Clear Register toggled */
	if (IS_SPCV_12G(pm8001_ha->pdev)) {
		max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
	} else {
		max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
	}
	do {
		msleep(FW_READY_INTERVAL);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPCv_MSGU_CFG_TABLE_UPDATE;
	} while ((value != 0) && (--max_wait_count));

	if (!max_wait_count) {
		/* additional check */
		pm8001_dbg(pm8001_ha, FAIL,
			   "Inb doorbell clear not toggled[value:%x]\n",
			   value);
		return -EBUSY;
	}
	/* check the MPI-State for initialization up to 100ms*/
	max_wait_count = 5;/* 100 msec */
	do {
		msleep(FW_READY_INTERVAL);
		gst_len_mpistate =
			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
				    GST_GSTLEN_MPIS_OFFSET);
	} while ((GST_MPI_STATE_INIT !=
		 (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
	if (!max_wait_count)
		return -EBUSY;

	/* check MPI Initialization error */
	gst_len_mpistate = gst_len_mpistate >> 16;
	if (0x0000 != gst_len_mpistate)
		return -EBUSY;

	return 0;
}

/**
 * check_fw_ready - The LLDD checks if the FW is ready; if not, return an error.
 * This function sleeps hence it must not be used in atomic context.
 * @pm8001_ha: our hba card information
 */
static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
{
	u32 value;
	u32 max_wait_count;
	u32 max_wait_time;
	u32 expected_mask;
	int ret = 0;

	/* reset / PCIe ready */
	max_wait_time = max_wait_count = 5;	/* 100 milli sec */
	do {
		msleep(FW_READY_INTERVAL);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
	} while ((value == 0xFFFFFFFF) && (--max_wait_count));

	/* check ila, RAAE and iops status */
	if ((pm8001_ha->chip_id != chip_8008) &&
	    (pm8001_ha->chip_id != chip_8009)) {
		max_wait_time = max_wait_count = 180;   /* 3600 milli sec */
		expected_mask = SCRATCH_PAD_ILA_READY |
			SCRATCH_PAD_RAAE_READY |
			SCRATCH_PAD_IOP0_READY |
			SCRATCH_PAD_IOP1_READY;
	} else {
		max_wait_time = max_wait_count = 170;   /* 3400 milli sec */
		expected_mask = SCRATCH_PAD_ILA_READY |
			SCRATCH_PAD_RAAE_READY |
			SCRATCH_PAD_IOP0_READY;
	}
	do {
		msleep(FW_READY_INTERVAL);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
	} while (((value & expected_mask) !=
		  expected_mask) && (--max_wait_count));
	if (!max_wait_count) {
		pm8001_dbg(pm8001_ha, INIT,
			   "At least one FW component failed to load within %d millisec: Scratchpad1: 0x%x\n",
			   max_wait_time * FW_READY_INTERVAL, value);
		ret = -1;
	} else {
		pm8001_dbg(pm8001_ha, MSG,
			   "All FW components ready by %d ms\n",
			   (max_wait_time - max_wait_count) * FW_READY_INTERVAL);
	}
	return ret;
}

static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *base_addr;
	u32 value;
	u32 offset;
	u32 pcibar;
	u32 pcilogic;

	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);

	/*
	 * lower 26 bits of SCRATCHPAD0 register describes offset within the
	 * PCIe BAR where the MPI configuration table is present
	 */
	offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */

	pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n",
		   offset, value);
	/*
	 * Upper 6 bits describe the offset within PCI config space where BAR
	 * is located.
	 */
	pcilogic = (value & 0xFC000000) >> 26;
	pcibar = get_pci_bar_index(pcilogic);
	pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);

	/*
	 * Make sure the offset falls inside the ioremapped PCI BAR
	 */
	if (offset > pm8001_ha->io_mem[pcibar].memsize) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "Main cfg tbl offset outside %u > %u\n",
			   offset, pm8001_ha->io_mem[pcibar].memsize);
		return -EBUSY;
	}
	pm8001_ha->main_cfg_tbl_addr = base_addr =
		pm8001_ha->io_mem[pcibar].memvirtaddr + offset;

	/*
	 * Validate main configuration table address: first DWord should read
	 * "PMCS"
	 */
	value = pm8001_mr32(pm8001_ha->main_cfg_tbl_addr, 0);
	if (memcmp(&value, "PMCS", 4) != 0) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "BAD main config signature 0x%x\n",
			   value);
		return -EBUSY;
	}
	pm8001_dbg(pm8001_ha, INIT,
		   "VALID main config signature 0x%x\n", value);
	pm8001_ha->general_stat_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
					0xFFFFFF);
	pm8001_ha->inbnd_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
					0xFFFFFF);
	pm8001_ha->outbnd_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
					0xFFFFFF);
	pm8001_ha->ivt_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
					0xFFFFFF);
	pm8001_ha->pspa_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
					0xFFFFFF);
	pm8001_ha->fatal_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
					0xFFFFFF);

	pm8001_dbg(pm8001_ha, INIT, "GST OFFSET 0x%x\n",
		   pm8001_cr32(pm8001_ha, pcibar, offset + 0x18));
	pm8001_dbg(pm8001_ha, INIT, "INBND OFFSET 0x%x\n",
		   pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C));
	pm8001_dbg(pm8001_ha, INIT, "OBND OFFSET 0x%x\n",
		   pm8001_cr32(pm8001_ha, pcibar, offset + 0x20));
	pm8001_dbg(pm8001_ha, INIT, "IVT OFFSET 0x%x\n",
		   pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C));
	pm8001_dbg(pm8001_ha, INIT, "PSPA OFFSET 0x%x\n",
		   pm8001_cr32(pm8001_ha, pcibar, offset + 0x90));
	pm8001_dbg(pm8001_ha, INIT, "addr - main cfg %p general status %p\n",
		   pm8001_ha->main_cfg_tbl_addr,
		   pm8001_ha->general_stat_tbl_addr);
	pm8001_dbg(pm8001_ha, INIT, "addr - inbnd %p obnd %p\n",
		   pm8001_ha->inbnd_q_tbl_addr,
		   pm8001_ha->outbnd_q_tbl_addr);
	pm8001_dbg(pm8001_ha, INIT, "addr - pspa %p ivt %p\n",
		   pm8001_ha->pspa_q_tbl_addr,
		   pm8001_ha->ivt_tbl_addr);
	return 0;
}

/**
 * pm80xx_set_thermal_config - support the thermal configuration
 * @pm8001_ha: our hba card information.
 */
int
pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
{
	struct set_ctrl_cfg_req payload;
	struct inbound_queue_table *circularQ;
	int rc;
	u32 tag;
	u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
	u32 page_code;

	memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return -1;

	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	payload.tag = cpu_to_le32(tag);

	if (IS_SPCV_12G(pm8001_ha->pdev))
		page_code = THERMAL_PAGE_CODE_7H;
	else
		page_code = THERMAL_PAGE_CODE_8H;

	payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
			    (THERMAL_ENABLE << 8) | page_code;
	payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);

	pm8001_dbg(pm8001_ha, DEV,
		   "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
		   payload.cfg_pg[0], payload.cfg_pg[1]);

	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
			sizeof(payload), 0);
	if (rc)
		pm8001_tag_free(pm8001_ha, tag);
	return rc;

}

/**
 * pm80xx_set_sas_protocol_timer_config - support the SAS Protocol
 * Timer configuration page
 * @pm8001_ha: our hba card information.
 */
static int
pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
{
	struct set_ctrl_cfg_req payload;
	struct inbound_queue_table *circularQ;
	SASProtocolTimerConfig_t SASConfigPage;
	int rc;
	u32 tag;
	u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;

	memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
	memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));

	rc = pm8001_tag_alloc(pm8001_ha, &tag);

	if (rc)
		return -1;

	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	payload.tag = cpu_to_le32(tag);

	SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
	SASConfigPage.MST_MSI = 3 << 15;
	SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
	SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
				(SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
	SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;

	if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
		SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;


	SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
				SAS_OPNRJT_RTRY_INTVL;
	SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
				| SAS_COPNRJT_RTRY_TMO;
	SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
				| SAS_COPNRJT_RTRY_THR;
	SASConfigPage.MAX_AIP = SAS_MAX_AIP;

	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n",
		   SASConfigPage.pageCode);
	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n",
		   SASConfigPage.MST_MSI);
	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n",
		   SASConfigPage.STP_SSP_MCT_TMO);
	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n",
		   SASConfigPage.STP_FRM_TMO);
	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n",
		   SASConfigPage.STP_IDLE_TMO);
	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n",
		   SASConfigPage.OPNRJT_RTRY_INTVL);
	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n",
		   SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO);
	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n",
		   SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR);
	pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n",
		   SASConfigPage.MAX_AIP);

	memcpy(&payload.cfg_pg, &SASConfigPage,
			sizeof(SASProtocolTimerConfig_t));

	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
			sizeof(payload), 0);
	if (rc)
		pm8001_tag_free(pm8001_ha, tag);

	return rc;
}

/**
 * pm80xx_get_encrypt_info - Check for encryption
 * @pm8001_ha: our hba card information.
 */
static int
pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
{
	u32 scratch3_value;
	int ret = -1;

	/* Read encryption status from SCRATCH PAD 3 */
	scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);

	if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
					SCRATCH_PAD3_ENC_READY) {
		if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
			pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
						SCRATCH_PAD3_SMF_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
						SCRATCH_PAD3_SMA_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
						SCRATCH_PAD3_SMB_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
		pm8001_ha->encrypt_info.status = 0;
		pm8001_dbg(pm8001_ha, INIT,
			   "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X.Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
			   scratch3_value,
			   pm8001_ha->encrypt_info.cipher_mode,
			   pm8001_ha->encrypt_info.sec_mode,
			   pm8001_ha->encrypt_info.status);
		ret = 0;
	} else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
					SCRATCH_PAD3_ENC_DISABLED) {
		pm8001_dbg(pm8001_ha, INIT,
			   "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
			   scratch3_value);
		pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
		pm8001_ha->encrypt_info.cipher_mode = 0;
		pm8001_ha->encrypt_info.sec_mode = 0;
		ret = 0;
	} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
				SCRATCH_PAD3_ENC_DIS_ERR) {
		pm8001_ha->encrypt_info.status =
			(scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
		if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
			pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
					SCRATCH_PAD3_SMF_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
					SCRATCH_PAD3_SMA_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
					SCRATCH_PAD3_SMB_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
		pm8001_dbg(pm8001_ha, INIT,
			   "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
			   scratch3_value,
			   pm8001_ha->encrypt_info.cipher_mode,
			   pm8001_ha->encrypt_info.sec_mode,
			   pm8001_ha->encrypt_info.status);
	} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
				SCRATCH_PAD3_ENC_ENA_ERR) {

		pm8001_ha->encrypt_info.status =
			(scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
		if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
			pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
				SCRATCH_PAD3_SMF_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
				SCRATCH_PAD3_SMA_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
				SCRATCH_PAD3_SMB_ENABLED)
			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;

		pm8001_dbg(pm8001_ha, INIT,
			   "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
			   scratch3_value,
			   pm8001_ha->encrypt_info.cipher_mode,
			   pm8001_ha->encrypt_info.sec_mode,
			   pm8001_ha->encrypt_info.status);
	}
	return ret;
}

/**
 * pm80xx_encrypt_update - update flash with encryption information
 * @pm8001_ha: our hba card information.
 */
static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
{
	struct kek_mgmt_req payload;
	struct inbound_queue_table *circularQ;
	int rc;
	u32 tag;
	u32 opc = OPC_INB_KEK_MANAGEMENT;

	memset(&payload, 0, sizeof(struct kek_mgmt_req));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return -1;

	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	payload.tag = cpu_to_le32(tag);
	/* Currently only one key is used. New KEK index is 1.
	 * Current KEK index is 1. Store KEK to NVRAM is 1.
	 */
	payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
				   KEK_MGMT_SUBOP_KEYCARDUPDATE);

	pm8001_dbg(pm8001_ha, DEV,
		   "Saving Encryption info to flash. payload 0x%x\n",
		   payload.new_curidx_ksop);

	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
			sizeof(payload), 0);
	if (rc)
		pm8001_tag_free(pm8001_ha, tag);

	return rc;
}

/**
 * pm80xx_chip_init - the main init function that initializes whole PM8001 chip.
 * @pm8001_ha: our hba card information
 */
static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
{
	int ret;
	u8 i = 0;

	/* check the firmware status */
	if (-1 == check_fw_ready(pm8001_ha)) {
		pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
		return -EBUSY;
	}

	/* Initialize the controller fatal error flag */
	pm8001_ha->controller_fatal_error = false;

	/* Initialize pci space address eg: mpi offset */
	ret = init_pci_device_addresses(pm8001_ha);
	if (ret) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "Failed to init pci addresses");
		return ret;
	}
	init_default_table_values(pm8001_ha);
	read_main_config_table(pm8001_ha);
	read_general_status_table(pm8001_ha);
	read_inbnd_queue_table(pm8001_ha);
	read_outbnd_queue_table(pm8001_ha);
	read_phy_attr_table(pm8001_ha);

	/* update main config table, inbound table and outbound table */
	update_main_config_table(pm8001_ha);
	for (i = 0; i < pm8001_ha->max_q_num; i++) {
		update_inbnd_queue_table(pm8001_ha, i);
		update_outbnd_queue_table(pm8001_ha, i);
	}
	/* notify firmware update finished and check initialization status */
	if (0 == mpi_init_check(pm8001_ha)) {
		pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n");
	} else
		return -EBUSY;

	/* send SAS protocol timer configuration page to FW */
	ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);

	/* Check for encryption */
	if (pm8001_ha->chip->encrypt) {
		pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n");
		ret = pm80xx_get_encrypt_info(pm8001_ha);
		if (ret == -1) {
			pm8001_dbg(pm8001_ha, INIT, "Encryption error !!\n");
			if (pm8001_ha->encrypt_info.status == 0x81) {
				pm8001_dbg(pm8001_ha, INIT,
					   "Encryption enabled with error.Saving encryption key to flash\n");
				pm80xx_encrypt_update(pm8001_ha);
			}
		}
	}
	return 0;
}

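/**
 * mpi_uninit_check - signal the firmware to reset the MPI configuration
 * table and wait until the MPI state reports uninitialized.
 * @pm8001_ha: our hba card information
 */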
INIT,
				   "Encryption enabled with error. Saving encryption key to flash\n");
				pm80xx_encrypt_update(pm8001_ha);
			}
		}
	}
	return 0;
}

static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 max_wait_count;
	u32 value;
	u32 gst_len_mpistate;
	int ret;

	ret = init_pci_device_addresses(pm8001_ha);
	if (ret) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "Failed to init pci addresses\n");
		return ret;
	}

	/* Write bit1=1 to the Inbound DoorBell Register to tell the SPC FW
	 * that the table is stopped.
	 */
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);

	/* wait until Inbound DoorBell Clear Register toggled */
	if (IS_SPCV_12G(pm8001_ha->pdev)) {
		max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
	} else {
		max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
	}
	do {
		msleep(FW_READY_INTERVAL);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPCv_MSGU_CFG_TABLE_RESET;
	} while ((value != 0) && (--max_wait_count));

	if (!max_wait_count) {
		pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=%x\n", value);
		return -1;
	}

	/* check the MPI-State for termination in progress */
	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 100; /* 2 sec for spcv/ve */
	do {
		msleep(FW_READY_INTERVAL);
		gst_len_mpistate =
			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
				    GST_GSTLEN_MPIS_OFFSET);
		if (GST_MPI_STATE_UNINIT ==
		    (gst_len_mpistate & GST_MPI_STATE_MASK))
			break;
	} while (--max_wait_count);
	if (!max_wait_count) {
		pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n",
			   gst_len_mpistate & GST_MPI_STATE_MASK);
		return -1;
	}

	return 0;
}

/**
 * pm80xx_fatal_errors - returns a non-zero value *ONLY* when fatal errors
 * are detected
 * @pm8001_ha: our hba card information
 *
 * Fatal errors are recoverable only after a host reboot.
 */
int
pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
{
	int ret = 0;
	u32 scratch_pad_rsvd0 = pm8001_cr32(pm8001_ha, 0,
					MSGU_HOST_SCRATCH_PAD_6);
	u32 scratch_pad_rsvd1 = pm8001_cr32(pm8001_ha, 0,
					MSGU_HOST_SCRATCH_PAD_7);
	u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
	u32 scratch_pad2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
	u32 scratch_pad3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);

	if (pm8001_ha->chip_id != chip_8006 &&
	    pm8001_ha->chip_id != chip_8074 &&
	    pm8001_ha->chip_id != chip_8076) {
		return 0;
	}

	if (MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(scratch_pad1)) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "Fatal error SCRATCHPAD1 = 0x%x SCRATCHPAD2 = 0x%x SCRATCHPAD3 = 0x%x SCRATCHPAD_RSVD0 = 0x%x SCRATCHPAD_RSVD1 = 0x%x\n",
			   scratch_pad1, scratch_pad2, scratch_pad3,
			   scratch_pad_rsvd0, scratch_pad_rsvd1);
		ret = 1;
	}

	return ret;
}

/**
 * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all
 * FW registers are reset to their original state.
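 *
 * A short summary of the flow below: the MPI configuration table is shut
 * down first (only when the firmware is not already in a fatal state), the
 * reset value is written to SPC_REG_SOFT_RESET, and the bootloader/firmware
 * state is re-checked before the function returns.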
1582 * @pm8001_ha: our hba card information 1583 */ 1584 1585 static int 1586 pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) 1587 { 1588 u32 regval; 1589 u32 bootloader_state; 1590 u32 ibutton0, ibutton1; 1591 1592 /* Process MPI table uninitialization only if FW is ready */ 1593 if (!pm8001_ha->controller_fatal_error) { 1594 /* Check if MPI is in ready state to reset */ 1595 if (mpi_uninit_check(pm8001_ha) != 0) { 1596 u32 r0 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); 1597 u32 r1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); 1598 u32 r2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); 1599 u32 r3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); 1600 pm8001_dbg(pm8001_ha, FAIL, 1601 "MPI state is not ready scratch: %x:%x:%x:%x\n", 1602 r0, r1, r2, r3); 1603 /* if things aren't ready but the bootloader is ok then 1604 * try the reset anyway. 1605 */ 1606 if (r1 & SCRATCH_PAD1_BOOTSTATE_MASK) 1607 return -1; 1608 } 1609 } 1610 /* checked for reset register normal state; 0x0 */ 1611 regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); 1612 pm8001_dbg(pm8001_ha, INIT, "reset register before write : 0x%x\n", 1613 regval); 1614 1615 pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); 1616 msleep(500); 1617 1618 regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); 1619 pm8001_dbg(pm8001_ha, INIT, "reset register after write 0x%x\n", 1620 regval); 1621 1622 if ((regval & SPCv_SOFT_RESET_READ_MASK) == 1623 SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) { 1624 pm8001_dbg(pm8001_ha, MSG, 1625 " soft reset successful [regval: 0x%x]\n", 1626 regval); 1627 } else { 1628 pm8001_dbg(pm8001_ha, MSG, 1629 " soft reset failed [regval: 0x%x]\n", 1630 regval); 1631 1632 /* check bootloader is successfully executed or in HDA mode */ 1633 bootloader_state = 1634 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & 1635 SCRATCH_PAD1_BOOTSTATE_MASK; 1636 1637 if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) { 1638 pm8001_dbg(pm8001_ha, MSG, 1639 "Bootloader state - HDA mode SEEPROM\n"); 1640 } else if (bootloader_state == 1641 SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) { 1642 pm8001_dbg(pm8001_ha, MSG, 1643 "Bootloader state - HDA mode Bootstrap Pin\n"); 1644 } else if (bootloader_state == 1645 SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) { 1646 pm8001_dbg(pm8001_ha, MSG, 1647 "Bootloader state - HDA mode soft reset\n"); 1648 } else if (bootloader_state == 1649 SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) { 1650 pm8001_dbg(pm8001_ha, MSG, 1651 "Bootloader state-HDA mode critical error\n"); 1652 } 1653 return -EBUSY; 1654 } 1655 1656 /* check the firmware status after reset */ 1657 if (-1 == check_fw_ready(pm8001_ha)) { 1658 pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n"); 1659 /* check iButton feature support for motherboard controller */ 1660 if (pm8001_ha->pdev->subsystem_vendor != 1661 PCI_VENDOR_ID_ADAPTEC2 && 1662 pm8001_ha->pdev->subsystem_vendor != 1663 PCI_VENDOR_ID_ATTO && 1664 pm8001_ha->pdev->subsystem_vendor != 0) { 1665 ibutton0 = pm8001_cr32(pm8001_ha, 0, 1666 MSGU_HOST_SCRATCH_PAD_6); 1667 ibutton1 = pm8001_cr32(pm8001_ha, 0, 1668 MSGU_HOST_SCRATCH_PAD_7); 1669 if (!ibutton0 && !ibutton1) { 1670 pm8001_dbg(pm8001_ha, FAIL, 1671 "iButton Feature is not Available!!!\n"); 1672 return -EBUSY; 1673 } 1674 if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) { 1675 pm8001_dbg(pm8001_ha, FAIL, 1676 "CRC Check for iButton Feature Failed!!!\n"); 1677 return -EBUSY; 1678 } 1679 } 1680 } 1681 pm8001_dbg(pm8001_ha, INIT, "SPCv soft reset Complete\n"); 1682 return 0; 1683 } 1684 1685 
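/*
 * The uninit and reset paths above share one idiom: write a control value,
 * then poll a register with a bounded wait until the expected state shows
 * up. A minimal sketch of that idiom follows; the helper is illustrative
 * only (it is not used elsewhere in the driver) and simply restates what
 * mpi_uninit_check() and check_fw_ready() do by hand.
 */
static inline int pm80xx_poll_reg(struct pm8001_hba_info *pm8001_ha,
				  u32 offset, u32 mask, u32 expected,
				  u32 max_tries, u32 interval_ms)
{
	u32 val;

	while (max_tries--) {
		msleep(interval_ms);
		val = pm8001_cr32(pm8001_ha, 0, offset) & mask;
		if (val == expected)
			return 0;
	}

	return -ETIMEDOUT;	/* caller decides whether this is fatal */
}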
static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) 1686 { 1687 u32 i; 1688 1689 pm8001_dbg(pm8001_ha, INIT, "chip reset start\n"); 1690 1691 /* do SPCv chip reset. */ 1692 pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11); 1693 pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n"); 1694 1695 /* Check this ..whether delay is required or no */ 1696 /* delay 10 usec */ 1697 udelay(10); 1698 1699 /* wait for 20 msec until the firmware gets reloaded */ 1700 i = 20; 1701 do { 1702 mdelay(1); 1703 } while ((--i) != 0); 1704 1705 pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n"); 1706 } 1707 1708 /** 1709 * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt 1710 * @pm8001_ha: our hba card information 1711 */ 1712 static void 1713 pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) 1714 { 1715 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); 1716 pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); 1717 } 1718 1719 /** 1720 * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt 1721 * @pm8001_ha: our hba card information 1722 */ 1723 static void 1724 pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) 1725 { 1726 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL); 1727 } 1728 1729 /** 1730 * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt 1731 * @pm8001_ha: our hba card information 1732 * @vec: interrupt number to enable 1733 */ 1734 static void 1735 pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) 1736 { 1737 #ifdef PM8001_USE_MSIX 1738 u32 mask; 1739 mask = (u32)(1 << vec); 1740 1741 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); 1742 return; 1743 #endif 1744 pm80xx_chip_intx_interrupt_enable(pm8001_ha); 1745 1746 } 1747 1748 /** 1749 * pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt 1750 * @pm8001_ha: our hba card information 1751 * @vec: interrupt number to disable 1752 */ 1753 static void 1754 pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) 1755 { 1756 #ifdef PM8001_USE_MSIX 1757 u32 mask; 1758 if (vec == 0xFF) 1759 mask = 0xFFFFFFFF; 1760 else 1761 mask = (u32)(1 << vec); 1762 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); 1763 return; 1764 #endif 1765 pm80xx_chip_intx_interrupt_disable(pm8001_ha); 1766 } 1767 1768 static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, 1769 struct pm8001_device *pm8001_ha_dev) 1770 { 1771 int res; 1772 u32 ccb_tag; 1773 struct pm8001_ccb_info *ccb; 1774 struct sas_task *task = NULL; 1775 struct task_abort_req task_abort; 1776 struct inbound_queue_table *circularQ; 1777 u32 opc = OPC_INB_SATA_ABORT; 1778 int ret; 1779 1780 if (!pm8001_ha_dev) { 1781 pm8001_dbg(pm8001_ha, FAIL, "dev is null\n"); 1782 return; 1783 } 1784 1785 task = sas_alloc_slow_task(GFP_ATOMIC); 1786 1787 if (!task) { 1788 pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n"); 1789 return; 1790 } 1791 1792 task->task_done = pm8001_task_done; 1793 1794 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); 1795 if (res) { 1796 sas_free_task(task); 1797 return; 1798 } 1799 1800 ccb = &pm8001_ha->ccb_info[ccb_tag]; 1801 ccb->device = pm8001_ha_dev; 1802 ccb->ccb_tag = ccb_tag; 1803 ccb->task = task; 1804 1805 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 1806 1807 memset(&task_abort, 0, sizeof(task_abort)); 1808 task_abort.abort_all = cpu_to_le32(1); 1809 task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 1810 task_abort.tag = cpu_to_le32(ccb_tag); 1811 1812 ret = 
pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
			sizeof(task_abort), 0);
	pm8001_dbg(pm8001_ha, FAIL, "Executing abort task end\n");
	if (ret) {
		sas_free_task(task);
		pm8001_tag_free(pm8001_ha, ccb_tag);
	}
}

static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
		struct pm8001_device *pm8001_ha_dev)
{
	struct sata_start_req sata_cmd;
	int res;
	u32 ccb_tag;
	struct pm8001_ccb_info *ccb;
	struct sas_task *task = NULL;
	struct host_to_dev_fis fis;
	struct domain_device *dev;
	struct inbound_queue_table *circularQ;
	u32 opc = OPC_INB_SATA_HOST_OPSTART;

	task = sas_alloc_slow_task(GFP_ATOMIC);

	if (!task) {
		pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
		return;
	}
	task->task_done = pm8001_task_done;

	res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
	if (res) {
		sas_free_task(task);
		pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
		return;
	}

	/* allocate the domain device by ourselves as libsas
	 * is not going to provide one
	 */
	dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
	if (!dev) {
		sas_free_task(task);
		pm8001_tag_free(pm8001_ha, ccb_tag);
		pm8001_dbg(pm8001_ha, FAIL,
			   "Domain device cannot be allocated\n");
		return;
	}

	task->dev = dev;
	task->dev->lldd_dev = pm8001_ha_dev;

	ccb = &pm8001_ha->ccb_info[ccb_tag];
	ccb->device = pm8001_ha_dev;
	ccb->ccb_tag = ccb_tag;
	ccb->task = task;
	ccb->n_elem = 0;
	pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
	pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;

	memset(&sata_cmd, 0, sizeof(sata_cmd));
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	/* construct read log FIS */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.fis_type = 0x27;
	fis.flags = 0x80;
	fis.command = ATA_CMD_READ_LOG_EXT;
	fis.lbal = 0x10;
	fis.sector_count = 0x1;

	sata_cmd.tag = cpu_to_le32(ccb_tag);
	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
	sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
	memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));

	res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
			sizeof(sata_cmd), 0);
	pm8001_dbg(pm8001_ha, FAIL, "Executing read log end\n");
	if (res) {
		sas_free_task(task);
		pm8001_tag_free(pm8001_ha, ccb_tag);
		kfree(dev);
	}
}

/**
 * mpi_ssp_completion - process the FW response to an SSP request.
 * @pm8001_ha: our hba card information
 * @piomb: the message contents of this outbound message.
 *
 * When the FW has completed an SSP request (for example an I/O request) and
 * has filled the SG buffers with the returned data, it posts this event to
 * signal that the job is finished and that the corresponding buffer holds
 * the result. The caller that is waiting on the result is then notified so
 * it can report completion of the task to the upper layer.
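 *
 * The SSP, SATA and SMP completion handlers below all follow the same
 * shape: decode the IOMB payload, translate the firmware status code into a
 * libsas task_status, drop the device's running_req count where
 * appropriate, and finally free the ccb and complete the task back to
 * libsas.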
1908 */ 1909 static void 1910 mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) 1911 { 1912 struct sas_task *t; 1913 struct pm8001_ccb_info *ccb; 1914 unsigned long flags; 1915 u32 status; 1916 u32 param; 1917 u32 tag; 1918 struct ssp_completion_resp *psspPayload; 1919 struct task_status_struct *ts; 1920 struct ssp_response_iu *iu; 1921 struct pm8001_device *pm8001_dev; 1922 psspPayload = (struct ssp_completion_resp *)(piomb + 4); 1923 status = le32_to_cpu(psspPayload->status); 1924 tag = le32_to_cpu(psspPayload->tag); 1925 ccb = &pm8001_ha->ccb_info[tag]; 1926 if ((status == IO_ABORTED) && ccb->open_retry) { 1927 /* Being completed by another */ 1928 ccb->open_retry = 0; 1929 return; 1930 } 1931 pm8001_dev = ccb->device; 1932 param = le32_to_cpu(psspPayload->param); 1933 t = ccb->task; 1934 1935 if (status && status != IO_UNDERFLOW) 1936 pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status); 1937 if (unlikely(!t || !t->lldd_task || !t->dev)) 1938 return; 1939 ts = &t->task_status; 1940 1941 pm8001_dbg(pm8001_ha, DEV, 1942 "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t); 1943 1944 /* Print sas address of IO failed device */ 1945 if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && 1946 (status != IO_UNDERFLOW)) 1947 pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n", 1948 SAS_ADDR(t->dev->sas_addr)); 1949 1950 switch (status) { 1951 case IO_SUCCESS: 1952 pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS ,param = 0x%x\n", 1953 param); 1954 if (param == 0) { 1955 ts->resp = SAS_TASK_COMPLETE; 1956 ts->stat = SAS_SAM_STAT_GOOD; 1957 } else { 1958 ts->resp = SAS_TASK_COMPLETE; 1959 ts->stat = SAS_PROTO_RESPONSE; 1960 ts->residual = param; 1961 iu = &psspPayload->ssp_resp_iu; 1962 sas_ssp_task_response(pm8001_ha->dev, t, iu); 1963 } 1964 if (pm8001_dev) 1965 atomic_dec(&pm8001_dev->running_req); 1966 break; 1967 case IO_ABORTED: 1968 pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); 1969 ts->resp = SAS_TASK_COMPLETE; 1970 ts->stat = SAS_ABORTED_TASK; 1971 if (pm8001_dev) 1972 atomic_dec(&pm8001_dev->running_req); 1973 break; 1974 case IO_UNDERFLOW: 1975 /* SSP Completion with error */ 1976 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW ,param = 0x%x\n", 1977 param); 1978 ts->resp = SAS_TASK_COMPLETE; 1979 ts->stat = SAS_DATA_UNDERRUN; 1980 ts->residual = param; 1981 if (pm8001_dev) 1982 atomic_dec(&pm8001_dev->running_req); 1983 break; 1984 case IO_NO_DEVICE: 1985 pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); 1986 ts->resp = SAS_TASK_UNDELIVERED; 1987 ts->stat = SAS_PHY_DOWN; 1988 if (pm8001_dev) 1989 atomic_dec(&pm8001_dev->running_req); 1990 break; 1991 case IO_XFER_ERROR_BREAK: 1992 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); 1993 ts->resp = SAS_TASK_COMPLETE; 1994 ts->stat = SAS_OPEN_REJECT; 1995 /* Force the midlayer to retry */ 1996 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 1997 if (pm8001_dev) 1998 atomic_dec(&pm8001_dev->running_req); 1999 break; 2000 case IO_XFER_ERROR_PHY_NOT_READY: 2001 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); 2002 ts->resp = SAS_TASK_COMPLETE; 2003 ts->stat = SAS_OPEN_REJECT; 2004 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2005 if (pm8001_dev) 2006 atomic_dec(&pm8001_dev->running_req); 2007 break; 2008 case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME: 2009 pm8001_dbg(pm8001_ha, IO, 2010 "IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"); 2011 ts->resp = SAS_TASK_COMPLETE; 2012 ts->stat = SAS_OPEN_REJECT; 2013 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2014 if (pm8001_dev) 2015 atomic_dec(&pm8001_dev->running_req); 2016 
break; 2017 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: 2018 pm8001_dbg(pm8001_ha, IO, 2019 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); 2020 ts->resp = SAS_TASK_COMPLETE; 2021 ts->stat = SAS_OPEN_REJECT; 2022 ts->open_rej_reason = SAS_OREJ_EPROTO; 2023 if (pm8001_dev) 2024 atomic_dec(&pm8001_dev->running_req); 2025 break; 2026 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: 2027 pm8001_dbg(pm8001_ha, IO, 2028 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); 2029 ts->resp = SAS_TASK_COMPLETE; 2030 ts->stat = SAS_OPEN_REJECT; 2031 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2032 if (pm8001_dev) 2033 atomic_dec(&pm8001_dev->running_req); 2034 break; 2035 case IO_OPEN_CNX_ERROR_BREAK: 2036 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); 2037 ts->resp = SAS_TASK_COMPLETE; 2038 ts->stat = SAS_OPEN_REJECT; 2039 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2040 if (pm8001_dev) 2041 atomic_dec(&pm8001_dev->running_req); 2042 break; 2043 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 2044 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: 2045 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: 2046 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: 2047 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: 2048 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: 2049 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); 2050 ts->resp = SAS_TASK_COMPLETE; 2051 ts->stat = SAS_OPEN_REJECT; 2052 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2053 if (!t->uldd_task) 2054 pm8001_handle_event(pm8001_ha, 2055 pm8001_dev, 2056 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2057 break; 2058 case IO_OPEN_CNX_ERROR_BAD_DESTINATION: 2059 pm8001_dbg(pm8001_ha, IO, 2060 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); 2061 ts->resp = SAS_TASK_COMPLETE; 2062 ts->stat = SAS_OPEN_REJECT; 2063 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 2064 if (pm8001_dev) 2065 atomic_dec(&pm8001_dev->running_req); 2066 break; 2067 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 2068 pm8001_dbg(pm8001_ha, IO, 2069 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); 2070 ts->resp = SAS_TASK_COMPLETE; 2071 ts->stat = SAS_OPEN_REJECT; 2072 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2073 if (pm8001_dev) 2074 atomic_dec(&pm8001_dev->running_req); 2075 break; 2076 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: 2077 pm8001_dbg(pm8001_ha, IO, 2078 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); 2079 ts->resp = SAS_TASK_UNDELIVERED; 2080 ts->stat = SAS_OPEN_REJECT; 2081 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 2082 if (pm8001_dev) 2083 atomic_dec(&pm8001_dev->running_req); 2084 break; 2085 case IO_XFER_ERROR_NAK_RECEIVED: 2086 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); 2087 ts->resp = SAS_TASK_COMPLETE; 2088 ts->stat = SAS_OPEN_REJECT; 2089 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2090 if (pm8001_dev) 2091 atomic_dec(&pm8001_dev->running_req); 2092 break; 2093 case IO_XFER_ERROR_ACK_NAK_TIMEOUT: 2094 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); 2095 ts->resp = SAS_TASK_COMPLETE; 2096 ts->stat = SAS_NAK_R_ERR; 2097 if (pm8001_dev) 2098 atomic_dec(&pm8001_dev->running_req); 2099 break; 2100 case IO_XFER_ERROR_DMA: 2101 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); 2102 ts->resp = SAS_TASK_COMPLETE; 2103 ts->stat = SAS_OPEN_REJECT; 2104 if (pm8001_dev) 2105 atomic_dec(&pm8001_dev->running_req); 2106 break; 2107 case IO_XFER_OPEN_RETRY_TIMEOUT: 2108 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); 2109 ts->resp = SAS_TASK_COMPLETE; 2110 ts->stat = SAS_OPEN_REJECT; 2111 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2112 if (pm8001_dev) 2113 
atomic_dec(&pm8001_dev->running_req); 2114 break; 2115 case IO_XFER_ERROR_OFFSET_MISMATCH: 2116 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); 2117 ts->resp = SAS_TASK_COMPLETE; 2118 ts->stat = SAS_OPEN_REJECT; 2119 if (pm8001_dev) 2120 atomic_dec(&pm8001_dev->running_req); 2121 break; 2122 case IO_PORT_IN_RESET: 2123 pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); 2124 ts->resp = SAS_TASK_COMPLETE; 2125 ts->stat = SAS_OPEN_REJECT; 2126 if (pm8001_dev) 2127 atomic_dec(&pm8001_dev->running_req); 2128 break; 2129 case IO_DS_NON_OPERATIONAL: 2130 pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); 2131 ts->resp = SAS_TASK_COMPLETE; 2132 ts->stat = SAS_OPEN_REJECT; 2133 if (!t->uldd_task) 2134 pm8001_handle_event(pm8001_ha, 2135 pm8001_dev, 2136 IO_DS_NON_OPERATIONAL); 2137 break; 2138 case IO_DS_IN_RECOVERY: 2139 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); 2140 ts->resp = SAS_TASK_COMPLETE; 2141 ts->stat = SAS_OPEN_REJECT; 2142 if (pm8001_dev) 2143 atomic_dec(&pm8001_dev->running_req); 2144 break; 2145 case IO_TM_TAG_NOT_FOUND: 2146 pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n"); 2147 ts->resp = SAS_TASK_COMPLETE; 2148 ts->stat = SAS_OPEN_REJECT; 2149 if (pm8001_dev) 2150 atomic_dec(&pm8001_dev->running_req); 2151 break; 2152 case IO_SSP_EXT_IU_ZERO_LEN_ERROR: 2153 pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"); 2154 ts->resp = SAS_TASK_COMPLETE; 2155 ts->stat = SAS_OPEN_REJECT; 2156 if (pm8001_dev) 2157 atomic_dec(&pm8001_dev->running_req); 2158 break; 2159 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 2160 pm8001_dbg(pm8001_ha, IO, 2161 "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); 2162 ts->resp = SAS_TASK_COMPLETE; 2163 ts->stat = SAS_OPEN_REJECT; 2164 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2165 if (pm8001_dev) 2166 atomic_dec(&pm8001_dev->running_req); 2167 break; 2168 default: 2169 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); 2170 /* not allowed case. 
Therefore, return failed status */ 2171 ts->resp = SAS_TASK_COMPLETE; 2172 ts->stat = SAS_OPEN_REJECT; 2173 if (pm8001_dev) 2174 atomic_dec(&pm8001_dev->running_req); 2175 break; 2176 } 2177 pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ", 2178 psspPayload->ssp_resp_iu.status); 2179 spin_lock_irqsave(&t->task_state_lock, flags); 2180 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 2181 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 2182 t->task_state_flags |= SAS_TASK_STATE_DONE; 2183 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { 2184 spin_unlock_irqrestore(&t->task_state_lock, flags); 2185 pm8001_dbg(pm8001_ha, FAIL, 2186 "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", 2187 t, status, ts->resp, ts->stat); 2188 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2189 if (t->slow_task) 2190 complete(&t->slow_task->completion); 2191 } else { 2192 spin_unlock_irqrestore(&t->task_state_lock, flags); 2193 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2194 mb();/* in order to force CPU ordering */ 2195 t->task_done(t); 2196 } 2197 } 2198 2199 /*See the comments for mpi_ssp_completion */ 2200 static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb) 2201 { 2202 struct sas_task *t; 2203 unsigned long flags; 2204 struct task_status_struct *ts; 2205 struct pm8001_ccb_info *ccb; 2206 struct pm8001_device *pm8001_dev; 2207 struct ssp_event_resp *psspPayload = 2208 (struct ssp_event_resp *)(piomb + 4); 2209 u32 event = le32_to_cpu(psspPayload->event); 2210 u32 tag = le32_to_cpu(psspPayload->tag); 2211 u32 port_id = le32_to_cpu(psspPayload->port_id); 2212 2213 ccb = &pm8001_ha->ccb_info[tag]; 2214 t = ccb->task; 2215 pm8001_dev = ccb->device; 2216 if (event) 2217 pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event); 2218 if (unlikely(!t || !t->lldd_task || !t->dev)) 2219 return; 2220 ts = &t->task_status; 2221 pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n", 2222 port_id, tag, event); 2223 switch (event) { 2224 case IO_OVERFLOW: 2225 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); 2226 ts->resp = SAS_TASK_COMPLETE; 2227 ts->stat = SAS_DATA_OVERRUN; 2228 ts->residual = 0; 2229 if (pm8001_dev) 2230 atomic_dec(&pm8001_dev->running_req); 2231 break; 2232 case IO_XFER_ERROR_BREAK: 2233 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); 2234 pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); 2235 return; 2236 case IO_XFER_ERROR_PHY_NOT_READY: 2237 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); 2238 ts->resp = SAS_TASK_COMPLETE; 2239 ts->stat = SAS_OPEN_REJECT; 2240 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2241 break; 2242 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: 2243 pm8001_dbg(pm8001_ha, IO, 2244 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); 2245 ts->resp = SAS_TASK_COMPLETE; 2246 ts->stat = SAS_OPEN_REJECT; 2247 ts->open_rej_reason = SAS_OREJ_EPROTO; 2248 break; 2249 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: 2250 pm8001_dbg(pm8001_ha, IO, 2251 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); 2252 ts->resp = SAS_TASK_COMPLETE; 2253 ts->stat = SAS_OPEN_REJECT; 2254 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2255 break; 2256 case IO_OPEN_CNX_ERROR_BREAK: 2257 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); 2258 ts->resp = SAS_TASK_COMPLETE; 2259 ts->stat = SAS_OPEN_REJECT; 2260 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2261 break; 2262 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 2263 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: 2264 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: 2265 case 
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: 2266 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: 2267 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: 2268 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); 2269 ts->resp = SAS_TASK_COMPLETE; 2270 ts->stat = SAS_OPEN_REJECT; 2271 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2272 if (!t->uldd_task) 2273 pm8001_handle_event(pm8001_ha, 2274 pm8001_dev, 2275 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2276 break; 2277 case IO_OPEN_CNX_ERROR_BAD_DESTINATION: 2278 pm8001_dbg(pm8001_ha, IO, 2279 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); 2280 ts->resp = SAS_TASK_COMPLETE; 2281 ts->stat = SAS_OPEN_REJECT; 2282 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 2283 break; 2284 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 2285 pm8001_dbg(pm8001_ha, IO, 2286 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); 2287 ts->resp = SAS_TASK_COMPLETE; 2288 ts->stat = SAS_OPEN_REJECT; 2289 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2290 break; 2291 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: 2292 pm8001_dbg(pm8001_ha, IO, 2293 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); 2294 ts->resp = SAS_TASK_COMPLETE; 2295 ts->stat = SAS_OPEN_REJECT; 2296 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 2297 break; 2298 case IO_XFER_ERROR_NAK_RECEIVED: 2299 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); 2300 ts->resp = SAS_TASK_COMPLETE; 2301 ts->stat = SAS_OPEN_REJECT; 2302 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2303 break; 2304 case IO_XFER_ERROR_ACK_NAK_TIMEOUT: 2305 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); 2306 ts->resp = SAS_TASK_COMPLETE; 2307 ts->stat = SAS_NAK_R_ERR; 2308 break; 2309 case IO_XFER_OPEN_RETRY_TIMEOUT: 2310 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); 2311 pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); 2312 return; 2313 case IO_XFER_ERROR_UNEXPECTED_PHASE: 2314 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); 2315 ts->resp = SAS_TASK_COMPLETE; 2316 ts->stat = SAS_DATA_OVERRUN; 2317 break; 2318 case IO_XFER_ERROR_XFER_RDY_OVERRUN: 2319 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); 2320 ts->resp = SAS_TASK_COMPLETE; 2321 ts->stat = SAS_DATA_OVERRUN; 2322 break; 2323 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: 2324 pm8001_dbg(pm8001_ha, IO, 2325 "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); 2326 ts->resp = SAS_TASK_COMPLETE; 2327 ts->stat = SAS_DATA_OVERRUN; 2328 break; 2329 case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: 2330 pm8001_dbg(pm8001_ha, IO, 2331 "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"); 2332 ts->resp = SAS_TASK_COMPLETE; 2333 ts->stat = SAS_DATA_OVERRUN; 2334 break; 2335 case IO_XFER_ERROR_OFFSET_MISMATCH: 2336 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); 2337 ts->resp = SAS_TASK_COMPLETE; 2338 ts->stat = SAS_DATA_OVERRUN; 2339 break; 2340 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: 2341 pm8001_dbg(pm8001_ha, IO, 2342 "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); 2343 ts->resp = SAS_TASK_COMPLETE; 2344 ts->stat = SAS_DATA_OVERRUN; 2345 break; 2346 case IO_XFER_ERROR_INTERNAL_CRC_ERROR: 2347 pm8001_dbg(pm8001_ha, IOERR, 2348 "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"); 2349 /* TBC: used default set values */ 2350 ts->resp = SAS_TASK_COMPLETE; 2351 ts->stat = SAS_DATA_OVERRUN; 2352 break; 2353 case IO_XFER_CMD_FRAME_ISSUED: 2354 pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); 2355 return; 2356 default: 2357 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event); 2358 /* not allowed case. 
Therefore, return failed status */ 2359 ts->resp = SAS_TASK_COMPLETE; 2360 ts->stat = SAS_DATA_OVERRUN; 2361 break; 2362 } 2363 spin_lock_irqsave(&t->task_state_lock, flags); 2364 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 2365 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 2366 t->task_state_flags |= SAS_TASK_STATE_DONE; 2367 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { 2368 spin_unlock_irqrestore(&t->task_state_lock, flags); 2369 pm8001_dbg(pm8001_ha, FAIL, 2370 "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", 2371 t, event, ts->resp, ts->stat); 2372 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2373 } else { 2374 spin_unlock_irqrestore(&t->task_state_lock, flags); 2375 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2376 mb();/* in order to force CPU ordering */ 2377 t->task_done(t); 2378 } 2379 } 2380 2381 /*See the comments for mpi_ssp_completion */ 2382 static void 2383 mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, 2384 struct outbound_queue_table *circularQ, void *piomb) 2385 { 2386 struct sas_task *t; 2387 struct pm8001_ccb_info *ccb; 2388 u32 param; 2389 u32 status; 2390 u32 tag; 2391 int i, j; 2392 u8 sata_addr_low[4]; 2393 u32 temp_sata_addr_low, temp_sata_addr_hi; 2394 u8 sata_addr_hi[4]; 2395 struct sata_completion_resp *psataPayload; 2396 struct task_status_struct *ts; 2397 struct ata_task_resp *resp ; 2398 u32 *sata_resp; 2399 struct pm8001_device *pm8001_dev; 2400 unsigned long flags; 2401 2402 psataPayload = (struct sata_completion_resp *)(piomb + 4); 2403 status = le32_to_cpu(psataPayload->status); 2404 param = le32_to_cpu(psataPayload->param); 2405 tag = le32_to_cpu(psataPayload->tag); 2406 2407 if (!tag) { 2408 pm8001_dbg(pm8001_ha, FAIL, "tag null\n"); 2409 return; 2410 } 2411 2412 ccb = &pm8001_ha->ccb_info[tag]; 2413 t = ccb->task; 2414 pm8001_dev = ccb->device; 2415 2416 if (t) { 2417 if (t->dev && (t->dev->lldd_dev)) 2418 pm8001_dev = t->dev->lldd_dev; 2419 } else { 2420 pm8001_dbg(pm8001_ha, FAIL, "task null\n"); 2421 return; 2422 } 2423 2424 if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) 2425 && unlikely(!t || !t->lldd_task || !t->dev)) { 2426 pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); 2427 return; 2428 } 2429 2430 ts = &t->task_status; 2431 2432 if (status != IO_SUCCESS) { 2433 pm8001_dbg(pm8001_ha, FAIL, 2434 "IO failed device_id %u status 0x%x tag %d\n", 2435 pm8001_dev->device_id, status, tag); 2436 } 2437 2438 /* Print sas address of IO failed device */ 2439 if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && 2440 (status != IO_UNDERFLOW)) { 2441 if (!((t->dev->parent) && 2442 (dev_is_expander(t->dev->parent->dev_type)))) { 2443 for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++) 2444 sata_addr_low[i] = pm8001_ha->sas_addr[j]; 2445 for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++) 2446 sata_addr_hi[i] = pm8001_ha->sas_addr[j]; 2447 memcpy(&temp_sata_addr_low, sata_addr_low, 2448 sizeof(sata_addr_low)); 2449 memcpy(&temp_sata_addr_hi, sata_addr_hi, 2450 sizeof(sata_addr_hi)); 2451 temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) 2452 |((temp_sata_addr_hi << 8) & 2453 0xff0000) | 2454 ((temp_sata_addr_hi >> 8) 2455 & 0xff00) | 2456 ((temp_sata_addr_hi << 24) & 2457 0xff000000)); 2458 temp_sata_addr_low = ((((temp_sata_addr_low >> 24) 2459 & 0xff) | 2460 ((temp_sata_addr_low << 8) 2461 & 0xff0000) | 2462 ((temp_sata_addr_low >> 8) 2463 & 0xff00) | 2464 ((temp_sata_addr_low << 24) 2465 & 0xff000000)) + 2466 pm8001_dev->attached_phy + 2467 0x10); 2468 pm8001_dbg(pm8001_ha, 
FAIL, 2469 "SAS Address of IO Failure Drive:%08x%08x\n", 2470 temp_sata_addr_hi, 2471 temp_sata_addr_low); 2472 2473 } else { 2474 pm8001_dbg(pm8001_ha, FAIL, 2475 "SAS Address of IO Failure Drive:%016llx\n", 2476 SAS_ADDR(t->dev->sas_addr)); 2477 } 2478 } 2479 switch (status) { 2480 case IO_SUCCESS: 2481 pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); 2482 if (param == 0) { 2483 ts->resp = SAS_TASK_COMPLETE; 2484 ts->stat = SAS_SAM_STAT_GOOD; 2485 /* check if response is for SEND READ LOG */ 2486 if (pm8001_dev && 2487 (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { 2488 /* set new bit for abort_all */ 2489 pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; 2490 /* clear bit for read log */ 2491 pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; 2492 pm80xx_send_abort_all(pm8001_ha, pm8001_dev); 2493 /* Free the tag */ 2494 pm8001_tag_free(pm8001_ha, tag); 2495 sas_free_task(t); 2496 return; 2497 } 2498 } else { 2499 u8 len; 2500 ts->resp = SAS_TASK_COMPLETE; 2501 ts->stat = SAS_PROTO_RESPONSE; 2502 ts->residual = param; 2503 pm8001_dbg(pm8001_ha, IO, 2504 "SAS_PROTO_RESPONSE len = %d\n", 2505 param); 2506 sata_resp = &psataPayload->sata_resp[0]; 2507 resp = (struct ata_task_resp *)ts->buf; 2508 if (t->ata_task.dma_xfer == 0 && 2509 t->data_dir == DMA_FROM_DEVICE) { 2510 len = sizeof(struct pio_setup_fis); 2511 pm8001_dbg(pm8001_ha, IO, 2512 "PIO read len = %d\n", len); 2513 } else if (t->ata_task.use_ncq) { 2514 len = sizeof(struct set_dev_bits_fis); 2515 pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", 2516 len); 2517 } else { 2518 len = sizeof(struct dev_to_host_fis); 2519 pm8001_dbg(pm8001_ha, IO, "other len = %d\n", 2520 len); 2521 } 2522 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { 2523 resp->frame_len = len; 2524 memcpy(&resp->ending_fis[0], sata_resp, len); 2525 ts->buf_valid_size = sizeof(*resp); 2526 } else 2527 pm8001_dbg(pm8001_ha, IO, 2528 "response too large\n"); 2529 } 2530 if (pm8001_dev) 2531 atomic_dec(&pm8001_dev->running_req); 2532 break; 2533 case IO_ABORTED: 2534 pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); 2535 ts->resp = SAS_TASK_COMPLETE; 2536 ts->stat = SAS_ABORTED_TASK; 2537 if (pm8001_dev) 2538 atomic_dec(&pm8001_dev->running_req); 2539 break; 2540 /* following cases are to do cases */ 2541 case IO_UNDERFLOW: 2542 /* SATA Completion with error */ 2543 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param); 2544 ts->resp = SAS_TASK_COMPLETE; 2545 ts->stat = SAS_DATA_UNDERRUN; 2546 ts->residual = param; 2547 if (pm8001_dev) 2548 atomic_dec(&pm8001_dev->running_req); 2549 break; 2550 case IO_NO_DEVICE: 2551 pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); 2552 ts->resp = SAS_TASK_UNDELIVERED; 2553 ts->stat = SAS_PHY_DOWN; 2554 if (pm8001_dev) 2555 atomic_dec(&pm8001_dev->running_req); 2556 break; 2557 case IO_XFER_ERROR_BREAK: 2558 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); 2559 ts->resp = SAS_TASK_COMPLETE; 2560 ts->stat = SAS_INTERRUPTED; 2561 if (pm8001_dev) 2562 atomic_dec(&pm8001_dev->running_req); 2563 break; 2564 case IO_XFER_ERROR_PHY_NOT_READY: 2565 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); 2566 ts->resp = SAS_TASK_COMPLETE; 2567 ts->stat = SAS_OPEN_REJECT; 2568 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2569 if (pm8001_dev) 2570 atomic_dec(&pm8001_dev->running_req); 2571 break; 2572 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: 2573 pm8001_dbg(pm8001_ha, IO, 2574 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); 2575 ts->resp = SAS_TASK_COMPLETE; 2576 ts->stat = SAS_OPEN_REJECT; 2577 ts->open_rej_reason = SAS_OREJ_EPROTO; 2578 if (pm8001_dev) 2579 
atomic_dec(&pm8001_dev->running_req); 2580 break; 2581 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: 2582 pm8001_dbg(pm8001_ha, IO, 2583 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); 2584 ts->resp = SAS_TASK_COMPLETE; 2585 ts->stat = SAS_OPEN_REJECT; 2586 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2587 if (pm8001_dev) 2588 atomic_dec(&pm8001_dev->running_req); 2589 break; 2590 case IO_OPEN_CNX_ERROR_BREAK: 2591 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); 2592 ts->resp = SAS_TASK_COMPLETE; 2593 ts->stat = SAS_OPEN_REJECT; 2594 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; 2595 if (pm8001_dev) 2596 atomic_dec(&pm8001_dev->running_req); 2597 break; 2598 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 2599 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: 2600 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: 2601 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: 2602 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: 2603 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: 2604 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); 2605 ts->resp = SAS_TASK_COMPLETE; 2606 ts->stat = SAS_DEV_NO_RESPONSE; 2607 if (!t->uldd_task) { 2608 pm8001_handle_event(pm8001_ha, 2609 pm8001_dev, 2610 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2611 ts->resp = SAS_TASK_UNDELIVERED; 2612 ts->stat = SAS_QUEUE_FULL; 2613 spin_unlock_irqrestore(&circularQ->oq_lock, 2614 circularQ->lock_flags); 2615 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2616 spin_lock_irqsave(&circularQ->oq_lock, 2617 circularQ->lock_flags); 2618 return; 2619 } 2620 break; 2621 case IO_OPEN_CNX_ERROR_BAD_DESTINATION: 2622 pm8001_dbg(pm8001_ha, IO, 2623 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); 2624 ts->resp = SAS_TASK_UNDELIVERED; 2625 ts->stat = SAS_OPEN_REJECT; 2626 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 2627 if (!t->uldd_task) { 2628 pm8001_handle_event(pm8001_ha, 2629 pm8001_dev, 2630 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2631 ts->resp = SAS_TASK_UNDELIVERED; 2632 ts->stat = SAS_QUEUE_FULL; 2633 spin_unlock_irqrestore(&circularQ->oq_lock, 2634 circularQ->lock_flags); 2635 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2636 spin_lock_irqsave(&circularQ->oq_lock, 2637 circularQ->lock_flags); 2638 return; 2639 } 2640 break; 2641 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 2642 pm8001_dbg(pm8001_ha, IO, 2643 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); 2644 ts->resp = SAS_TASK_COMPLETE; 2645 ts->stat = SAS_OPEN_REJECT; 2646 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2647 if (pm8001_dev) 2648 atomic_dec(&pm8001_dev->running_req); 2649 break; 2650 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 2651 pm8001_dbg(pm8001_ha, IO, 2652 "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"); 2653 ts->resp = SAS_TASK_COMPLETE; 2654 ts->stat = SAS_DEV_NO_RESPONSE; 2655 if (!t->uldd_task) { 2656 pm8001_handle_event(pm8001_ha, 2657 pm8001_dev, 2658 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); 2659 ts->resp = SAS_TASK_UNDELIVERED; 2660 ts->stat = SAS_QUEUE_FULL; 2661 spin_unlock_irqrestore(&circularQ->oq_lock, 2662 circularQ->lock_flags); 2663 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2664 spin_lock_irqsave(&circularQ->oq_lock, 2665 circularQ->lock_flags); 2666 return; 2667 } 2668 break; 2669 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: 2670 pm8001_dbg(pm8001_ha, IO, 2671 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); 2672 ts->resp = SAS_TASK_COMPLETE; 2673 ts->stat = SAS_OPEN_REJECT; 2674 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 2675 if (pm8001_dev) 2676 atomic_dec(&pm8001_dev->running_req); 2677 break; 2678 case IO_XFER_ERROR_NAK_RECEIVED: 2679 pm8001_dbg(pm8001_ha, IO, 
"IO_XFER_ERROR_NAK_RECEIVED\n"); 2680 ts->resp = SAS_TASK_COMPLETE; 2681 ts->stat = SAS_NAK_R_ERR; 2682 if (pm8001_dev) 2683 atomic_dec(&pm8001_dev->running_req); 2684 break; 2685 case IO_XFER_ERROR_ACK_NAK_TIMEOUT: 2686 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); 2687 ts->resp = SAS_TASK_COMPLETE; 2688 ts->stat = SAS_NAK_R_ERR; 2689 if (pm8001_dev) 2690 atomic_dec(&pm8001_dev->running_req); 2691 break; 2692 case IO_XFER_ERROR_DMA: 2693 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); 2694 ts->resp = SAS_TASK_COMPLETE; 2695 ts->stat = SAS_ABORTED_TASK; 2696 if (pm8001_dev) 2697 atomic_dec(&pm8001_dev->running_req); 2698 break; 2699 case IO_XFER_ERROR_SATA_LINK_TIMEOUT: 2700 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"); 2701 ts->resp = SAS_TASK_UNDELIVERED; 2702 ts->stat = SAS_DEV_NO_RESPONSE; 2703 if (pm8001_dev) 2704 atomic_dec(&pm8001_dev->running_req); 2705 break; 2706 case IO_XFER_ERROR_REJECTED_NCQ_MODE: 2707 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); 2708 ts->resp = SAS_TASK_COMPLETE; 2709 ts->stat = SAS_DATA_UNDERRUN; 2710 if (pm8001_dev) 2711 atomic_dec(&pm8001_dev->running_req); 2712 break; 2713 case IO_XFER_OPEN_RETRY_TIMEOUT: 2714 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); 2715 ts->resp = SAS_TASK_COMPLETE; 2716 ts->stat = SAS_OPEN_TO; 2717 if (pm8001_dev) 2718 atomic_dec(&pm8001_dev->running_req); 2719 break; 2720 case IO_PORT_IN_RESET: 2721 pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); 2722 ts->resp = SAS_TASK_COMPLETE; 2723 ts->stat = SAS_DEV_NO_RESPONSE; 2724 if (pm8001_dev) 2725 atomic_dec(&pm8001_dev->running_req); 2726 break; 2727 case IO_DS_NON_OPERATIONAL: 2728 pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); 2729 ts->resp = SAS_TASK_COMPLETE; 2730 ts->stat = SAS_DEV_NO_RESPONSE; 2731 if (!t->uldd_task) { 2732 pm8001_handle_event(pm8001_ha, pm8001_dev, 2733 IO_DS_NON_OPERATIONAL); 2734 ts->resp = SAS_TASK_UNDELIVERED; 2735 ts->stat = SAS_QUEUE_FULL; 2736 spin_unlock_irqrestore(&circularQ->oq_lock, 2737 circularQ->lock_flags); 2738 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2739 spin_lock_irqsave(&circularQ->oq_lock, 2740 circularQ->lock_flags); 2741 return; 2742 } 2743 break; 2744 case IO_DS_IN_RECOVERY: 2745 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); 2746 ts->resp = SAS_TASK_COMPLETE; 2747 ts->stat = SAS_DEV_NO_RESPONSE; 2748 if (pm8001_dev) 2749 atomic_dec(&pm8001_dev->running_req); 2750 break; 2751 case IO_DS_IN_ERROR: 2752 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n"); 2753 ts->resp = SAS_TASK_COMPLETE; 2754 ts->stat = SAS_DEV_NO_RESPONSE; 2755 if (!t->uldd_task) { 2756 pm8001_handle_event(pm8001_ha, pm8001_dev, 2757 IO_DS_IN_ERROR); 2758 ts->resp = SAS_TASK_UNDELIVERED; 2759 ts->stat = SAS_QUEUE_FULL; 2760 spin_unlock_irqrestore(&circularQ->oq_lock, 2761 circularQ->lock_flags); 2762 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2763 spin_lock_irqsave(&circularQ->oq_lock, 2764 circularQ->lock_flags); 2765 return; 2766 } 2767 break; 2768 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 2769 pm8001_dbg(pm8001_ha, IO, 2770 "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); 2771 ts->resp = SAS_TASK_COMPLETE; 2772 ts->stat = SAS_OPEN_REJECT; 2773 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2774 if (pm8001_dev) 2775 atomic_dec(&pm8001_dev->running_req); 2776 break; 2777 default: 2778 pm8001_dbg(pm8001_ha, DEVIO, 2779 "Unknown status device_id %u status 0x%x tag %d\n", 2780 pm8001_dev->device_id, status, tag); 2781 /* not allowed case. 
Therefore, return failed status */ 2782 ts->resp = SAS_TASK_COMPLETE; 2783 ts->stat = SAS_DEV_NO_RESPONSE; 2784 if (pm8001_dev) 2785 atomic_dec(&pm8001_dev->running_req); 2786 break; 2787 } 2788 spin_lock_irqsave(&t->task_state_lock, flags); 2789 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 2790 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 2791 t->task_state_flags |= SAS_TASK_STATE_DONE; 2792 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { 2793 spin_unlock_irqrestore(&t->task_state_lock, flags); 2794 pm8001_dbg(pm8001_ha, FAIL, 2795 "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", 2796 t, status, ts->resp, ts->stat); 2797 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2798 if (t->slow_task) 2799 complete(&t->slow_task->completion); 2800 } else { 2801 spin_unlock_irqrestore(&t->task_state_lock, flags); 2802 spin_unlock_irqrestore(&circularQ->oq_lock, 2803 circularQ->lock_flags); 2804 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2805 spin_lock_irqsave(&circularQ->oq_lock, 2806 circularQ->lock_flags); 2807 } 2808 } 2809 2810 /*See the comments for mpi_ssp_completion */ 2811 static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, 2812 struct outbound_queue_table *circularQ, void *piomb) 2813 { 2814 struct sas_task *t; 2815 struct task_status_struct *ts; 2816 struct pm8001_ccb_info *ccb; 2817 struct pm8001_device *pm8001_dev; 2818 struct sata_event_resp *psataPayload = 2819 (struct sata_event_resp *)(piomb + 4); 2820 u32 event = le32_to_cpu(psataPayload->event); 2821 u32 tag = le32_to_cpu(psataPayload->tag); 2822 u32 port_id = le32_to_cpu(psataPayload->port_id); 2823 u32 dev_id = le32_to_cpu(psataPayload->device_id); 2824 2825 if (event) 2826 pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); 2827 2828 /* Check if this is NCQ error */ 2829 if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { 2830 /* find device using device id */ 2831 pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); 2832 /* send read log extension */ 2833 if (pm8001_dev) 2834 pm80xx_send_read_log(pm8001_ha, pm8001_dev); 2835 return; 2836 } 2837 2838 ccb = &pm8001_ha->ccb_info[tag]; 2839 t = ccb->task; 2840 pm8001_dev = ccb->device; 2841 2842 if (unlikely(!t || !t->lldd_task || !t->dev)) { 2843 pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); 2844 return; 2845 } 2846 2847 ts = &t->task_status; 2848 pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n", 2849 port_id, tag, event); 2850 switch (event) { 2851 case IO_OVERFLOW: 2852 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); 2853 ts->resp = SAS_TASK_COMPLETE; 2854 ts->stat = SAS_DATA_OVERRUN; 2855 ts->residual = 0; 2856 break; 2857 case IO_XFER_ERROR_BREAK: 2858 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); 2859 ts->resp = SAS_TASK_COMPLETE; 2860 ts->stat = SAS_INTERRUPTED; 2861 break; 2862 case IO_XFER_ERROR_PHY_NOT_READY: 2863 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); 2864 ts->resp = SAS_TASK_COMPLETE; 2865 ts->stat = SAS_OPEN_REJECT; 2866 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2867 break; 2868 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: 2869 pm8001_dbg(pm8001_ha, IO, 2870 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); 2871 ts->resp = SAS_TASK_COMPLETE; 2872 ts->stat = SAS_OPEN_REJECT; 2873 ts->open_rej_reason = SAS_OREJ_EPROTO; 2874 break; 2875 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: 2876 pm8001_dbg(pm8001_ha, IO, 2877 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); 2878 ts->resp = SAS_TASK_COMPLETE; 2879 ts->stat = SAS_OPEN_REJECT; 2880 ts->open_rej_reason = 
SAS_OREJ_UNKNOWN; 2881 break; 2882 case IO_OPEN_CNX_ERROR_BREAK: 2883 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); 2884 ts->resp = SAS_TASK_COMPLETE; 2885 ts->stat = SAS_OPEN_REJECT; 2886 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; 2887 break; 2888 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 2889 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: 2890 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: 2891 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: 2892 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: 2893 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: 2894 pm8001_dbg(pm8001_ha, FAIL, 2895 "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); 2896 ts->resp = SAS_TASK_UNDELIVERED; 2897 ts->stat = SAS_DEV_NO_RESPONSE; 2898 if (!t->uldd_task) { 2899 pm8001_handle_event(pm8001_ha, 2900 pm8001_dev, 2901 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2902 ts->resp = SAS_TASK_COMPLETE; 2903 ts->stat = SAS_QUEUE_FULL; 2904 return; 2905 } 2906 break; 2907 case IO_OPEN_CNX_ERROR_BAD_DESTINATION: 2908 pm8001_dbg(pm8001_ha, IO, 2909 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); 2910 ts->resp = SAS_TASK_UNDELIVERED; 2911 ts->stat = SAS_OPEN_REJECT; 2912 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 2913 break; 2914 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 2915 pm8001_dbg(pm8001_ha, IO, 2916 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); 2917 ts->resp = SAS_TASK_COMPLETE; 2918 ts->stat = SAS_OPEN_REJECT; 2919 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2920 break; 2921 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: 2922 pm8001_dbg(pm8001_ha, IO, 2923 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); 2924 ts->resp = SAS_TASK_COMPLETE; 2925 ts->stat = SAS_OPEN_REJECT; 2926 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 2927 break; 2928 case IO_XFER_ERROR_NAK_RECEIVED: 2929 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); 2930 ts->resp = SAS_TASK_COMPLETE; 2931 ts->stat = SAS_NAK_R_ERR; 2932 break; 2933 case IO_XFER_ERROR_PEER_ABORTED: 2934 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n"); 2935 ts->resp = SAS_TASK_COMPLETE; 2936 ts->stat = SAS_NAK_R_ERR; 2937 break; 2938 case IO_XFER_ERROR_REJECTED_NCQ_MODE: 2939 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); 2940 ts->resp = SAS_TASK_COMPLETE; 2941 ts->stat = SAS_DATA_UNDERRUN; 2942 break; 2943 case IO_XFER_OPEN_RETRY_TIMEOUT: 2944 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); 2945 ts->resp = SAS_TASK_COMPLETE; 2946 ts->stat = SAS_OPEN_TO; 2947 break; 2948 case IO_XFER_ERROR_UNEXPECTED_PHASE: 2949 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); 2950 ts->resp = SAS_TASK_COMPLETE; 2951 ts->stat = SAS_OPEN_TO; 2952 break; 2953 case IO_XFER_ERROR_XFER_RDY_OVERRUN: 2954 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); 2955 ts->resp = SAS_TASK_COMPLETE; 2956 ts->stat = SAS_OPEN_TO; 2957 break; 2958 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: 2959 pm8001_dbg(pm8001_ha, IO, 2960 "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); 2961 ts->resp = SAS_TASK_COMPLETE; 2962 ts->stat = SAS_OPEN_TO; 2963 break; 2964 case IO_XFER_ERROR_OFFSET_MISMATCH: 2965 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); 2966 ts->resp = SAS_TASK_COMPLETE; 2967 ts->stat = SAS_OPEN_TO; 2968 break; 2969 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: 2970 pm8001_dbg(pm8001_ha, IO, 2971 "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); 2972 ts->resp = SAS_TASK_COMPLETE; 2973 ts->stat = SAS_OPEN_TO; 2974 break; 2975 case IO_XFER_CMD_FRAME_ISSUED: 2976 pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); 2977 break; 2978 case IO_XFER_PIO_SETUP_ERROR: 2979 
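		/* PIO setup phase failed; completed with the same
		 * open-timeout status (SAS_OPEN_TO) as the neighbouring
		 * transfer errors.
		 */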
pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n"); 2980 ts->resp = SAS_TASK_COMPLETE; 2981 ts->stat = SAS_OPEN_TO; 2982 break; 2983 case IO_XFER_ERROR_INTERNAL_CRC_ERROR: 2984 pm8001_dbg(pm8001_ha, FAIL, 2985 "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"); 2986 /* TBC: used default set values */ 2987 ts->resp = SAS_TASK_COMPLETE; 2988 ts->stat = SAS_OPEN_TO; 2989 break; 2990 case IO_XFER_DMA_ACTIVATE_TIMEOUT: 2991 pm8001_dbg(pm8001_ha, FAIL, "IO_XFR_DMA_ACTIVATE_TIMEOUT\n"); 2992 /* TBC: used default set values */ 2993 ts->resp = SAS_TASK_COMPLETE; 2994 ts->stat = SAS_OPEN_TO; 2995 break; 2996 default: 2997 pm8001_dbg(pm8001_ha, IO, "Unknown status 0x%x\n", event); 2998 /* not allowed case. Therefore, return failed status */ 2999 ts->resp = SAS_TASK_COMPLETE; 3000 ts->stat = SAS_OPEN_TO; 3001 break; 3002 } 3003 } 3004 3005 /*See the comments for mpi_ssp_completion */ 3006 static void 3007 mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) 3008 { 3009 u32 param, i; 3010 struct sas_task *t; 3011 struct pm8001_ccb_info *ccb; 3012 unsigned long flags; 3013 u32 status; 3014 u32 tag; 3015 struct smp_completion_resp *psmpPayload; 3016 struct task_status_struct *ts; 3017 struct pm8001_device *pm8001_dev; 3018 3019 psmpPayload = (struct smp_completion_resp *)(piomb + 4); 3020 status = le32_to_cpu(psmpPayload->status); 3021 tag = le32_to_cpu(psmpPayload->tag); 3022 3023 ccb = &pm8001_ha->ccb_info[tag]; 3024 param = le32_to_cpu(psmpPayload->param); 3025 t = ccb->task; 3026 ts = &t->task_status; 3027 pm8001_dev = ccb->device; 3028 if (status) 3029 pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status); 3030 if (unlikely(!t || !t->lldd_task || !t->dev)) 3031 return; 3032 3033 pm8001_dbg(pm8001_ha, DEV, "tag::0x%x status::0x%x\n", tag, status); 3034 3035 switch (status) { 3036 3037 case IO_SUCCESS: 3038 pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); 3039 ts->resp = SAS_TASK_COMPLETE; 3040 ts->stat = SAS_SAM_STAT_GOOD; 3041 if (pm8001_dev) 3042 atomic_dec(&pm8001_dev->running_req); 3043 if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { 3044 struct scatterlist *sg_resp = &t->smp_task.smp_resp; 3045 u8 *payload; 3046 void *to; 3047 3048 pm8001_dbg(pm8001_ha, IO, 3049 "DIRECT RESPONSE Length:%d\n", 3050 param); 3051 to = kmap_atomic(sg_page(sg_resp)); 3052 payload = to + sg_resp->offset; 3053 for (i = 0; i < param; i++) { 3054 *(payload + i) = psmpPayload->_r_a[i]; 3055 pm8001_dbg(pm8001_ha, IO, 3056 "SMP Byte%d DMA data 0x%x psmp 0x%x\n", 3057 i, *(payload + i), 3058 psmpPayload->_r_a[i]); 3059 } 3060 kunmap_atomic(to); 3061 } 3062 break; 3063 case IO_ABORTED: 3064 pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n"); 3065 ts->resp = SAS_TASK_COMPLETE; 3066 ts->stat = SAS_ABORTED_TASK; 3067 if (pm8001_dev) 3068 atomic_dec(&pm8001_dev->running_req); 3069 break; 3070 case IO_OVERFLOW: 3071 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); 3072 ts->resp = SAS_TASK_COMPLETE; 3073 ts->stat = SAS_DATA_OVERRUN; 3074 ts->residual = 0; 3075 if (pm8001_dev) 3076 atomic_dec(&pm8001_dev->running_req); 3077 break; 3078 case IO_NO_DEVICE: 3079 pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); 3080 ts->resp = SAS_TASK_COMPLETE; 3081 ts->stat = SAS_PHY_DOWN; 3082 break; 3083 case IO_ERROR_HW_TIMEOUT: 3084 pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n"); 3085 ts->resp = SAS_TASK_COMPLETE; 3086 ts->stat = SAS_SAM_STAT_BUSY; 3087 break; 3088 case IO_XFER_ERROR_BREAK: 3089 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); 3090 ts->resp = SAS_TASK_COMPLETE; 3091 ts->stat = SAS_SAM_STAT_BUSY; 3092 break; 3093 case 
IO_XFER_ERROR_PHY_NOT_READY: 3094 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); 3095 ts->resp = SAS_TASK_COMPLETE; 3096 ts->stat = SAS_SAM_STAT_BUSY; 3097 break; 3098 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: 3099 pm8001_dbg(pm8001_ha, IO, 3100 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); 3101 ts->resp = SAS_TASK_COMPLETE; 3102 ts->stat = SAS_OPEN_REJECT; 3103 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 3104 break; 3105 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: 3106 pm8001_dbg(pm8001_ha, IO, 3107 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); 3108 ts->resp = SAS_TASK_COMPLETE; 3109 ts->stat = SAS_OPEN_REJECT; 3110 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 3111 break; 3112 case IO_OPEN_CNX_ERROR_BREAK: 3113 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); 3114 ts->resp = SAS_TASK_COMPLETE; 3115 ts->stat = SAS_OPEN_REJECT; 3116 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; 3117 break; 3118 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 3119 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: 3120 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: 3121 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: 3122 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: 3123 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: 3124 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); 3125 ts->resp = SAS_TASK_COMPLETE; 3126 ts->stat = SAS_OPEN_REJECT; 3127 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 3128 pm8001_handle_event(pm8001_ha, 3129 pm8001_dev, 3130 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 3131 break; 3132 case IO_OPEN_CNX_ERROR_BAD_DESTINATION: 3133 pm8001_dbg(pm8001_ha, IO, 3134 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); 3135 ts->resp = SAS_TASK_COMPLETE; 3136 ts->stat = SAS_OPEN_REJECT; 3137 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 3138 break; 3139 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 3140 pm8001_dbg(pm8001_ha, IO, 3141 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); 3142 ts->resp = SAS_TASK_COMPLETE; 3143 ts->stat = SAS_OPEN_REJECT; 3144 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 3145 break; 3146 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: 3147 pm8001_dbg(pm8001_ha, IO, 3148 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); 3149 ts->resp = SAS_TASK_COMPLETE; 3150 ts->stat = SAS_OPEN_REJECT; 3151 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 3152 break; 3153 case IO_XFER_ERROR_RX_FRAME: 3154 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n"); 3155 ts->resp = SAS_TASK_COMPLETE; 3156 ts->stat = SAS_DEV_NO_RESPONSE; 3157 break; 3158 case IO_XFER_OPEN_RETRY_TIMEOUT: 3159 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); 3160 ts->resp = SAS_TASK_COMPLETE; 3161 ts->stat = SAS_OPEN_REJECT; 3162 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 3163 break; 3164 case IO_ERROR_INTERNAL_SMP_RESOURCE: 3165 pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n"); 3166 ts->resp = SAS_TASK_COMPLETE; 3167 ts->stat = SAS_QUEUE_FULL; 3168 break; 3169 case IO_PORT_IN_RESET: 3170 pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); 3171 ts->resp = SAS_TASK_COMPLETE; 3172 ts->stat = SAS_OPEN_REJECT; 3173 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 3174 break; 3175 case IO_DS_NON_OPERATIONAL: 3176 pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); 3177 ts->resp = SAS_TASK_COMPLETE; 3178 ts->stat = SAS_DEV_NO_RESPONSE; 3179 break; 3180 case IO_DS_IN_RECOVERY: 3181 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); 3182 ts->resp = SAS_TASK_COMPLETE; 3183 ts->stat = SAS_OPEN_REJECT; 3184 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 3185 break; 3186 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 3187 pm8001_dbg(pm8001_ha, IO, 
3188 "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); 3189 ts->resp = SAS_TASK_COMPLETE; 3190 ts->stat = SAS_OPEN_REJECT; 3191 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 3192 break; 3193 default: 3194 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); 3195 ts->resp = SAS_TASK_COMPLETE; 3196 ts->stat = SAS_DEV_NO_RESPONSE; 3197 /* not allowed case. Therefore, return failed status */ 3198 break; 3199 } 3200 spin_lock_irqsave(&t->task_state_lock, flags); 3201 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 3202 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 3203 t->task_state_flags |= SAS_TASK_STATE_DONE; 3204 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { 3205 spin_unlock_irqrestore(&t->task_state_lock, flags); 3206 pm8001_dbg(pm8001_ha, FAIL, 3207 "task 0x%p done with io_status 0x%x resp 0x%xstat 0x%x but aborted by upper layer!\n", 3208 t, status, ts->resp, ts->stat); 3209 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 3210 } else { 3211 spin_unlock_irqrestore(&t->task_state_lock, flags); 3212 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 3213 mb();/* in order to force CPU ordering */ 3214 t->task_done(t); 3215 } 3216 } 3217 3218 /** 3219 * pm80xx_hw_event_ack_req- For PM8001, some events need to acknowledge to FW. 3220 * @pm8001_ha: our hba card information 3221 * @Qnum: the outbound queue message number. 3222 * @SEA: source of event to ack 3223 * @port_id: port id. 3224 * @phyId: phy id. 3225 * @param0: parameter 0. 3226 * @param1: parameter 1. 3227 */ 3228 static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, 3229 u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) 3230 { 3231 struct hw_event_ack_req payload; 3232 u32 opc = OPC_INB_SAS_HW_EVENT_ACK; 3233 3234 struct inbound_queue_table *circularQ; 3235 3236 memset((u8 *)&payload, 0, sizeof(payload)); 3237 circularQ = &pm8001_ha->inbnd_q_tbl[Qnum]; 3238 payload.tag = cpu_to_le32(1); 3239 payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | 3240 ((phyId & 0xFF) << 24) | (port_id & 0xFF)); 3241 payload.param0 = cpu_to_le32(param0); 3242 payload.param1 = cpu_to_le32(param1); 3243 pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 3244 sizeof(payload), 0); 3245 } 3246 3247 static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, 3248 u32 phyId, u32 phy_op); 3249 3250 static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha, 3251 void *piomb) 3252 { 3253 struct hw_event_resp *pPayload = (struct hw_event_resp *)(piomb + 4); 3254 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); 3255 u8 phy_id = (u8)((phyid_npip_portstate & 0xFF0000) >> 16); 3256 u32 lr_status_evt_portid = 3257 le32_to_cpu(pPayload->lr_status_evt_portid); 3258 u8 deviceType = pPayload->sas_identify.dev_type; 3259 u8 link_rate = (u8)((lr_status_evt_portid & 0xF0000000) >> 28); 3260 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3261 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); 3262 struct pm8001_port *port = &pm8001_ha->port[port_id]; 3263 3264 if (deviceType == SAS_END_DEVICE) { 3265 pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, 3266 PHY_NOTIFY_ENABLE_SPINUP); 3267 } 3268 3269 port->wide_port_phymap |= (1U << phy_id); 3270 pm8001_get_lrate_mode(phy, link_rate); 3271 phy->sas_phy.oob_mode = SAS_OOB_MODE; 3272 phy->phy_state = PHY_STATE_LINK_UP_SPCV; 3273 phy->phy_attached = 1; 3274 } 3275 3276 /** 3277 * hw_event_sas_phy_up - FW tells me a SAS phy up event. 
3278 * @pm8001_ha: our hba card information 3279 * @piomb: IO message buffer 3280 */ 3281 static void 3282 hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) 3283 { 3284 struct hw_event_resp *pPayload = 3285 (struct hw_event_resp *)(piomb + 4); 3286 u32 lr_status_evt_portid = 3287 le32_to_cpu(pPayload->lr_status_evt_portid); 3288 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); 3289 3290 u8 link_rate = 3291 (u8)((lr_status_evt_portid & 0xF0000000) >> 28); 3292 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); 3293 u8 phy_id = 3294 (u8)((phyid_npip_portstate & 0xFF0000) >> 16); 3295 u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); 3296 3297 struct pm8001_port *port = &pm8001_ha->port[port_id]; 3298 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3299 unsigned long flags; 3300 u8 deviceType = pPayload->sas_identify.dev_type; 3301 phy->port = port; 3302 port->port_id = port_id; 3303 port->port_state = portstate; 3304 port->wide_port_phymap |= (1U << phy_id); 3305 phy->phy_state = PHY_STATE_LINK_UP_SPCV; 3306 pm8001_dbg(pm8001_ha, MSG, 3307 "portid:%d; phyid:%d; linkrate:%d; portstate:%x; devicetype:%x\n", 3308 port_id, phy_id, link_rate, portstate, deviceType); 3309 3310 switch (deviceType) { 3311 case SAS_PHY_UNUSED: 3312 pm8001_dbg(pm8001_ha, MSG, "device type no device.\n"); 3313 break; 3314 case SAS_END_DEVICE: 3315 pm8001_dbg(pm8001_ha, MSG, "end device.\n"); 3316 pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, 3317 PHY_NOTIFY_ENABLE_SPINUP); 3318 port->port_attached = 1; 3319 pm8001_get_lrate_mode(phy, link_rate); 3320 break; 3321 case SAS_EDGE_EXPANDER_DEVICE: 3322 pm8001_dbg(pm8001_ha, MSG, "expander device.\n"); 3323 port->port_attached = 1; 3324 pm8001_get_lrate_mode(phy, link_rate); 3325 break; 3326 case SAS_FANOUT_EXPANDER_DEVICE: 3327 pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n"); 3328 port->port_attached = 1; 3329 pm8001_get_lrate_mode(phy, link_rate); 3330 break; 3331 default: 3332 pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n", 3333 deviceType); 3334 break; 3335 } 3336 phy->phy_type |= PORT_TYPE_SAS; 3337 phy->identify.device_type = deviceType; 3338 phy->phy_attached = 1; 3339 if (phy->identify.device_type == SAS_END_DEVICE) 3340 phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; 3341 else if (phy->identify.device_type != SAS_PHY_UNUSED) 3342 phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; 3343 phy->sas_phy.oob_mode = SAS_OOB_MODE; 3344 sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); 3345 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); 3346 memcpy(phy->frame_rcvd, &pPayload->sas_identify, 3347 sizeof(struct sas_identify_frame)-4); 3348 phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; 3349 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); 3350 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); 3351 if (pm8001_ha->flags == PM8001F_RUN_TIME) 3352 mdelay(200); /* delay a moment to wait for disk to spin up */ 3353 pm8001_bytes_dmaed(pm8001_ha, phy_id); 3354 } 3355 3356 /** 3357 * hw_event_sata_phy_up - FW tells me a SATA phy up event. 
3358 * @pm8001_ha: our hba card information 3359 * @piomb: IO message buffer 3360 */ 3361 static void 3362 hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) 3363 { 3364 struct hw_event_resp *pPayload = 3365 (struct hw_event_resp *)(piomb + 4); 3366 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); 3367 u32 lr_status_evt_portid = 3368 le32_to_cpu(pPayload->lr_status_evt_portid); 3369 u8 link_rate = 3370 (u8)((lr_status_evt_portid & 0xF0000000) >> 28); 3371 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); 3372 u8 phy_id = 3373 (u8)((phyid_npip_portstate & 0xFF0000) >> 16); 3374 3375 u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); 3376 3377 struct pm8001_port *port = &pm8001_ha->port[port_id]; 3378 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3379 unsigned long flags; 3380 pm8001_dbg(pm8001_ha, DEVIO, 3381 "port id %d, phy id %d link_rate %d portstate 0x%x\n", 3382 port_id, phy_id, link_rate, portstate); 3383 3384 phy->port = port; 3385 port->port_id = port_id; 3386 port->port_state = portstate; 3387 phy->phy_state = PHY_STATE_LINK_UP_SPCV; 3388 port->port_attached = 1; 3389 pm8001_get_lrate_mode(phy, link_rate); 3390 phy->phy_type |= PORT_TYPE_SATA; 3391 phy->phy_attached = 1; 3392 phy->sas_phy.oob_mode = SATA_OOB_MODE; 3393 sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); 3394 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); 3395 memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4), 3396 sizeof(struct dev_to_host_fis)); 3397 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); 3398 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; 3399 phy->identify.device_type = SAS_SATA_DEV; 3400 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); 3401 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); 3402 pm8001_bytes_dmaed(pm8001_ha, phy_id); 3403 } 3404 3405 /** 3406 * hw_event_phy_down - we should notify the libsas the phy is down. 
3407 * @pm8001_ha: our hba card information 3408 * @piomb: IO message buffer 3409 */ 3410 static void 3411 hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) 3412 { 3413 struct hw_event_resp *pPayload = 3414 (struct hw_event_resp *)(piomb + 4); 3415 3416 u32 lr_status_evt_portid = 3417 le32_to_cpu(pPayload->lr_status_evt_portid); 3418 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); 3419 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); 3420 u8 phy_id = 3421 (u8)((phyid_npip_portstate & 0xFF0000) >> 16); 3422 u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); 3423 3424 struct pm8001_port *port = &pm8001_ha->port[port_id]; 3425 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3426 u32 port_sata = (phy->phy_type & PORT_TYPE_SATA); 3427 port->port_state = portstate; 3428 phy->identify.device_type = 0; 3429 phy->phy_attached = 0; 3430 switch (portstate) { 3431 case PORT_VALID: 3432 break; 3433 case PORT_INVALID: 3434 pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n", 3435 port_id); 3436 pm8001_dbg(pm8001_ha, MSG, 3437 " Last phy Down and port invalid\n"); 3438 if (port_sata) { 3439 phy->phy_type = 0; 3440 port->port_attached = 0; 3441 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3442 port_id, phy_id, 0, 0); 3443 } 3444 sas_phy_disconnected(&phy->sas_phy); 3445 break; 3446 case PORT_IN_RESET: 3447 pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n", 3448 port_id); 3449 break; 3450 case PORT_NOT_ESTABLISHED: 3451 pm8001_dbg(pm8001_ha, MSG, 3452 " Phy Down and PORT_NOT_ESTABLISHED\n"); 3453 port->port_attached = 0; 3454 break; 3455 case PORT_LOSTCOMM: 3456 pm8001_dbg(pm8001_ha, MSG, " Phy Down and PORT_LOSTCOMM\n"); 3457 pm8001_dbg(pm8001_ha, MSG, 3458 " Last phy Down and port invalid\n"); 3459 if (port_sata) { 3460 port->port_attached = 0; 3461 phy->phy_type = 0; 3462 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3463 port_id, phy_id, 0, 0); 3464 } 3465 sas_phy_disconnected(&phy->sas_phy); 3466 break; 3467 default: 3468 port->port_attached = 0; 3469 pm8001_dbg(pm8001_ha, DEVIO, 3470 " Phy Down and(default) = 0x%x\n", 3471 portstate); 3472 break; 3473 3474 } 3475 if (port_sata && (portstate != PORT_IN_RESET)) 3476 sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL, 3477 GFP_ATOMIC); 3478 } 3479 3480 static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3481 { 3482 struct phy_start_resp *pPayload = 3483 (struct phy_start_resp *)(piomb + 4); 3484 u32 status = 3485 le32_to_cpu(pPayload->status); 3486 u32 phy_id = 3487 le32_to_cpu(pPayload->phyid) & 0xFF; 3488 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3489 3490 pm8001_dbg(pm8001_ha, INIT, 3491 "phy start resp status:0x%x, phyid:0x%x\n", 3492 status, phy_id); 3493 if (status == 0) 3494 phy->phy_state = PHY_LINK_DOWN; 3495 3496 if (pm8001_ha->flags == PM8001F_RUN_TIME && 3497 phy->enable_completion != NULL) { 3498 complete(phy->enable_completion); 3499 phy->enable_completion = NULL; 3500 } 3501 return 0; 3502 3503 } 3504 3505 /** 3506 * mpi_thermal_hw_event - a thermal hw event has come. 
3507 * @pm8001_ha: our hba card information 3508 * @piomb: IO message buffer 3509 */ 3510 static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) 3511 { 3512 struct thermal_hw_event *pPayload = 3513 (struct thermal_hw_event *)(piomb + 4); 3514 3515 u32 thermal_event = le32_to_cpu(pPayload->thermal_event); 3516 u32 rht_lht = le32_to_cpu(pPayload->rht_lht); 3517 3518 if (thermal_event & 0x40) { 3519 pm8001_dbg(pm8001_ha, IO, 3520 "Thermal Event: Local high temperature violated!\n"); 3521 pm8001_dbg(pm8001_ha, IO, 3522 "Thermal Event: Measured local high temperature %d\n", 3523 ((rht_lht & 0xFF00) >> 8)); 3524 } 3525 if (thermal_event & 0x10) { 3526 pm8001_dbg(pm8001_ha, IO, 3527 "Thermal Event: Remote high temperature violated!\n"); 3528 pm8001_dbg(pm8001_ha, IO, 3529 "Thermal Event: Measured remote high temperature %d\n", 3530 ((rht_lht & 0xFF000000) >> 24)); 3531 } 3532 return 0; 3533 } 3534 3535 /** 3536 * mpi_hw_event - The hw event has come. 3537 * @pm8001_ha: our hba card information 3538 * @piomb: IO message buffer 3539 */ 3540 static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) 3541 { 3542 unsigned long flags, i; 3543 struct hw_event_resp *pPayload = 3544 (struct hw_event_resp *)(piomb + 4); 3545 u32 lr_status_evt_portid = 3546 le32_to_cpu(pPayload->lr_status_evt_portid); 3547 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); 3548 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); 3549 u8 phy_id = 3550 (u8)((phyid_npip_portstate & 0xFF0000) >> 16); 3551 u16 eventType = 3552 (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); 3553 u8 status = 3554 (u8)((lr_status_evt_portid & 0x0F000000) >> 24); 3555 struct sas_ha_struct *sas_ha = pm8001_ha->sas; 3556 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3557 struct pm8001_port *port = &pm8001_ha->port[port_id]; 3558 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; 3559 pm8001_dbg(pm8001_ha, DEV, 3560 "portid:%d phyid:%d event:0x%x status:0x%x\n", 3561 port_id, phy_id, eventType, status); 3562 3563 switch (eventType) { 3564 3565 case HW_EVENT_SAS_PHY_UP: 3566 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n"); 3567 hw_event_sas_phy_up(pm8001_ha, piomb); 3568 break; 3569 case HW_EVENT_SATA_PHY_UP: 3570 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n"); 3571 hw_event_sata_phy_up(pm8001_ha, piomb); 3572 break; 3573 case HW_EVENT_SATA_SPINUP_HOLD: 3574 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n"); 3575 sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD, 3576 GFP_ATOMIC); 3577 break; 3578 case HW_EVENT_PHY_DOWN: 3579 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n"); 3580 hw_event_phy_down(pm8001_ha, piomb); 3581 if (pm8001_ha->reset_in_progress) { 3582 pm8001_dbg(pm8001_ha, MSG, "Reset in progress\n"); 3583 return 0; 3584 } 3585 phy->phy_attached = 0; 3586 phy->phy_state = PHY_LINK_DISABLE; 3587 break; 3588 case HW_EVENT_PORT_INVALID: 3589 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n"); 3590 sas_phy_disconnected(sas_phy); 3591 phy->phy_attached = 0; 3592 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, 3593 GFP_ATOMIC); 3594 break; 3595 /* the broadcast change primitive received, tell the LIBSAS this event 3596 to revalidate the sas domain*/ 3597 case HW_EVENT_BROADCAST_CHANGE: 3598 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n"); 3599 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, 3600 port_id, phy_id, 1, 0); 3601 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); 3602 sas_phy->sas_prim = 
HW_EVENT_BROADCAST_CHANGE; 3603 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); 3604 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, 3605 GFP_ATOMIC); 3606 break; 3607 case HW_EVENT_PHY_ERROR: 3608 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n"); 3609 sas_phy_disconnected(&phy->sas_phy); 3610 phy->phy_attached = 0; 3611 sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC); 3612 break; 3613 case HW_EVENT_BROADCAST_EXP: 3614 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n"); 3615 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); 3616 sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; 3617 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); 3618 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, 3619 GFP_ATOMIC); 3620 break; 3621 case HW_EVENT_LINK_ERR_INVALID_DWORD: 3622 pm8001_dbg(pm8001_ha, MSG, 3623 "HW_EVENT_LINK_ERR_INVALID_DWORD\n"); 3624 pm80xx_hw_event_ack_req(pm8001_ha, 0, 3625 HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); 3626 break; 3627 case HW_EVENT_LINK_ERR_DISPARITY_ERROR: 3628 pm8001_dbg(pm8001_ha, MSG, 3629 "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"); 3630 pm80xx_hw_event_ack_req(pm8001_ha, 0, 3631 HW_EVENT_LINK_ERR_DISPARITY_ERROR, 3632 port_id, phy_id, 0, 0); 3633 break; 3634 case HW_EVENT_LINK_ERR_CODE_VIOLATION: 3635 pm8001_dbg(pm8001_ha, MSG, 3636 "HW_EVENT_LINK_ERR_CODE_VIOLATION\n"); 3637 pm80xx_hw_event_ack_req(pm8001_ha, 0, 3638 HW_EVENT_LINK_ERR_CODE_VIOLATION, 3639 port_id, phy_id, 0, 0); 3640 break; 3641 case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: 3642 pm8001_dbg(pm8001_ha, MSG, 3643 "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"); 3644 pm80xx_hw_event_ack_req(pm8001_ha, 0, 3645 HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, 3646 port_id, phy_id, 0, 0); 3647 break; 3648 case HW_EVENT_MALFUNCTION: 3649 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n"); 3650 break; 3651 case HW_EVENT_BROADCAST_SES: 3652 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n"); 3653 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); 3654 sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; 3655 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); 3656 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, 3657 GFP_ATOMIC); 3658 break; 3659 case HW_EVENT_INBOUND_CRC_ERROR: 3660 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n"); 3661 pm80xx_hw_event_ack_req(pm8001_ha, 0, 3662 HW_EVENT_INBOUND_CRC_ERROR, 3663 port_id, phy_id, 0, 0); 3664 break; 3665 case HW_EVENT_HARD_RESET_RECEIVED: 3666 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n"); 3667 sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC); 3668 break; 3669 case HW_EVENT_ID_FRAME_TIMEOUT: 3670 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n"); 3671 sas_phy_disconnected(sas_phy); 3672 phy->phy_attached = 0; 3673 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, 3674 GFP_ATOMIC); 3675 break; 3676 case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: 3677 pm8001_dbg(pm8001_ha, MSG, 3678 "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"); 3679 pm80xx_hw_event_ack_req(pm8001_ha, 0, 3680 HW_EVENT_LINK_ERR_PHY_RESET_FAILED, 3681 port_id, phy_id, 0, 0); 3682 sas_phy_disconnected(sas_phy); 3683 phy->phy_attached = 0; 3684 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, 3685 GFP_ATOMIC); 3686 break; 3687 case HW_EVENT_PORT_RESET_TIMER_TMO: 3688 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n"); 3689 if (!pm8001_ha->phy[phy_id].reset_completion) { 3690 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3691 port_id, phy_id, 0, 0); 3692 } 3693 sas_phy_disconnected(sas_phy); 3694 
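/*
 * Port reset timed out: the PHY_DOWN event was acknowledged above when
 * nobody is waiting on reset_completion; tear down the phy and, if a
 * waiter does exist, complete it below with PORT_RESET_TMO.
 */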
phy->phy_attached = 0; 3695 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, 3696 GFP_ATOMIC); 3697 if (pm8001_ha->phy[phy_id].reset_completion) { 3698 pm8001_ha->phy[phy_id].port_reset_status = 3699 PORT_RESET_TMO; 3700 complete(pm8001_ha->phy[phy_id].reset_completion); 3701 pm8001_ha->phy[phy_id].reset_completion = NULL; 3702 } 3703 break; 3704 case HW_EVENT_PORT_RECOVERY_TIMER_TMO: 3705 pm8001_dbg(pm8001_ha, MSG, 3706 "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"); 3707 pm80xx_hw_event_ack_req(pm8001_ha, 0, 3708 HW_EVENT_PORT_RECOVERY_TIMER_TMO, 3709 port_id, phy_id, 0, 0); 3710 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 3711 if (port->wide_port_phymap & (1 << i)) { 3712 phy = &pm8001_ha->phy[i]; 3713 sas_notify_phy_event(&phy->sas_phy, 3714 PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC); 3715 port->wide_port_phymap &= ~(1 << i); 3716 } 3717 } 3718 break; 3719 case HW_EVENT_PORT_RECOVER: 3720 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n"); 3721 hw_event_port_recover(pm8001_ha, piomb); 3722 break; 3723 case HW_EVENT_PORT_RESET_COMPLETE: 3724 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n"); 3725 if (pm8001_ha->phy[phy_id].reset_completion) { 3726 pm8001_ha->phy[phy_id].port_reset_status = 3727 PORT_RESET_SUCCESS; 3728 complete(pm8001_ha->phy[phy_id].reset_completion); 3729 pm8001_ha->phy[phy_id].reset_completion = NULL; 3730 } 3731 break; 3732 case EVENT_BROADCAST_ASYNCH_EVENT: 3733 pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n"); 3734 break; 3735 default: 3736 pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type 0x%x\n", 3737 eventType); 3738 break; 3739 } 3740 return 0; 3741 } 3742 3743 /** 3744 * mpi_phy_stop_resp - SPCv specific 3745 * @pm8001_ha: our hba card information 3746 * @piomb: IO message buffer 3747 */ 3748 static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3749 { 3750 struct phy_stop_resp *pPayload = 3751 (struct phy_stop_resp *)(piomb + 4); 3752 u32 status = 3753 le32_to_cpu(pPayload->status); 3754 u32 phyid = 3755 le32_to_cpu(pPayload->phyid) & 0xFF; 3756 struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; 3757 pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n", 3758 phyid, status); 3759 if (status == PHY_STOP_SUCCESS || 3760 status == PHY_STOP_ERR_DEVICE_ATTACHED) 3761 phy->phy_state = PHY_LINK_DISABLE; 3762 return 0; 3763 } 3764 3765 /** 3766 * mpi_set_controller_config_resp - SPCv specific 3767 * @pm8001_ha: our hba card information 3768 * @piomb: IO message buffer 3769 */ 3770 static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, 3771 void *piomb) 3772 { 3773 struct set_ctrl_cfg_resp *pPayload = 3774 (struct set_ctrl_cfg_resp *)(piomb + 4); 3775 u32 status = le32_to_cpu(pPayload->status); 3776 u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); 3777 3778 pm8001_dbg(pm8001_ha, MSG, 3779 "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", 3780 status, err_qlfr_pgcd); 3781 3782 return 0; 3783 } 3784 3785 /** 3786 * mpi_get_controller_config_resp - SPCv specific 3787 * @pm8001_ha: our hba card information 3788 * @piomb: IO message buffer 3789 */ 3790 static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha, 3791 void *piomb) 3792 { 3793 pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); 3794 3795 return 0; 3796 } 3797 3798 /** 3799 * mpi_get_phy_profile_resp - SPCv specific 3800 * @pm8001_ha: our hba card information 3801 * @piomb: IO message buffer 3802 */ 3803 static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, 3804 void *piomb) 3805 { 3806 
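/* Only log the GET_PHY_PROFILE completion; the response payload is not
 * consumed here.
 */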
pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); 3807 3808 return 0; 3809 } 3810 3811 /** 3812 * mpi_flash_op_ext_resp - SPCv specific 3813 * @pm8001_ha: our hba card information 3814 * @piomb: IO message buffer 3815 */ 3816 static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3817 { 3818 pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); 3819 3820 return 0; 3821 } 3822 3823 /** 3824 * mpi_set_phy_profile_resp - SPCv specific 3825 * @pm8001_ha: our hba card information 3826 * @piomb: IO message buffer 3827 */ 3828 static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, 3829 void *piomb) 3830 { 3831 u32 tag; 3832 u8 page_code; 3833 int rc = 0; 3834 struct set_phy_profile_resp *pPayload = 3835 (struct set_phy_profile_resp *)(piomb + 4); 3836 u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid); 3837 u32 status = le32_to_cpu(pPayload->status); 3838 3839 tag = le32_to_cpu(pPayload->tag); 3840 page_code = (u8)((ppc_phyid & 0xFF00) >> 8); 3841 if (status) { 3842 /* status is FAILED */ 3843 pm8001_dbg(pm8001_ha, FAIL, 3844 "PhyProfile command failed with status 0x%08X\n", 3845 status); 3846 rc = -1; 3847 } else { 3848 if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) { 3849 pm8001_dbg(pm8001_ha, FAIL, "Invalid page code 0x%X\n", 3850 page_code); 3851 rc = -1; 3852 } 3853 } 3854 pm8001_tag_free(pm8001_ha, tag); 3855 return rc; 3856 } 3857 3858 /** 3859 * mpi_kek_management_resp - SPCv specific 3860 * @pm8001_ha: our hba card information 3861 * @piomb: IO message buffer 3862 */ 3863 static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha, 3864 void *piomb) 3865 { 3866 struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4); 3867 3868 u32 status = le32_to_cpu(pPayload->status); 3869 u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop); 3870 u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr); 3871 3872 pm8001_dbg(pm8001_ha, MSG, 3873 "KEK MGMT RESP. 
Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n", 3874 status, kidx_new_curr_ksop, err_qlfr); 3875 3876 return 0; 3877 } 3878 3879 /** 3880 * mpi_dek_management_resp - SPCv specific 3881 * @pm8001_ha: our hba card information 3882 * @piomb: IO message buffer 3883 */ 3884 static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha, 3885 void *piomb) 3886 { 3887 pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); 3888 3889 return 0; 3890 } 3891 3892 /** 3893 * ssp_coalesced_comp_resp - SPCv specific 3894 * @pm8001_ha: our hba card information 3895 * @piomb: IO message buffer 3896 */ 3897 static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, 3898 void *piomb) 3899 { 3900 pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); 3901 3902 return 0; 3903 } 3904 3905 /** 3906 * process_one_iomb - process one outbound Queue memory block 3907 * @pm8001_ha: our hba card information 3908 * @circularQ: outbound circular queue 3909 * @piomb: IO message buffer 3910 */ 3911 static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, 3912 struct outbound_queue_table *circularQ, void *piomb) 3913 { 3914 __le32 pHeader = *(__le32 *)piomb; 3915 u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); 3916 3917 switch (opc) { 3918 case OPC_OUB_ECHO: 3919 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n"); 3920 break; 3921 case OPC_OUB_HW_EVENT: 3922 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n"); 3923 mpi_hw_event(pm8001_ha, piomb); 3924 break; 3925 case OPC_OUB_THERM_HW_EVENT: 3926 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_THERMAL_EVENT\n"); 3927 mpi_thermal_hw_event(pm8001_ha, piomb); 3928 break; 3929 case OPC_OUB_SSP_COMP: 3930 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n"); 3931 mpi_ssp_completion(pm8001_ha, piomb); 3932 break; 3933 case OPC_OUB_SMP_COMP: 3934 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n"); 3935 mpi_smp_completion(pm8001_ha, piomb); 3936 break; 3937 case OPC_OUB_LOCAL_PHY_CNTRL: 3938 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n"); 3939 pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); 3940 break; 3941 case OPC_OUB_DEV_REGIST: 3942 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n"); 3943 pm8001_mpi_reg_resp(pm8001_ha, piomb); 3944 break; 3945 case OPC_OUB_DEREG_DEV: 3946 pm8001_dbg(pm8001_ha, MSG, "unregister the device\n"); 3947 pm8001_mpi_dereg_resp(pm8001_ha, piomb); 3948 break; 3949 case OPC_OUB_GET_DEV_HANDLE: 3950 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n"); 3951 break; 3952 case OPC_OUB_SATA_COMP: 3953 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n"); 3954 mpi_sata_completion(pm8001_ha, circularQ, piomb); 3955 break; 3956 case OPC_OUB_SATA_EVENT: 3957 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n"); 3958 mpi_sata_event(pm8001_ha, circularQ, piomb); 3959 break; 3960 case OPC_OUB_SSP_EVENT: 3961 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n"); 3962 mpi_ssp_event(pm8001_ha, piomb); 3963 break; 3964 case OPC_OUB_DEV_HANDLE_ARRIV: 3965 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n"); 3966 /*This is for target*/ 3967 break; 3968 case OPC_OUB_SSP_RECV_EVENT: 3969 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n"); 3970 /*This is for target*/ 3971 break; 3972 case OPC_OUB_FW_FLASH_UPDATE: 3973 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n"); 3974 pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); 3975 break; 3976 case OPC_OUB_GPIO_RESPONSE: 3977 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n"); 3978 break; 3979 case OPC_OUB_GPIO_EVENT: 3980 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n"); 3981 break; 3982 case 
OPC_OUB_GENERAL_EVENT: 3983 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n"); 3984 pm8001_mpi_general_event(pm8001_ha, piomb); 3985 break; 3986 case OPC_OUB_SSP_ABORT_RSP: 3987 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n"); 3988 pm8001_mpi_task_abort_resp(pm8001_ha, piomb); 3989 break; 3990 case OPC_OUB_SATA_ABORT_RSP: 3991 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n"); 3992 pm8001_mpi_task_abort_resp(pm8001_ha, piomb); 3993 break; 3994 case OPC_OUB_SAS_DIAG_MODE_START_END: 3995 pm8001_dbg(pm8001_ha, MSG, 3996 "OPC_OUB_SAS_DIAG_MODE_START_END\n"); 3997 break; 3998 case OPC_OUB_SAS_DIAG_EXECUTE: 3999 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n"); 4000 break; 4001 case OPC_OUB_GET_TIME_STAMP: 4002 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n"); 4003 break; 4004 case OPC_OUB_SAS_HW_EVENT_ACK: 4005 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n"); 4006 break; 4007 case OPC_OUB_PORT_CONTROL: 4008 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n"); 4009 break; 4010 case OPC_OUB_SMP_ABORT_RSP: 4011 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n"); 4012 pm8001_mpi_task_abort_resp(pm8001_ha, piomb); 4013 break; 4014 case OPC_OUB_GET_NVMD_DATA: 4015 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n"); 4016 pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); 4017 break; 4018 case OPC_OUB_SET_NVMD_DATA: 4019 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n"); 4020 pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); 4021 break; 4022 case OPC_OUB_DEVICE_HANDLE_REMOVAL: 4023 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n"); 4024 break; 4025 case OPC_OUB_SET_DEVICE_STATE: 4026 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n"); 4027 pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); 4028 break; 4029 case OPC_OUB_GET_DEVICE_STATE: 4030 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n"); 4031 break; 4032 case OPC_OUB_SET_DEV_INFO: 4033 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n"); 4034 break; 4035 /* spcv specific commands */ 4036 case OPC_OUB_PHY_START_RESP: 4037 pm8001_dbg(pm8001_ha, MSG, 4038 "OPC_OUB_PHY_START_RESP opcode:%x\n", opc); 4039 mpi_phy_start_resp(pm8001_ha, piomb); 4040 break; 4041 case OPC_OUB_PHY_STOP_RESP: 4042 pm8001_dbg(pm8001_ha, MSG, 4043 "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc); 4044 mpi_phy_stop_resp(pm8001_ha, piomb); 4045 break; 4046 case OPC_OUB_SET_CONTROLLER_CONFIG: 4047 pm8001_dbg(pm8001_ha, MSG, 4048 "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc); 4049 mpi_set_controller_config_resp(pm8001_ha, piomb); 4050 break; 4051 case OPC_OUB_GET_CONTROLLER_CONFIG: 4052 pm8001_dbg(pm8001_ha, MSG, 4053 "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc); 4054 mpi_get_controller_config_resp(pm8001_ha, piomb); 4055 break; 4056 case OPC_OUB_GET_PHY_PROFILE: 4057 pm8001_dbg(pm8001_ha, MSG, 4058 "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc); 4059 mpi_get_phy_profile_resp(pm8001_ha, piomb); 4060 break; 4061 case OPC_OUB_FLASH_OP_EXT: 4062 pm8001_dbg(pm8001_ha, MSG, 4063 "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc); 4064 mpi_flash_op_ext_resp(pm8001_ha, piomb); 4065 break; 4066 case OPC_OUB_SET_PHY_PROFILE: 4067 pm8001_dbg(pm8001_ha, MSG, 4068 "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc); 4069 mpi_set_phy_profile_resp(pm8001_ha, piomb); 4070 break; 4071 case OPC_OUB_KEK_MANAGEMENT_RESP: 4072 pm8001_dbg(pm8001_ha, MSG, 4073 "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc); 4074 mpi_kek_management_resp(pm8001_ha, piomb); 4075 break; 4076 case OPC_OUB_DEK_MANAGEMENT_RESP: 4077 pm8001_dbg(pm8001_ha, MSG, 4078 
"OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc); 4079 mpi_dek_management_resp(pm8001_ha, piomb); 4080 break; 4081 case OPC_OUB_SSP_COALESCED_COMP_RESP: 4082 pm8001_dbg(pm8001_ha, MSG, 4083 "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc); 4084 ssp_coalesced_comp_resp(pm8001_ha, piomb); 4085 break; 4086 default: 4087 pm8001_dbg(pm8001_ha, DEVIO, 4088 "Unknown outbound Queue IOMB OPC = 0x%x\n", opc); 4089 break; 4090 } 4091 } 4092 4093 static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha) 4094 { 4095 pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_0: 0x%x\n", 4096 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)); 4097 pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_1:0x%x\n", 4098 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)); 4099 pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_2: 0x%x\n", 4100 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)); 4101 pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_3: 0x%x\n", 4102 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)); 4103 pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_0: 0x%x\n", 4104 pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)); 4105 pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_1: 0x%x\n", 4106 pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)); 4107 pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_2: 0x%x\n", 4108 pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)); 4109 pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_3: 0x%x\n", 4110 pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)); 4111 pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_4: 0x%x\n", 4112 pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)); 4113 pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_5: 0x%x\n", 4114 pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)); 4115 pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n", 4116 pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)); 4117 pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n", 4118 pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)); 4119 } 4120 4121 static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) 4122 { 4123 struct outbound_queue_table *circularQ; 4124 void *pMsg1 = NULL; 4125 u8 bc; 4126 u32 ret = MPI_IO_STATUS_FAIL; 4127 u32 regval; 4128 4129 /* 4130 * Fatal errors are programmed to be signalled in irq vector 4131 * pm8001_ha->max_q_num - 1 through pm8001_ha->main_cfg_tbl.pm80xx_tbl. 4132 * fatal_err_interrupt 4133 */ 4134 if (vec == (pm8001_ha->max_q_num - 1)) { 4135 u32 mipsall_ready; 4136 4137 if (pm8001_ha->chip_id == chip_8008 || 4138 pm8001_ha->chip_id == chip_8009) 4139 mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT; 4140 else 4141 mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT; 4142 4143 regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); 4144 if ((regval & mipsall_ready) != mipsall_ready) { 4145 pm8001_ha->controller_fatal_error = true; 4146 pm8001_dbg(pm8001_ha, FAIL, 4147 "Firmware Fatal error! Regval:0x%x\n", 4148 regval); 4149 pm8001_handle_event(pm8001_ha, NULL, IO_FATAL_ERROR); 4150 print_scratchpad_registers(pm8001_ha); 4151 return ret; 4152 } 4153 } 4154 circularQ = &pm8001_ha->outbnd_q_tbl[vec]; 4155 spin_lock_irqsave(&circularQ->oq_lock, circularQ->lock_flags); 4156 do { 4157 /* spurious interrupt during setup if kexec-ing and 4158 * driver doing a doorbell access w/ the pre-kexec oq 4159 * interrupt setup. 
4160 */ 4161 if (!circularQ->pi_virt) 4162 break; 4163 ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); 4164 if (MPI_IO_STATUS_SUCCESS == ret) { 4165 /* process the outbound message */ 4166 process_one_iomb(pm8001_ha, circularQ, 4167 (void *)(pMsg1 - 4)); 4168 /* free the message from the outbound circular buffer */ 4169 pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, 4170 circularQ, bc); 4171 } 4172 if (MPI_IO_STATUS_BUSY == ret) { 4173 /* Update the producer index from SPC */ 4174 circularQ->producer_index = 4175 cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); 4176 if (le32_to_cpu(circularQ->producer_index) == 4177 circularQ->consumer_idx) 4178 /* OQ is empty */ 4179 break; 4180 } 4181 } while (1); 4182 spin_unlock_irqrestore(&circularQ->oq_lock, circularQ->lock_flags); 4183 return ret; 4184 } 4185 4186 /* DMA_... to our direction translation. */ 4187 static const u8 data_dir_flags[] = { 4188 [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ 4189 [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */ 4190 [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */ 4191 [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ 4192 }; 4193 4194 static void build_smp_cmd(u32 deviceID, __le32 hTag, 4195 struct smp_req *psmp_cmd, int mode, int length) 4196 { 4197 psmp_cmd->tag = hTag; 4198 psmp_cmd->device_id = cpu_to_le32(deviceID); 4199 if (mode == SMP_DIRECT) { 4200 length = length - 4; /* subtract crc */ 4201 psmp_cmd->len_ip_ir = cpu_to_le32(length << 16); 4202 } else { 4203 psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); 4204 } 4205 } 4206 4207 /** 4208 * pm80xx_chip_smp_req - send an SMP task to FW 4209 * @pm8001_ha: our hba card information. 4210 * @ccb: the ccb information this request used. 4211 */ 4212 static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, 4213 struct pm8001_ccb_info *ccb) 4214 { 4215 int elem, rc; 4216 struct sas_task *task = ccb->task; 4217 struct domain_device *dev = task->dev; 4218 struct pm8001_device *pm8001_dev = dev->lldd_dev; 4219 struct scatterlist *sg_req, *sg_resp, *smp_req; 4220 u32 req_len, resp_len; 4221 struct smp_req smp_cmd; 4222 u32 opc; 4223 struct inbound_queue_table *circularQ; 4224 u32 i, length; 4225 u8 *payload; 4226 u8 *to; 4227 4228 memset(&smp_cmd, 0, sizeof(smp_cmd)); 4229 /* 4230 * DMA-map SMP request, response buffers 4231 */ 4232 sg_req = &task->smp_task.smp_req; 4233 elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE); 4234 if (!elem) 4235 return -ENOMEM; 4236 req_len = sg_dma_len(sg_req); 4237 4238 sg_resp = &task->smp_task.smp_resp; 4239 elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE); 4240 if (!elem) { 4241 rc = -ENOMEM; 4242 goto err_out; 4243 } 4244 resp_len = sg_dma_len(sg_resp); 4245 /* must be in dwords */ 4246 if ((req_len & 0x3) || (resp_len & 0x3)) { 4247 rc = -EINVAL; 4248 goto err_out_2; 4249 } 4250 4251 opc = OPC_INB_SMP_REQUEST; 4252 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4253 smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); 4254 4255 length = sg_req->length; 4256 pm8001_dbg(pm8001_ha, IO, "SMP Frame Length %d\n", sg_req->length); 4257 if (!(length - 8)) 4258 pm8001_ha->smp_exp_mode = SMP_DIRECT; 4259 else 4260 pm8001_ha->smp_exp_mode = SMP_INDIRECT; 4261 4262 4263 smp_req = &task->smp_task.smp_req; 4264 to = kmap_atomic(sg_page(smp_req)); 4265 payload = to + smp_req->offset; 4266 4267 /* INDIRECT MODE command settings. Use DMA */ 4268 if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { 4269 pm8001_dbg(pm8001_ha, IO, "SMP REQUEST INDIRECT MODE\n"); 4270 /* for SPCv indirect mode. 
Place the top 4 bytes of 4271 * SMP Request header here. */ 4272 for (i = 0; i < 4; i++) 4273 smp_cmd.smp_req16[i] = *(payload + i); 4274 /* exclude top 4 bytes for SMP req header */ 4275 smp_cmd.long_smp_req.long_req_addr = 4276 cpu_to_le64((u64)sg_dma_address 4277 (&task->smp_task.smp_req) + 4); 4278 /* exclude 4 bytes for SMP req header and CRC */ 4279 smp_cmd.long_smp_req.long_req_size = 4280 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); 4281 smp_cmd.long_smp_req.long_resp_addr = 4282 cpu_to_le64((u64)sg_dma_address 4283 (&task->smp_task.smp_resp)); 4284 smp_cmd.long_smp_req.long_resp_size = 4285 cpu_to_le32((u32)sg_dma_len 4286 (&task->smp_task.smp_resp)-4); 4287 } else { /* DIRECT MODE */ 4288 smp_cmd.long_smp_req.long_req_addr = 4289 cpu_to_le64((u64)sg_dma_address 4290 (&task->smp_task.smp_req)); 4291 smp_cmd.long_smp_req.long_req_size = 4292 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); 4293 smp_cmd.long_smp_req.long_resp_addr = 4294 cpu_to_le64((u64)sg_dma_address 4295 (&task->smp_task.smp_resp)); 4296 smp_cmd.long_smp_req.long_resp_size = 4297 cpu_to_le32 4298 ((u32)sg_dma_len(&task->smp_task.smp_resp)-4); 4299 } 4300 if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { 4301 pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n"); 4302 for (i = 0; i < length; i++) 4303 if (i < 16) { 4304 smp_cmd.smp_req16[i] = *(payload + i); 4305 pm8001_dbg(pm8001_ha, IO, 4306 "Byte[%d]:%x (DMA data:%x)\n", 4307 i, smp_cmd.smp_req16[i], 4308 *(payload)); 4309 } else { 4310 smp_cmd.smp_req[i] = *(payload + i); 4311 pm8001_dbg(pm8001_ha, IO, 4312 "Byte[%d]:%x (DMA data:%x)\n", 4313 i, smp_cmd.smp_req[i], 4314 *(payload)); 4315 } 4316 } 4317 kunmap_atomic(to); 4318 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, 4319 &smp_cmd, pm8001_ha->smp_exp_mode, length); 4320 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd, 4321 sizeof(smp_cmd), 0); 4322 if (rc) 4323 goto err_out_2; 4324 return 0; 4325 4326 err_out_2: 4327 dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, 4328 DMA_FROM_DEVICE); 4329 err_out: 4330 dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, 4331 DMA_TO_DEVICE); 4332 return rc; 4333 } 4334 4335 static int check_enc_sas_cmd(struct sas_task *task) 4336 { 4337 u8 cmd = task->ssp_task.cmd->cmnd[0]; 4338 4339 if (cmd == READ_10 || cmd == WRITE_10 || cmd == WRITE_VERIFY) 4340 return 1; 4341 else 4342 return 0; 4343 } 4344 4345 static int check_enc_sat_cmd(struct sas_task *task) 4346 { 4347 int ret = 0; 4348 switch (task->ata_task.fis.command) { 4349 case ATA_CMD_FPDMA_READ: 4350 case ATA_CMD_READ_EXT: 4351 case ATA_CMD_READ: 4352 case ATA_CMD_FPDMA_WRITE: 4353 case ATA_CMD_WRITE_EXT: 4354 case ATA_CMD_WRITE: 4355 case ATA_CMD_PIO_READ: 4356 case ATA_CMD_PIO_READ_EXT: 4357 case ATA_CMD_PIO_WRITE: 4358 case ATA_CMD_PIO_WRITE_EXT: 4359 ret = 1; 4360 break; 4361 default: 4362 ret = 0; 4363 break; 4364 } 4365 return ret; 4366 } 4367 4368 /** 4369 * pm80xx_chip_ssp_io_req - send an SSP task to FW 4370 * @pm8001_ha: our hba card information. 4371 * @ccb: the ccb information this request used. 
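 *
 * If the controller supports encryption (pm8001_ha->chip->encrypt), the
 * encryption status in encrypt_info is clear and check_enc_sas_cmd()
 * accepts the command, the request is issued as OPC_INB_SSP_INI_DIF_ENC_IO;
 * otherwise a plain OPC_INB_SSPINIIOSTART is used.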
4372 */ 4373 static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, 4374 struct pm8001_ccb_info *ccb) 4375 { 4376 struct sas_task *task = ccb->task; 4377 struct domain_device *dev = task->dev; 4378 struct pm8001_device *pm8001_dev = dev->lldd_dev; 4379 struct ssp_ini_io_start_req ssp_cmd; 4380 u32 tag = ccb->ccb_tag; 4381 int ret; 4382 u64 phys_addr, start_addr, end_addr; 4383 u32 end_addr_high, end_addr_low; 4384 struct inbound_queue_table *circularQ; 4385 u32 q_index, cpu_id; 4386 u32 opc = OPC_INB_SSPINIIOSTART; 4387 memset(&ssp_cmd, 0, sizeof(ssp_cmd)); 4388 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); 4389 /* data address domain added for spcv; set to 0 by host, 4390 * used internally by controller 4391 * 0 for SAS 1.1 and SAS 2.0 compatible TLR 4392 */ 4393 ssp_cmd.dad_dir_m_tlr = 4394 cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0); 4395 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); 4396 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); 4397 ssp_cmd.tag = cpu_to_le32(tag); 4398 if (task->ssp_task.enable_first_burst) 4399 ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; 4400 ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); 4401 ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); 4402 memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, 4403 task->ssp_task.cmd->cmd_len); 4404 cpu_id = smp_processor_id(); 4405 q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num); 4406 circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; 4407 4408 /* Check if encryption is set */ 4409 if (pm8001_ha->chip->encrypt && 4410 !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) { 4411 pm8001_dbg(pm8001_ha, IO, 4412 "Encryption enabled.Sending Encrypt SAS command 0x%x\n", 4413 task->ssp_task.cmd->cmnd[0]); 4414 opc = OPC_INB_SSP_INI_DIF_ENC_IO; 4415 /* enable encryption. 
0 for SAS 1.1 and SAS 2.0 compatible TLR*/ 4416 ssp_cmd.dad_dir_m_tlr = cpu_to_le32 4417 ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0); 4418 4419 /* fill in PRD (scatter/gather) table, if any */ 4420 if (task->num_scatter > 1) { 4421 pm8001_chip_make_sg(task->scatter, 4422 ccb->n_elem, ccb->buf_prd); 4423 phys_addr = ccb->ccb_dma_handle; 4424 ssp_cmd.enc_addr_low = 4425 cpu_to_le32(lower_32_bits(phys_addr)); 4426 ssp_cmd.enc_addr_high = 4427 cpu_to_le32(upper_32_bits(phys_addr)); 4428 ssp_cmd.enc_esgl = cpu_to_le32(1<<31); 4429 } else if (task->num_scatter == 1) { 4430 u64 dma_addr = sg_dma_address(task->scatter); 4431 ssp_cmd.enc_addr_low = 4432 cpu_to_le32(lower_32_bits(dma_addr)); 4433 ssp_cmd.enc_addr_high = 4434 cpu_to_le32(upper_32_bits(dma_addr)); 4435 ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); 4436 ssp_cmd.enc_esgl = 0; 4437 /* Check 4G Boundary */ 4438 start_addr = cpu_to_le64(dma_addr); 4439 end_addr = (start_addr + ssp_cmd.enc_len) - 1; 4440 end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); 4441 end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); 4442 if (end_addr_high != ssp_cmd.enc_addr_high) { 4443 pm8001_dbg(pm8001_ha, FAIL, 4444 "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", 4445 start_addr, ssp_cmd.enc_len, 4446 end_addr_high, end_addr_low); 4447 pm8001_chip_make_sg(task->scatter, 1, 4448 ccb->buf_prd); 4449 phys_addr = ccb->ccb_dma_handle; 4450 ssp_cmd.enc_addr_low = 4451 cpu_to_le32(lower_32_bits(phys_addr)); 4452 ssp_cmd.enc_addr_high = 4453 cpu_to_le32(upper_32_bits(phys_addr)); 4454 ssp_cmd.enc_esgl = cpu_to_le32(1<<31); 4455 } 4456 } else if (task->num_scatter == 0) { 4457 ssp_cmd.enc_addr_low = 0; 4458 ssp_cmd.enc_addr_high = 0; 4459 ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); 4460 ssp_cmd.enc_esgl = 0; 4461 } 4462 /* XTS mode. All other fields are 0 */ 4463 ssp_cmd.key_cmode = 0x6 << 4; 4464 /* set tweak values. 
Should be the start lba */ 4465 ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) | 4466 (task->ssp_task.cmd->cmnd[3] << 16) | 4467 (task->ssp_task.cmd->cmnd[4] << 8) | 4468 (task->ssp_task.cmd->cmnd[5])); 4469 } else { 4470 pm8001_dbg(pm8001_ha, IO, 4471 "Sending Normal SAS command 0x%x inb q %x\n", 4472 task->ssp_task.cmd->cmnd[0], q_index); 4473 /* fill in PRD (scatter/gather) table, if any */ 4474 if (task->num_scatter > 1) { 4475 pm8001_chip_make_sg(task->scatter, ccb->n_elem, 4476 ccb->buf_prd); 4477 phys_addr = ccb->ccb_dma_handle; 4478 ssp_cmd.addr_low = 4479 cpu_to_le32(lower_32_bits(phys_addr)); 4480 ssp_cmd.addr_high = 4481 cpu_to_le32(upper_32_bits(phys_addr)); 4482 ssp_cmd.esgl = cpu_to_le32(1<<31); 4483 } else if (task->num_scatter == 1) { 4484 u64 dma_addr = sg_dma_address(task->scatter); 4485 ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); 4486 ssp_cmd.addr_high = 4487 cpu_to_le32(upper_32_bits(dma_addr)); 4488 ssp_cmd.len = cpu_to_le32(task->total_xfer_len); 4489 ssp_cmd.esgl = 0; 4490 /* Check 4G Boundary */ 4491 start_addr = cpu_to_le64(dma_addr); 4492 end_addr = (start_addr + ssp_cmd.len) - 1; 4493 end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); 4494 end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); 4495 if (end_addr_high != ssp_cmd.addr_high) { 4496 pm8001_dbg(pm8001_ha, FAIL, 4497 "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", 4498 start_addr, ssp_cmd.len, 4499 end_addr_high, end_addr_low); 4500 pm8001_chip_make_sg(task->scatter, 1, 4501 ccb->buf_prd); 4502 phys_addr = ccb->ccb_dma_handle; 4503 ssp_cmd.addr_low = 4504 cpu_to_le32(lower_32_bits(phys_addr)); 4505 ssp_cmd.addr_high = 4506 cpu_to_le32(upper_32_bits(phys_addr)); 4507 ssp_cmd.esgl = cpu_to_le32(1<<31); 4508 } 4509 } else if (task->num_scatter == 0) { 4510 ssp_cmd.addr_low = 0; 4511 ssp_cmd.addr_high = 0; 4512 ssp_cmd.len = cpu_to_le32(task->total_xfer_len); 4513 ssp_cmd.esgl = 0; 4514 } 4515 } 4516 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, 4517 &ssp_cmd, sizeof(ssp_cmd), q_index); 4518 return ret; 4519 } 4520 4521 static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, 4522 struct pm8001_ccb_info *ccb) 4523 { 4524 struct sas_task *task = ccb->task; 4525 struct domain_device *dev = task->dev; 4526 struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; 4527 struct ata_queued_cmd *qc = task->uldd_task; 4528 u32 tag = ccb->ccb_tag; 4529 int ret; 4530 u32 q_index, cpu_id; 4531 struct sata_start_req sata_cmd; 4532 u32 hdr_tag, ncg_tag = 0; 4533 u64 phys_addr, start_addr, end_addr; 4534 u32 end_addr_high, end_addr_low; 4535 u32 ATAP = 0x0; 4536 u32 dir; 4537 struct inbound_queue_table *circularQ; 4538 unsigned long flags; 4539 u32 opc = OPC_INB_SATA_HOST_OPSTART; 4540 memset(&sata_cmd, 0, sizeof(sata_cmd)); 4541 cpu_id = smp_processor_id(); 4542 q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num); 4543 circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; 4544 4545 if (task->data_dir == DMA_NONE) { 4546 ATAP = 0x04; /* no data*/ 4547 pm8001_dbg(pm8001_ha, IO, "no data\n"); 4548 } else if (likely(!task->ata_task.device_control_reg_update)) { 4549 if (task->ata_task.dma_xfer) { 4550 ATAP = 0x06; /* DMA */ 4551 pm8001_dbg(pm8001_ha, IO, "DMA\n"); 4552 } else { 4553 ATAP = 0x05; /* PIO*/ 4554 pm8001_dbg(pm8001_ha, IO, "PIO\n"); 4555 } 4556 if (task->ata_task.use_ncq && 4557 dev->sata_dev.class != ATA_DEV_ATAPI) { 4558 ATAP = 0x07; /* FPDMA */ 4559 pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); 4560 } 4561 
} 4562 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { 4563 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 4564 ncg_tag = hdr_tag; 4565 } 4566 dir = data_dir_flags[task->data_dir] << 8; 4567 sata_cmd.tag = cpu_to_le32(tag); 4568 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 4569 sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); 4570 4571 sata_cmd.sata_fis = task->ata_task.fis; 4572 if (likely(!task->ata_task.device_control_reg_update)) 4573 sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ 4574 sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ 4575 4576 /* Check if encryption is set */ 4577 if (pm8001_ha->chip->encrypt && 4578 !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) { 4579 pm8001_dbg(pm8001_ha, IO, 4580 "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n", 4581 sata_cmd.sata_fis.command); 4582 opc = OPC_INB_SATA_DIF_ENC_IO; 4583 4584 /* set encryption bit */ 4585 sata_cmd.ncqtag_atap_dir_m_dad = 4586 cpu_to_le32(((ncg_tag & 0xff)<<16)| 4587 ((ATAP & 0x3f) << 10) | 0x20 | dir); 4588 /* dad (bit 0-1) is 0 */ 4589 /* fill in PRD (scatter/gather) table, if any */ 4590 if (task->num_scatter > 1) { 4591 pm8001_chip_make_sg(task->scatter, 4592 ccb->n_elem, ccb->buf_prd); 4593 phys_addr = ccb->ccb_dma_handle; 4594 sata_cmd.enc_addr_low = lower_32_bits(phys_addr); 4595 sata_cmd.enc_addr_high = upper_32_bits(phys_addr); 4596 sata_cmd.enc_esgl = cpu_to_le32(1 << 31); 4597 } else if (task->num_scatter == 1) { 4598 u64 dma_addr = sg_dma_address(task->scatter); 4599 sata_cmd.enc_addr_low = lower_32_bits(dma_addr); 4600 sata_cmd.enc_addr_high = upper_32_bits(dma_addr); 4601 sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); 4602 sata_cmd.enc_esgl = 0; 4603 /* Check 4G Boundary */ 4604 start_addr = cpu_to_le64(dma_addr); 4605 end_addr = (start_addr + sata_cmd.enc_len) - 1; 4606 end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); 4607 end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); 4608 if (end_addr_high != sata_cmd.enc_addr_high) { 4609 pm8001_dbg(pm8001_ha, FAIL, 4610 "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", 4611 start_addr, sata_cmd.enc_len, 4612 end_addr_high, end_addr_low); 4613 pm8001_chip_make_sg(task->scatter, 1, 4614 ccb->buf_prd); 4615 phys_addr = ccb->ccb_dma_handle; 4616 sata_cmd.enc_addr_low = 4617 lower_32_bits(phys_addr); 4618 sata_cmd.enc_addr_high = 4619 upper_32_bits(phys_addr); 4620 sata_cmd.enc_esgl = 4621 cpu_to_le32(1 << 31); 4622 } 4623 } else if (task->num_scatter == 0) { 4624 sata_cmd.enc_addr_low = 0; 4625 sata_cmd.enc_addr_high = 0; 4626 sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); 4627 sata_cmd.enc_esgl = 0; 4628 } 4629 /* XTS mode. All other fields are 0 */ 4630 sata_cmd.key_index_mode = 0x6 << 4; 4631 /* set tweak values. 
Should be the start lba */ 4632 sata_cmd.twk_val0 = 4633 cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | 4634 (sata_cmd.sata_fis.lbah << 16) | 4635 (sata_cmd.sata_fis.lbam << 8) | 4636 (sata_cmd.sata_fis.lbal)); 4637 sata_cmd.twk_val1 = 4638 cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) | 4639 (sata_cmd.sata_fis.lbam_exp)); 4640 } else { 4641 pm8001_dbg(pm8001_ha, IO, 4642 "Sending Normal SATA command 0x%x inb %x\n", 4643 sata_cmd.sata_fis.command, q_index); 4644 /* dad (bit 0-1) is 0 */ 4645 sata_cmd.ncqtag_atap_dir_m_dad = 4646 cpu_to_le32(((ncg_tag & 0xff)<<16) | 4647 ((ATAP & 0x3f) << 10) | dir); 4648 4649 /* fill in PRD (scatter/gather) table, if any */ 4650 if (task->num_scatter > 1) { 4651 pm8001_chip_make_sg(task->scatter, 4652 ccb->n_elem, ccb->buf_prd); 4653 phys_addr = ccb->ccb_dma_handle; 4654 sata_cmd.addr_low = lower_32_bits(phys_addr); 4655 sata_cmd.addr_high = upper_32_bits(phys_addr); 4656 sata_cmd.esgl = cpu_to_le32(1 << 31); 4657 } else if (task->num_scatter == 1) { 4658 u64 dma_addr = sg_dma_address(task->scatter); 4659 sata_cmd.addr_low = lower_32_bits(dma_addr); 4660 sata_cmd.addr_high = upper_32_bits(dma_addr); 4661 sata_cmd.len = cpu_to_le32(task->total_xfer_len); 4662 sata_cmd.esgl = 0; 4663 /* Check 4G Boundary */ 4664 start_addr = cpu_to_le64(dma_addr); 4665 end_addr = (start_addr + sata_cmd.len) - 1; 4666 end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); 4667 end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); 4668 if (end_addr_high != sata_cmd.addr_high) { 4669 pm8001_dbg(pm8001_ha, FAIL, 4670 "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", 4671 start_addr, sata_cmd.len, 4672 end_addr_high, end_addr_low); 4673 pm8001_chip_make_sg(task->scatter, 1, 4674 ccb->buf_prd); 4675 phys_addr = ccb->ccb_dma_handle; 4676 sata_cmd.addr_low = 4677 lower_32_bits(phys_addr); 4678 sata_cmd.addr_high = 4679 upper_32_bits(phys_addr); 4680 sata_cmd.esgl = cpu_to_le32(1 << 31); 4681 } 4682 } else if (task->num_scatter == 0) { 4683 sata_cmd.addr_low = 0; 4684 sata_cmd.addr_high = 0; 4685 sata_cmd.len = cpu_to_le32(task->total_xfer_len); 4686 sata_cmd.esgl = 0; 4687 } 4688 /* scsi cdb */ 4689 sata_cmd.atapi_scsi_cdb[0] = 4690 cpu_to_le32(((task->ata_task.atapi_packet[0]) | 4691 (task->ata_task.atapi_packet[1] << 8) | 4692 (task->ata_task.atapi_packet[2] << 16) | 4693 (task->ata_task.atapi_packet[3] << 24))); 4694 sata_cmd.atapi_scsi_cdb[1] = 4695 cpu_to_le32(((task->ata_task.atapi_packet[4]) | 4696 (task->ata_task.atapi_packet[5] << 8) | 4697 (task->ata_task.atapi_packet[6] << 16) | 4698 (task->ata_task.atapi_packet[7] << 24))); 4699 sata_cmd.atapi_scsi_cdb[2] = 4700 cpu_to_le32(((task->ata_task.atapi_packet[8]) | 4701 (task->ata_task.atapi_packet[9] << 8) | 4702 (task->ata_task.atapi_packet[10] << 16) | 4703 (task->ata_task.atapi_packet[11] << 24))); 4704 sata_cmd.atapi_scsi_cdb[3] = 4705 cpu_to_le32(((task->ata_task.atapi_packet[12]) | 4706 (task->ata_task.atapi_packet[13] << 8) | 4707 (task->ata_task.atapi_packet[14] << 16) | 4708 (task->ata_task.atapi_packet[15] << 24))); 4709 } 4710 4711 /* Check for read log for failed drive and return */ 4712 if (sata_cmd.sata_fis.command == 0x2f) { 4713 if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || 4714 (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || 4715 (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { 4716 struct task_status_struct *ts; 4717 4718 pm8001_ha_dev->id &= 0xDFFFFFFF; 4719 ts = &task->task_status; 4720 4721 
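/*
 * The device is flagged for driver-internal NCQ error handling
 * (NCQ_READ_LOG_FLAG / NCQ_ABORT_ALL_FLAG / NCQ_2ND_RLE_FLAG), so this
 * READ LOG EXT (0x2f) is completed locally with GOOD status rather than
 * being sent to the firmware.
 */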
spin_lock_irqsave(&task->task_state_lock, flags); 4722 ts->resp = SAS_TASK_COMPLETE; 4723 ts->stat = SAS_SAM_STAT_GOOD; 4724 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; 4725 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 4726 task->task_state_flags |= SAS_TASK_STATE_DONE; 4727 if (unlikely((task->task_state_flags & 4728 SAS_TASK_STATE_ABORTED))) { 4729 spin_unlock_irqrestore(&task->task_state_lock, 4730 flags); 4731 pm8001_dbg(pm8001_ha, FAIL, 4732 "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n", 4733 task, ts->resp, 4734 ts->stat); 4735 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); 4736 return 0; 4737 } else { 4738 spin_unlock_irqrestore(&task->task_state_lock, 4739 flags); 4740 pm8001_ccb_task_free_done(pm8001_ha, task, 4741 ccb, tag); 4742 atomic_dec(&pm8001_ha_dev->running_req); 4743 return 0; 4744 } 4745 } 4746 } 4747 trace_pm80xx_request_issue(pm8001_ha->id, 4748 ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS, 4749 ccb->ccb_tag, opc, 4750 qc ? qc->tf.command : 0, // ata opcode 4751 ccb->device ? atomic_read(&ccb->device->running_req) : 0); 4752 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, 4753 &sata_cmd, sizeof(sata_cmd), q_index); 4754 return ret; 4755 } 4756 4757 /** 4758 * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND 4759 * @pm8001_ha: our hba card information. 4760 * @phy_id: the phy id which we wanted to start up. 4761 */ 4762 static int 4763 pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) 4764 { 4765 struct phy_start_req payload; 4766 struct inbound_queue_table *circularQ; 4767 int ret; 4768 u32 tag = 0x01; 4769 u32 opcode = OPC_INB_PHYSTART; 4770 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4771 memset(&payload, 0, sizeof(payload)); 4772 payload.tag = cpu_to_le32(tag); 4773 4774 pm8001_dbg(pm8001_ha, INIT, "PHY START REQ for phy_id %d\n", phy_id); 4775 4776 payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | 4777 LINKMODE_AUTO | pm8001_ha->link_rate | phy_id); 4778 /* SSC Disable and SAS Analog ST configuration */ 4779 /* 4780 payload.ase_sh_lm_slr_phyid = 4781 cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | 4782 LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | 4783 phy_id); 4784 Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need 4785 */ 4786 4787 payload.sas_identify.dev_type = SAS_END_DEVICE; 4788 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; 4789 memcpy(payload.sas_identify.sas_addr, 4790 &pm8001_ha->sas_addr, SAS_ADDR_SIZE); 4791 payload.sas_identify.phy_id = phy_id; 4792 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 4793 sizeof(payload), 0); 4794 return ret; 4795 } 4796 4797 /** 4798 * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND 4799 * @pm8001_ha: our hba card information. 4800 * @phy_id: the phy id which we want to stop. 4801 */ 4802 static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, 4803 u8 phy_id) 4804 { 4805 struct phy_stop_req payload; 4806 struct inbound_queue_table *circularQ; 4807 int ret; 4808 u32 tag = 0x01; 4809 u32 opcode = OPC_INB_PHYSTOP; 4810 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4811 memset(&payload, 0, sizeof(payload)); 4812 payload.tag = cpu_to_le32(tag); 4813 payload.phy_id = cpu_to_le32(phy_id); 4814 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 4815 sizeof(payload), 0); 4816 return ret; 4817 } 4818 4819 /* 4820 * see comments on pm8001_mpi_reg_resp.
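 * The device is registered with the firmware via OPC_INB_REG_DEV; the
 * firmware's OPC_OUB_DEV_REGIST completion is handled by
 * pm8001_mpi_reg_resp().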

/*
 * see comments on pm8001_mpi_reg_resp.
 */
static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
				   struct pm8001_device *pm8001_dev, u32 flag)
{
	struct reg_dev_req payload;
	u32 opc;
	u32 stp_sspsmp_sata = 0x4;
	struct inbound_queue_table *circularQ;
	u32 linkrate, phy_id;
	int rc, tag = 0xdeadbeef;
	struct pm8001_ccb_info *ccb;
	u8 retryFlag = 0x1;
	u16 firstBurstSize = 0;
	u16 ITNT = 2000;
	struct domain_device *dev = pm8001_dev->sas_device;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_port *port = dev->port->lldd_port;

	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	memset(&payload, 0, sizeof(payload));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return rc;
	ccb = &pm8001_ha->ccb_info[tag];
	ccb->device = pm8001_dev;
	ccb->ccb_tag = tag;
	payload.tag = cpu_to_le32(tag);

	if (flag == 1) {
		stp_sspsmp_sata = 0x02; /* direct attached sata */
	} else {
		if (pm8001_dev->dev_type == SAS_SATA_DEV)
			stp_sspsmp_sata = 0x00; /* stp */
		else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
			 dev_is_expander(pm8001_dev->dev_type))
			stp_sspsmp_sata = 0x01; /* ssp or smp */
	}
	if (parent_dev && dev_is_expander(parent_dev->dev_type))
		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
	else
		phy_id = pm8001_dev->attached_phy;

	opc = OPC_INB_REG_DEV;

	linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
			pm8001_dev->sas_device->linkrate : dev->port->linkrate;

	payload.phyid_portid =
		cpu_to_le32(((port->port_id) & 0xFF) |
			    ((phy_id & 0xFF) << 8));

	payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
		((linkrate & 0x0F) << 24) |
		((stp_sspsmp_sata & 0x03) << 28));
	payload.firstburstsize_ITNexustimeout =
		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));

	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
	       SAS_ADDR_SIZE);

	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
				  sizeof(payload), 0);
	if (rc)
		pm8001_tag_free(pm8001_ha, tag);

	return rc;
}

/**
 * pm80xx_chip_phy_ctl_req - support the local phy operation
 * @pm8001_ha: our hba card information.
 * @phyId: the phy id on which to perform the operation
 * @phy_op: phy operation to request
 */
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
				   u32 phyId, u32 phy_op)
{
	u32 tag;
	int rc;
	struct local_phy_ctl_req payload;
	struct inbound_queue_table *circularQ;
	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;

	memset(&payload, 0, sizeof(payload));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return rc;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	payload.tag = cpu_to_le32(tag);
	payload.phyop_phyid =
		cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
	return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
				    sizeof(payload), 0);
}
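
/*
 * When MSI-X is in use the interrupt is always treated as ours; in legacy
 * interrupt mode the outbound doorbell register (MSGU_ODR) is read to check
 * whether this controller actually raised the interrupt.
 */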
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
{
#ifdef PM8001_USE_MSIX
	return 1;
#else
	u32 value;

	value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
	if (value)
		return 1;
	return 0;
#endif
}

/**
 * pm80xx_chip_isr - PM8001 isr handler.
 * @pm8001_ha: our hba card information.
 * @vec: irq number.
 */
static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
	pm80xx_chip_interrupt_disable(pm8001_ha, vec);
	pm8001_dbg(pm8001_ha, DEVIO,
		   "irq vec %d, ODMR:0x%x\n",
		   vec, pm8001_cr32(pm8001_ha, 0, 0x30));
	process_oq(pm8001_ha, vec);
	pm80xx_chip_interrupt_enable(pm8001_ha, vec);
	return IRQ_HANDLED;
}

static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
				    u32 operation, u32 phyid,
				    u32 length, u32 *buf)
{
	u32 tag, i, j = 0;
	int rc;
	struct set_phy_profile_req payload;
	struct inbound_queue_table *circularQ;
	u32 opc = OPC_INB_SET_PHY_PROFILE;

	memset(&payload, 0, sizeof(payload));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		pm8001_dbg(pm8001_ha, FAIL, "Invalid tag\n");
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	payload.tag = cpu_to_le32(tag);
	payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
	pm8001_dbg(pm8001_ha, INIT,
		   "phy profile command for phy %x, length is %d\n",
		   payload.ppc_phyid, length);
	for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
		payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
		j++;
	}
	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
				  sizeof(payload), 0);
	if (rc)
		pm8001_tag_free(pm8001_ha, tag);
}

void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
			    u32 length, u8 *buf)
{
	u32 i;

	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
		mpi_set_phy_profile_req(pm8001_ha,
			SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
		length = length + PHY_DWORD_LENGTH;
	}
	pm8001_dbg(pm8001_ha, INIT, "phy settings completed\n");
}

void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
				   u32 phy, u32 length, u32 *buf)
{
	u32 tag, opc;
	int rc, i;
	struct set_phy_profile_req payload;
	struct inbound_queue_table *circularQ;

	memset(&payload, 0, sizeof(payload));

	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		pm8001_dbg(pm8001_ha, INIT, "Invalid tag\n");

	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	opc = OPC_INB_SET_PHY_PROFILE;

	payload.tag = cpu_to_le32(tag);
	payload.ppc_phyid = (((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8)
				| (phy & 0xFF));

	for (i = 0; i < length; i++)
		payload.reserved[i] = cpu_to_le32(*(buf + i));

	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
				  sizeof(payload), 0);
	if (rc)
		pm8001_tag_free(pm8001_ha, tag);

	pm8001_dbg(pm8001_ha, INIT, "PHY %d settings applied\n", phy);
}
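
/*
 * Dispatch table that plugs the SPCv/SPCve (80xx) specific routines into the
 * common pm8001 layer; entries shared with the older SPC (8001) controller
 * reuse the pm8001_* helpers directly.
 */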
const struct pm8001_dispatch pm8001_80xx_dispatch = {
	.name = "pmc80xx",
	.chip_init = pm80xx_chip_init,
	.chip_soft_rst = pm80xx_chip_soft_rst,
	.chip_rst = pm80xx_hw_chip_rst,
	.chip_iounmap = pm8001_chip_iounmap,
	.isr = pm80xx_chip_isr,
	.is_our_interrupt = pm80xx_chip_is_our_interrupt,
	.isr_process_oq = process_oq,
	.interrupt_enable = pm80xx_chip_interrupt_enable,
	.interrupt_disable = pm80xx_chip_interrupt_disable,
	.make_prd = pm8001_chip_make_sg,
	.smp_req = pm80xx_chip_smp_req,
	.ssp_io_req = pm80xx_chip_ssp_io_req,
	.sata_req = pm80xx_chip_sata_req,
	.phy_start_req = pm80xx_chip_phy_start_req,
	.phy_stop_req = pm80xx_chip_phy_stop_req,
	.reg_dev_req = pm80xx_chip_reg_dev_req,
	.dereg_dev_req = pm8001_chip_dereg_dev_req,
	.phy_ctl_req = pm80xx_chip_phy_ctl_req,
	.task_abort = pm8001_chip_abort_task,
	.ssp_tm_req = pm8001_chip_ssp_tm_req,
	.get_nvmd_req = pm8001_chip_get_nvmd_req,
	.set_nvmd_req = pm8001_chip_set_nvmd_req,
	.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
	.set_dev_state_req = pm8001_chip_set_dev_state_req,
	.fatal_errors = pm80xx_fatal_errors,
	.hw_event_ack_req = pm80xx_hw_event_ack_req,
};