1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Driver for Broadcom MPI3 Storage Controllers 4 * 5 * Copyright (C) 2017-2023 Broadcom Inc. 6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) 7 * 8 */ 9 10 #include "mpi3mr.h" 11 #include <linux/bsg-lib.h> 12 #include <uapi/scsi/scsi_bsg_mpi3mr.h> 13 14 /** 15 * mpi3mr_alloc_trace_buffer: Allocate trace buffer 16 * @mrioc: Adapter instance reference 17 * @trace_size: Trace buffer size 18 * 19 * Allocate trace buffer 20 * Return: 0 on success, non-zero on failure. 21 */ 22 static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size) 23 { 24 struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0]; 25 26 diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev, 27 trace_size, &diag_buffer->dma_addr, GFP_KERNEL); 28 if (diag_buffer->addr) { 29 dprint_init(mrioc, "trace diag buffer is allocated successfully\n"); 30 return 0; 31 } 32 return -1; 33 } 34 35 /** 36 * mpi3mr_alloc_diag_bufs - Allocate memory for diag buffers 37 * @mrioc: Adapter instance reference 38 * 39 * This functions checks whether the driver defined buffer sizes 40 * are greater than IOCFacts provided controller local buffer 41 * sizes and if the driver defined sizes are more then the 42 * driver allocates the specific buffer by reading driver page1 43 * 44 * Return: Nothing. 45 */ 46 void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc) 47 { 48 struct diag_buffer_desc *diag_buffer; 49 struct mpi3_driver_page1 driver_pg1; 50 u32 trace_dec_size, trace_min_size, fw_dec_size, fw_min_size, 51 trace_size, fw_size; 52 u16 pg_sz = sizeof(driver_pg1); 53 int retval = 0; 54 bool retry = false; 55 56 if (mrioc->diag_buffers[0].addr || mrioc->diag_buffers[1].addr) 57 return; 58 59 retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz); 60 if (retval) { 61 ioc_warn(mrioc, 62 "%s: driver page 1 read failed, allocating trace\n" 63 "and firmware diag buffers of default size\n", __func__); 64 trace_size = fw_size = MPI3MR_DEFAULT_HDB_MAX_SZ; 65 trace_dec_size = fw_dec_size = MPI3MR_DEFAULT_HDB_DEC_SZ; 66 trace_min_size = fw_min_size = MPI3MR_DEFAULT_HDB_MIN_SZ; 67 68 } else { 69 trace_size = driver_pg1.host_diag_trace_max_size * 1024; 70 trace_dec_size = driver_pg1.host_diag_trace_decrement_size 71 * 1024; 72 trace_min_size = driver_pg1.host_diag_trace_min_size * 1024; 73 fw_size = driver_pg1.host_diag_fw_max_size * 1024; 74 fw_dec_size = driver_pg1.host_diag_fw_decrement_size * 1024; 75 fw_min_size = driver_pg1.host_diag_fw_min_size * 1024; 76 dprint_init(mrioc, 77 "%s:trace diag buffer sizes read from driver\n" 78 "page1: maximum size = %dKB, decrement size = %dKB\n" 79 ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_trace_max_size, 80 driver_pg1.host_diag_trace_decrement_size, 81 driver_pg1.host_diag_trace_min_size); 82 dprint_init(mrioc, 83 "%s:firmware diag buffer sizes read from driver\n" 84 "page1: maximum size = %dKB, decrement size = %dKB\n" 85 ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_fw_max_size, 86 driver_pg1.host_diag_fw_decrement_size, 87 driver_pg1.host_diag_fw_min_size); 88 if ((trace_size == 0) && (fw_size == 0)) 89 return; 90 } 91 92 93 retry_trace: 94 diag_buffer = &mrioc->diag_buffers[0]; 95 diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_TRACE; 96 diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED; 97 if ((mrioc->facts.diag_trace_sz < trace_size) && (trace_size >= 98 trace_min_size)) { 99 if (!retry) 100 dprint_init(mrioc, 101 "trying to allocate trace diag buffer of size = %dKB\n", 102 trace_size / 1024); 103 if (mpi3mr_alloc_trace_buffer(mrioc, trace_size)) { 104 retry = true; 105 trace_size -= trace_dec_size; 106 dprint_init(mrioc, "trace diag buffer allocation failed\n" 107 "retrying smaller size %dKB\n", trace_size / 1024); 108 goto retry_trace; 109 } else 110 diag_buffer->size = trace_size; 111 } 112 113 retry = false; 114 retry_fw: 115 116 diag_buffer = &mrioc->diag_buffers[1]; 117 118 diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_FW; 119 diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED; 120 if ((mrioc->facts.diag_fw_sz < fw_size) && (fw_size >= fw_min_size)) { 121 diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev, 122 fw_size, &diag_buffer->dma_addr, GFP_KERNEL); 123 if (!retry) 124 dprint_init(mrioc, 125 "%s:trying to allocate firmware diag buffer of size = %dKB\n", 126 __func__, fw_size / 1024); 127 if (diag_buffer->addr) { 128 dprint_init(mrioc, "%s:firmware diag buffer allocated successfully\n", 129 __func__); 130 diag_buffer->size = fw_size; 131 } else { 132 retry = true; 133 fw_size -= fw_dec_size; 134 dprint_init(mrioc, "%s:trace diag buffer allocation failed,\n" 135 "retrying smaller size %dKB\n", 136 __func__, fw_size / 1024); 137 goto retry_fw; 138 } 139 } 140 } 141 142 /** 143 * mpi3mr_issue_diag_buf_post - Send diag buffer post req 144 * @mrioc: Adapter instance reference 145 * @diag_buffer: Diagnostic buffer descriptor 146 * 147 * Issue diagnostic buffer post MPI request through admin queue 148 * and wait for the completion of it or time out. 149 * 150 * Return: 0 on success, non-zero on failures. 151 */ 152 int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc, 153 struct diag_buffer_desc *diag_buffer) 154 { 155 struct mpi3_diag_buffer_post_request diag_buf_post_req; 156 u8 prev_status; 157 int retval = 0; 158 159 memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req)); 160 mutex_lock(&mrioc->init_cmds.mutex); 161 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 162 dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); 163 mutex_unlock(&mrioc->init_cmds.mutex); 164 return -1; 165 } 166 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 167 mrioc->init_cmds.is_waiting = 1; 168 mrioc->init_cmds.callback = NULL; 169 diag_buf_post_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 170 diag_buf_post_req.function = MPI3_FUNCTION_DIAG_BUFFER_POST; 171 diag_buf_post_req.type = diag_buffer->type; 172 diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr); 173 diag_buf_post_req.length = le32_to_cpu(diag_buffer->size); 174 175 dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__, 176 diag_buffer->type); 177 prev_status = diag_buffer->status; 178 diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED; 179 init_completion(&mrioc->init_cmds.done); 180 retval = mpi3mr_admin_request_post(mrioc, &diag_buf_post_req, 181 sizeof(diag_buf_post_req), 1); 182 if (retval) { 183 dprint_bsg_err(mrioc, "%s: admin request post failed\n", 184 __func__); 185 goto out_unlock; 186 } 187 wait_for_completion_timeout(&mrioc->init_cmds.done, 188 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 189 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 190 mrioc->init_cmds.is_waiting = 0; 191 dprint_bsg_err(mrioc, "%s: command timedout\n", __func__); 192 mpi3mr_check_rh_fault_ioc(mrioc, 193 MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT); 194 retval = -1; 195 goto out_unlock; 196 } 197 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 198 != MPI3_IOCSTATUS_SUCCESS) { 199 dprint_bsg_err(mrioc, 200 "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n", 201 __func__, diag_buffer->type, 202 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 203 mrioc->init_cmds.ioc_loginfo); 204 retval = -1; 205 goto out_unlock; 206 } 207 dprint_bsg_info(mrioc, "%s: diag buffer type %d posted successfully\n", 208 __func__, diag_buffer->type); 209 210 out_unlock: 211 if (retval) 212 diag_buffer->status = prev_status; 213 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 214 mutex_unlock(&mrioc->init_cmds.mutex); 215 return retval; 216 } 217 218 /** 219 * mpi3mr_post_diag_bufs - Post diag buffers to the controller 220 * @mrioc: Adapter instance reference 221 * 222 * This function calls helper function to post both trace and 223 * firmware buffers to the controller. 224 * 225 * Return: None 226 */ 227 int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc) 228 { 229 u8 i; 230 struct diag_buffer_desc *diag_buffer; 231 232 for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { 233 diag_buffer = &mrioc->diag_buffers[i]; 234 if (!(diag_buffer->addr)) 235 continue; 236 if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) 237 return -1; 238 } 239 return 0; 240 } 241 242 /** 243 * mpi3mr_issue_diag_buf_release - Send diag buffer release req 244 * @mrioc: Adapter instance reference 245 * @diag_buffer: Diagnostic buffer descriptor 246 * 247 * Issue diagnostic buffer manage MPI request with release 248 * action request through admin queue and wait for the 249 * completion of it or time out. 250 * 251 * Return: 0 on success, non-zero on failures. 252 */ 253 int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc, 254 struct diag_buffer_desc *diag_buffer) 255 { 256 struct mpi3_diag_buffer_manage_request diag_buf_manage_req; 257 int retval = 0; 258 259 if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) && 260 (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) 261 return retval; 262 263 memset(&diag_buf_manage_req, 0, sizeof(diag_buf_manage_req)); 264 mutex_lock(&mrioc->init_cmds.mutex); 265 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 266 dprint_reset(mrioc, "%s: command is in use\n", __func__); 267 mutex_unlock(&mrioc->init_cmds.mutex); 268 return -1; 269 } 270 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 271 mrioc->init_cmds.is_waiting = 1; 272 mrioc->init_cmds.callback = NULL; 273 diag_buf_manage_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 274 diag_buf_manage_req.function = MPI3_FUNCTION_DIAG_BUFFER_MANAGE; 275 diag_buf_manage_req.type = diag_buffer->type; 276 diag_buf_manage_req.action = MPI3_DIAG_BUFFER_ACTION_RELEASE; 277 278 279 dprint_reset(mrioc, "%s: releasing diag buffer type %d\n", __func__, 280 diag_buffer->type); 281 init_completion(&mrioc->init_cmds.done); 282 retval = mpi3mr_admin_request_post(mrioc, &diag_buf_manage_req, 283 sizeof(diag_buf_manage_req), 1); 284 if (retval) { 285 dprint_reset(mrioc, "%s: admin request post failed\n", __func__); 286 mpi3mr_set_trigger_data_in_hdb(diag_buffer, 287 MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); 288 goto out_unlock; 289 } 290 wait_for_completion_timeout(&mrioc->init_cmds.done, 291 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 292 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 293 mrioc->init_cmds.is_waiting = 0; 294 dprint_reset(mrioc, "%s: command timedout\n", __func__); 295 mpi3mr_check_rh_fault_ioc(mrioc, 296 MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT); 297 retval = -1; 298 goto out_unlock; 299 } 300 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 301 != MPI3_IOCSTATUS_SUCCESS) { 302 dprint_reset(mrioc, 303 "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n", 304 __func__, diag_buffer->type, 305 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 306 mrioc->init_cmds.ioc_loginfo); 307 retval = -1; 308 goto out_unlock; 309 } 310 dprint_reset(mrioc, "%s: diag buffer type %d released successfully\n", 311 __func__, diag_buffer->type); 312 313 out_unlock: 314 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 315 mutex_unlock(&mrioc->init_cmds.mutex); 316 return retval; 317 } 318 319 /** 320 * mpi3mr_process_trigger - Generic HDB Trigger handler 321 * @mrioc: Adapter instance reference 322 * @trigger_type: Trigger type 323 * @trigger_data: Trigger data 324 * @trigger_flags: Trigger flags 325 * 326 * This function checks validity of HDB, triggers and based on 327 * trigger information, creates an event to be processed in the 328 * firmware event worker thread . 329 * 330 * This function should be called with trigger spinlock held 331 * 332 * Return: Nothing 333 */ 334 static void mpi3mr_process_trigger(struct mpi3mr_ioc *mrioc, u8 trigger_type, 335 union mpi3mr_trigger_data *trigger_data, u8 trigger_flags) 336 { 337 struct trigger_event_data event_data; 338 struct diag_buffer_desc *trace_hdb = NULL; 339 struct diag_buffer_desc *fw_hdb = NULL; 340 u64 global_trigger; 341 342 trace_hdb = mpi3mr_diag_buffer_for_type(mrioc, 343 MPI3_DIAG_BUFFER_TYPE_TRACE); 344 if (trace_hdb && 345 (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) && 346 (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) 347 trace_hdb = NULL; 348 349 fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW); 350 351 if (fw_hdb && 352 (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) && 353 (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) 354 fw_hdb = NULL; 355 356 if (mrioc->snapdump_trigger_active || (mrioc->fw_release_trigger_active 357 && mrioc->trace_release_trigger_active) || 358 (!trace_hdb && !fw_hdb) || (!mrioc->driver_pg2) || 359 ((trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) 360 && (!mrioc->driver_pg2->num_triggers))) 361 return; 362 363 memset(&event_data, 0, sizeof(event_data)); 364 event_data.trigger_type = trigger_type; 365 memcpy(&event_data.trigger_specific_data, trigger_data, 366 sizeof(*trigger_data)); 367 global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger); 368 369 if (global_trigger & MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED) { 370 event_data.snapdump = true; 371 event_data.trace_hdb = trace_hdb; 372 event_data.fw_hdb = fw_hdb; 373 mrioc->snapdump_trigger_active = true; 374 } else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_GLOBAL) { 375 if ((trace_hdb) && (global_trigger & 376 MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE) && 377 (!mrioc->trace_release_trigger_active)) { 378 event_data.trace_hdb = trace_hdb; 379 mrioc->trace_release_trigger_active = true; 380 } 381 if ((fw_hdb) && (global_trigger & 382 MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE) && 383 (!mrioc->fw_release_trigger_active)) { 384 event_data.fw_hdb = fw_hdb; 385 mrioc->fw_release_trigger_active = true; 386 } 387 } else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) { 388 if ((trace_hdb) && (trigger_flags & 389 MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE) && 390 (!mrioc->trace_release_trigger_active)) { 391 event_data.trace_hdb = trace_hdb; 392 mrioc->trace_release_trigger_active = true; 393 } 394 if ((fw_hdb) && (trigger_flags & 395 MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE) && 396 (!mrioc->fw_release_trigger_active)) { 397 event_data.fw_hdb = fw_hdb; 398 mrioc->fw_release_trigger_active = true; 399 } 400 } 401 402 if (event_data.trace_hdb || event_data.fw_hdb) 403 mpi3mr_hdb_trigger_data_event(mrioc, &event_data); 404 } 405 406 /** 407 * mpi3mr_global_trigger - Global HDB trigger handler 408 * @mrioc: Adapter instance reference 409 * @trigger_data: Trigger data 410 * 411 * This function checks whether the given global trigger is 412 * enabled in the driver page 2 and if so calls generic trigger 413 * handler to queue event for HDB release. 414 * 415 * Return: Nothing 416 */ 417 void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data) 418 { 419 unsigned long flags; 420 union mpi3mr_trigger_data trigger_specific_data; 421 422 spin_lock_irqsave(&mrioc->trigger_lock, flags); 423 if (le64_to_cpu(mrioc->driver_pg2->global_trigger) & trigger_data) { 424 memset(&trigger_specific_data, 0, 425 sizeof(trigger_specific_data)); 426 trigger_specific_data.global = trigger_data; 427 mpi3mr_process_trigger(mrioc, MPI3MR_HDB_TRIGGER_TYPE_GLOBAL, 428 &trigger_specific_data, 0); 429 } 430 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 431 } 432 433 /** 434 * mpi3mr_scsisense_trigger - SCSI sense HDB trigger handler 435 * @mrioc: Adapter instance reference 436 * @sensekey: Sense Key 437 * @asc: Additional Sense Code 438 * @ascq: Additional Sense Code Qualifier 439 * 440 * This function compares SCSI sense trigger values with driver 441 * page 2 values and calls generic trigger handler to release 442 * HDBs if match found 443 * 444 * Return: Nothing 445 */ 446 void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 sensekey, u8 asc, 447 u8 ascq) 448 { 449 struct mpi3_driver2_trigger_scsi_sense *scsi_sense_trigger = NULL; 450 u64 i = 0; 451 unsigned long flags; 452 u8 num_triggers, trigger_flags; 453 454 if (mrioc->scsisense_trigger_present) { 455 spin_lock_irqsave(&mrioc->trigger_lock, flags); 456 scsi_sense_trigger = (struct mpi3_driver2_trigger_scsi_sense *) 457 mrioc->driver_pg2->trigger; 458 num_triggers = mrioc->driver_pg2->num_triggers; 459 for (i = 0; i < num_triggers; i++, scsi_sense_trigger++) { 460 if (scsi_sense_trigger->type != 461 MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE) 462 continue; 463 if (!(scsi_sense_trigger->sense_key == 464 MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL 465 || scsi_sense_trigger->sense_key == sensekey)) 466 continue; 467 if (!(scsi_sense_trigger->asc == 468 MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL || 469 scsi_sense_trigger->asc == asc)) 470 continue; 471 if (!(scsi_sense_trigger->ascq == 472 MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL || 473 scsi_sense_trigger->ascq == ascq)) 474 continue; 475 trigger_flags = scsi_sense_trigger->flags; 476 mpi3mr_process_trigger(mrioc, 477 MPI3MR_HDB_TRIGGER_TYPE_ELEMENT, 478 (union mpi3mr_trigger_data *)scsi_sense_trigger, 479 trigger_flags); 480 break; 481 } 482 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 483 } 484 } 485 486 /** 487 * mpi3mr_event_trigger - MPI event HDB trigger handler 488 * @mrioc: Adapter instance reference 489 * @event: MPI Event 490 * 491 * This function compares event trigger values with driver page 492 * 2 values and calls generic trigger handler to release 493 * HDBs if match found. 494 * 495 * Return: Nothing 496 */ 497 void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event) 498 { 499 struct mpi3_driver2_trigger_event *event_trigger = NULL; 500 u64 i = 0; 501 unsigned long flags; 502 u8 num_triggers, trigger_flags; 503 504 if (mrioc->event_trigger_present) { 505 spin_lock_irqsave(&mrioc->trigger_lock, flags); 506 event_trigger = (struct mpi3_driver2_trigger_event *) 507 mrioc->driver_pg2->trigger; 508 num_triggers = mrioc->driver_pg2->num_triggers; 509 510 for (i = 0; i < num_triggers; i++, event_trigger++) { 511 if (event_trigger->type != 512 MPI3_DRIVER2_TRIGGER_TYPE_EVENT) 513 continue; 514 if (event_trigger->event != event) 515 continue; 516 trigger_flags = event_trigger->flags; 517 mpi3mr_process_trigger(mrioc, 518 MPI3MR_HDB_TRIGGER_TYPE_ELEMENT, 519 (union mpi3mr_trigger_data *)event_trigger, 520 trigger_flags); 521 break; 522 } 523 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 524 } 525 } 526 527 /** 528 * mpi3mr_reply_trigger - MPI Reply HDB trigger handler 529 * @mrioc: Adapter instance reference 530 * @ioc_status: Masked value of IOC Status from MPI Reply 531 * @ioc_loginfo: IOC Log Info from MPI Reply 532 * 533 * This function compares IOC status and IOC log info trigger 534 * values with driver page 2 values and calls generic trigger 535 * handler to release HDBs if match found. 536 * 537 * Return: Nothing 538 */ 539 void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 ioc_status, 540 u32 ioc_loginfo) 541 { 542 struct mpi3_driver2_trigger_reply *reply_trigger = NULL; 543 u64 i = 0; 544 unsigned long flags; 545 u8 num_triggers, trigger_flags; 546 547 if (mrioc->reply_trigger_present) { 548 spin_lock_irqsave(&mrioc->trigger_lock, flags); 549 reply_trigger = (struct mpi3_driver2_trigger_reply *) 550 mrioc->driver_pg2->trigger; 551 num_triggers = mrioc->driver_pg2->num_triggers; 552 for (i = 0; i < num_triggers; i++, reply_trigger++) { 553 if (reply_trigger->type != 554 MPI3_DRIVER2_TRIGGER_TYPE_REPLY) 555 continue; 556 if ((le16_to_cpu(reply_trigger->ioc_status) != 557 ioc_status) 558 && (le16_to_cpu(reply_trigger->ioc_status) != 559 MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL)) 560 continue; 561 if ((le32_to_cpu(reply_trigger->ioc_log_info) != 562 (le32_to_cpu(reply_trigger->ioc_log_info_mask) & 563 ioc_loginfo))) 564 continue; 565 trigger_flags = reply_trigger->flags; 566 mpi3mr_process_trigger(mrioc, 567 MPI3MR_HDB_TRIGGER_TYPE_ELEMENT, 568 (union mpi3mr_trigger_data *)reply_trigger, 569 trigger_flags); 570 break; 571 } 572 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 573 } 574 } 575 576 /** 577 * mpi3mr_get_num_trigger - Gets number of HDB triggers 578 * @mrioc: Adapter instance reference 579 * @num_triggers: Number of triggers 580 * @page_action: Page action 581 * 582 * This function reads number of triggers by reading driver page 583 * 2 584 * 585 * Return: 0 on success and proper error codes on failure 586 */ 587 static int mpi3mr_get_num_trigger(struct mpi3mr_ioc *mrioc, u8 *num_triggers, 588 u8 page_action) 589 { 590 struct mpi3_driver_page2 drvr_page2; 591 int retval = 0; 592 593 *num_triggers = 0; 594 595 retval = mpi3mr_cfg_get_driver_pg2(mrioc, &drvr_page2, 596 sizeof(struct mpi3_driver_page2), page_action); 597 598 if (retval) { 599 dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__); 600 return retval; 601 } 602 *num_triggers = drvr_page2.num_triggers; 603 return retval; 604 } 605 606 /** 607 * mpi3mr_refresh_trigger - Handler for Refresh trigger BSG 608 * @mrioc: Adapter instance reference 609 * @page_action: Page action 610 * 611 * This function caches the driver page 2 in the driver's memory 612 * by reading driver page 2 from the controller for a given page 613 * type and updates the HDB trigger values 614 * 615 * Return: 0 on success and proper error codes on failure 616 */ 617 int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_action) 618 { 619 u16 pg_sz = sizeof(struct mpi3_driver_page2); 620 struct mpi3_driver_page2 *drvr_page2 = NULL; 621 u8 trigger_type, num_triggers; 622 int retval; 623 int i = 0; 624 unsigned long flags; 625 626 retval = mpi3mr_get_num_trigger(mrioc, &num_triggers, page_action); 627 628 if (retval) 629 goto out; 630 631 pg_sz = offsetof(struct mpi3_driver_page2, trigger) + 632 (num_triggers * sizeof(union mpi3_driver2_trigger_element)); 633 drvr_page2 = kzalloc(pg_sz, GFP_KERNEL); 634 if (!drvr_page2) { 635 retval = -ENOMEM; 636 goto out; 637 } 638 639 retval = mpi3mr_cfg_get_driver_pg2(mrioc, drvr_page2, pg_sz, page_action); 640 if (retval) { 641 dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__); 642 kfree(drvr_page2); 643 goto out; 644 } 645 spin_lock_irqsave(&mrioc->trigger_lock, flags); 646 kfree(mrioc->driver_pg2); 647 mrioc->driver_pg2 = drvr_page2; 648 mrioc->reply_trigger_present = false; 649 mrioc->event_trigger_present = false; 650 mrioc->scsisense_trigger_present = false; 651 652 for (i = 0; (i < mrioc->driver_pg2->num_triggers); i++) { 653 trigger_type = mrioc->driver_pg2->trigger[i].event.type; 654 switch (trigger_type) { 655 case MPI3_DRIVER2_TRIGGER_TYPE_REPLY: 656 mrioc->reply_trigger_present = true; 657 break; 658 case MPI3_DRIVER2_TRIGGER_TYPE_EVENT: 659 mrioc->event_trigger_present = true; 660 break; 661 case MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE: 662 mrioc->scsisense_trigger_present = true; 663 break; 664 default: 665 break; 666 } 667 } 668 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 669 out: 670 return retval; 671 } 672 673 /** 674 * mpi3mr_release_diag_bufs - Release diag buffers 675 * @mrioc: Adapter instance reference 676 * @skip_rel_action: Skip release action and set buffer state 677 * 678 * This function calls helper function to release both trace and 679 * firmware buffers from the controller. 680 * 681 * Return: None 682 */ 683 void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action) 684 { 685 u8 i; 686 struct diag_buffer_desc *diag_buffer; 687 688 for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { 689 diag_buffer = &mrioc->diag_buffers[i]; 690 if (!(diag_buffer->addr)) 691 continue; 692 if (diag_buffer->status == MPI3MR_HDB_BUFSTATUS_RELEASED) 693 continue; 694 if (!skip_rel_action) 695 mpi3mr_issue_diag_buf_release(mrioc, diag_buffer); 696 diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED; 697 atomic64_inc(&event_counter); 698 } 699 } 700 701 /** 702 * mpi3mr_set_trigger_data_in_hdb - Updates HDB trigger type and 703 * trigger data 704 * 705 * @hdb: HDB pointer 706 * @type: Trigger type 707 * @data: Trigger data 708 * @force: Trigger overwrite flag 709 * @trigger_data: Pointer to trigger data information 710 * 711 * Updates trigger type and trigger data based on parameter 712 * passed to this function 713 * 714 * Return: Nothing 715 */ 716 void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb, 717 u8 type, union mpi3mr_trigger_data *trigger_data, bool force) 718 { 719 if ((!force) && (hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN)) 720 return; 721 hdb->trigger_type = type; 722 if (!trigger_data) 723 memset(&hdb->trigger_data, 0, sizeof(*trigger_data)); 724 else 725 memcpy(&hdb->trigger_data, trigger_data, sizeof(*trigger_data)); 726 } 727 728 /** 729 * mpi3mr_set_trigger_data_in_all_hdb - Updates HDB trigger type 730 * and trigger data for all HDB 731 * 732 * @mrioc: Adapter instance reference 733 * @type: Trigger type 734 * @data: Trigger data 735 * @force: Trigger overwrite flag 736 * @trigger_data: Pointer to trigger data information 737 * 738 * Updates trigger type and trigger data based on parameter 739 * passed to this function 740 * 741 * Return: Nothing 742 */ 743 void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc, 744 u8 type, union mpi3mr_trigger_data *trigger_data, bool force) 745 { 746 struct diag_buffer_desc *hdb = NULL; 747 748 hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_TRACE); 749 if (hdb) 750 mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force); 751 hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW); 752 if (hdb) 753 mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force); 754 } 755 756 /** 757 * mpi3mr_hdbstatuschg_evt_th - HDB status change evt tophalf 758 * @mrioc: Adapter instance reference 759 * @event_reply: event data 760 * 761 * Modifies the status of the applicable diag buffer descriptors 762 * 763 * Return: Nothing 764 */ 765 void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 766 struct mpi3_event_notification_reply *event_reply) 767 { 768 struct mpi3_event_data_diag_buffer_status_change *evtdata; 769 struct diag_buffer_desc *diag_buffer; 770 771 evtdata = (struct mpi3_event_data_diag_buffer_status_change *) 772 event_reply->event_data; 773 774 diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, evtdata->type); 775 if (!diag_buffer) 776 return; 777 if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) && 778 (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) 779 return; 780 switch (evtdata->reason_code) { 781 case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED: 782 { 783 diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED; 784 mpi3mr_set_trigger_data_in_hdb(diag_buffer, 785 MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0); 786 atomic64_inc(&event_counter); 787 break; 788 } 789 case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED: 790 { 791 diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED; 792 break; 793 } 794 case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED: 795 { 796 diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED; 797 break; 798 } 799 default: 800 dprint_event_th(mrioc, "%s: unknown reason_code(%d)\n", 801 __func__, evtdata->reason_code); 802 break; 803 } 804 } 805 806 /** 807 * mpi3mr_diag_buffer_for_type - returns buffer desc for type 808 * @mrioc: Adapter instance reference 809 * @buf_type: Diagnostic buffer type 810 * 811 * Identifies matching diag descriptor from mrioc for given diag 812 * buffer type. 813 * 814 * Return: diag buffer descriptor on success, NULL on failures. 815 */ 816 817 struct diag_buffer_desc * 818 mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc, u8 buf_type) 819 { 820 u8 i; 821 822 for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { 823 if (mrioc->diag_buffers[i].type == buf_type) 824 return &mrioc->diag_buffers[i]; 825 } 826 return NULL; 827 } 828 829 /** 830 * mpi3mr_bsg_pel_abort - sends PEL abort request 831 * @mrioc: Adapter instance reference 832 * 833 * This function sends PEL abort request to the firmware through 834 * admin request queue. 835 * 836 * Return: 0 on success, -1 on failure 837 */ 838 static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc) 839 { 840 struct mpi3_pel_req_action_abort pel_abort_req; 841 struct mpi3_pel_reply *pel_reply; 842 int retval = 0; 843 u16 pe_log_status; 844 845 if (mrioc->reset_in_progress) { 846 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); 847 return -1; 848 } 849 if (mrioc->stop_bsgs || mrioc->block_on_pci_err) { 850 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); 851 return -1; 852 } 853 854 memset(&pel_abort_req, 0, sizeof(pel_abort_req)); 855 mutex_lock(&mrioc->pel_abort_cmd.mutex); 856 if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) { 857 dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); 858 mutex_unlock(&mrioc->pel_abort_cmd.mutex); 859 return -1; 860 } 861 mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING; 862 mrioc->pel_abort_cmd.is_waiting = 1; 863 mrioc->pel_abort_cmd.callback = NULL; 864 pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT); 865 pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 866 pel_abort_req.action = MPI3_PEL_ACTION_ABORT; 867 pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); 868 869 mrioc->pel_abort_requested = 1; 870 init_completion(&mrioc->pel_abort_cmd.done); 871 retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req, 872 sizeof(pel_abort_req), 0); 873 if (retval) { 874 retval = -1; 875 dprint_bsg_err(mrioc, "%s: admin request post failed\n", 876 __func__); 877 mrioc->pel_abort_requested = 0; 878 goto out_unlock; 879 } 880 881 wait_for_completion_timeout(&mrioc->pel_abort_cmd.done, 882 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 883 if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) { 884 mrioc->pel_abort_cmd.is_waiting = 0; 885 dprint_bsg_err(mrioc, "%s: command timedout\n", __func__); 886 if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET)) 887 mpi3mr_soft_reset_handler(mrioc, 888 MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1); 889 retval = -1; 890 goto out_unlock; 891 } 892 if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 893 != MPI3_IOCSTATUS_SUCCESS) { 894 dprint_bsg_err(mrioc, 895 "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n", 896 __func__, (mrioc->pel_abort_cmd.ioc_status & 897 MPI3_IOCSTATUS_STATUS_MASK), 898 mrioc->pel_abort_cmd.ioc_loginfo); 899 retval = -1; 900 goto out_unlock; 901 } 902 if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) { 903 pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply; 904 pe_log_status = le16_to_cpu(pel_reply->pe_log_status); 905 if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) { 906 dprint_bsg_err(mrioc, 907 "%s: command failed, pel_status(0x%04x)\n", 908 __func__, pe_log_status); 909 retval = -1; 910 } 911 } 912 913 out_unlock: 914 mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED; 915 mutex_unlock(&mrioc->pel_abort_cmd.mutex); 916 return retval; 917 } 918 /** 919 * mpi3mr_bsg_verify_adapter - verify adapter number is valid 920 * @ioc_number: Adapter number 921 * 922 * This function returns the adapter instance pointer of given 923 * adapter number. If adapter number does not match with the 924 * driver's adapter list, driver returns NULL. 925 * 926 * Return: adapter instance reference 927 */ 928 static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number) 929 { 930 struct mpi3mr_ioc *mrioc = NULL; 931 932 spin_lock(&mrioc_list_lock); 933 list_for_each_entry(mrioc, &mrioc_list, list) { 934 if (mrioc->id == ioc_number) { 935 spin_unlock(&mrioc_list_lock); 936 return mrioc; 937 } 938 } 939 spin_unlock(&mrioc_list_lock); 940 return NULL; 941 } 942 943 /** 944 * mpi3mr_bsg_refresh_hdb_triggers - Refresh HDB trigger data 945 * @mrioc: Adapter instance reference 946 * @job: BSG Job pointer 947 * 948 * This function reads the controller trigger config page as 949 * defined by the input page type and refreshes the driver's 950 * local trigger information structures with the controller's 951 * config page data. 952 * 953 * Return: 0 on success and proper error codes on failure 954 */ 955 static long 956 mpi3mr_bsg_refresh_hdb_triggers(struct mpi3mr_ioc *mrioc, 957 struct bsg_job *job) 958 { 959 struct mpi3mr_bsg_out_refresh_hdb_triggers refresh_triggers; 960 uint32_t data_out_sz; 961 u8 page_action; 962 long rval = -EINVAL; 963 964 data_out_sz = job->request_payload.payload_len; 965 966 if (data_out_sz != sizeof(refresh_triggers)) { 967 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 968 __func__); 969 return rval; 970 } 971 972 if (mrioc->unrecoverable) { 973 dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", 974 __func__); 975 return -EFAULT; 976 } 977 if (mrioc->reset_in_progress) { 978 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); 979 return -EAGAIN; 980 } 981 982 sg_copy_to_buffer(job->request_payload.sg_list, 983 job->request_payload.sg_cnt, 984 &refresh_triggers, sizeof(refresh_triggers)); 985 986 switch (refresh_triggers.page_type) { 987 case MPI3MR_HDB_REFRESH_TYPE_CURRENT: 988 page_action = MPI3_CONFIG_ACTION_READ_CURRENT; 989 break; 990 case MPI3MR_HDB_REFRESH_TYPE_DEFAULT: 991 page_action = MPI3_CONFIG_ACTION_READ_DEFAULT; 992 break; 993 case MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT: 994 page_action = MPI3_CONFIG_ACTION_READ_PERSISTENT; 995 break; 996 default: 997 dprint_bsg_err(mrioc, 998 "%s: unsupported refresh trigger, page_type %d\n", 999 __func__, refresh_triggers.page_type); 1000 return rval; 1001 } 1002 rval = mpi3mr_refresh_trigger(mrioc, page_action); 1003 1004 return rval; 1005 } 1006 1007 /** 1008 * mpi3mr_bsg_upload_hdb - Upload a specific HDB to user space 1009 * @mrioc: Adapter instance reference 1010 * @job: BSG Job pointer 1011 * 1012 * Return: 0 on success and proper error codes on failure 1013 */ 1014 static long mpi3mr_bsg_upload_hdb(struct mpi3mr_ioc *mrioc, 1015 struct bsg_job *job) 1016 { 1017 struct mpi3mr_bsg_out_upload_hdb upload_hdb; 1018 struct diag_buffer_desc *diag_buffer; 1019 uint32_t data_out_size; 1020 uint32_t data_in_size; 1021 1022 data_out_size = job->request_payload.payload_len; 1023 data_in_size = job->reply_payload.payload_len; 1024 1025 if (data_out_size != sizeof(upload_hdb)) { 1026 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 1027 __func__); 1028 return -EINVAL; 1029 } 1030 1031 sg_copy_to_buffer(job->request_payload.sg_list, 1032 job->request_payload.sg_cnt, 1033 &upload_hdb, sizeof(upload_hdb)); 1034 1035 if ((!upload_hdb.length) || (data_in_size != upload_hdb.length)) { 1036 dprint_bsg_err(mrioc, "%s: invalid length argument\n", 1037 __func__); 1038 return -EINVAL; 1039 } 1040 diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, upload_hdb.buf_type); 1041 if ((!diag_buffer) || (!diag_buffer->addr)) { 1042 dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n", 1043 __func__, upload_hdb.buf_type); 1044 return -EINVAL; 1045 } 1046 1047 if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) && 1048 (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) { 1049 dprint_bsg_err(mrioc, 1050 "%s: invalid buffer status %d for type %d\n", 1051 __func__, diag_buffer->status, upload_hdb.buf_type); 1052 return -EINVAL; 1053 } 1054 1055 if ((upload_hdb.start_offset + upload_hdb.length) > diag_buffer->size) { 1056 dprint_bsg_err(mrioc, 1057 "%s: invalid start offset %d, length %d for type %d\n", 1058 __func__, upload_hdb.start_offset, upload_hdb.length, 1059 upload_hdb.buf_type); 1060 return -EINVAL; 1061 } 1062 sg_copy_from_buffer(job->reply_payload.sg_list, 1063 job->reply_payload.sg_cnt, 1064 (diag_buffer->addr + upload_hdb.start_offset), 1065 data_in_size); 1066 return 0; 1067 } 1068 1069 /** 1070 * mpi3mr_bsg_repost_hdb - Re-post HDB 1071 * @mrioc: Adapter instance reference 1072 * @job: BSG job pointer 1073 * 1074 * This function retrieves the HDB descriptor corresponding to a 1075 * given buffer type and if the HDB is in released status then 1076 * posts the HDB with the firmware. 1077 * 1078 * Return: 0 on success and proper error codes on failure 1079 */ 1080 static long mpi3mr_bsg_repost_hdb(struct mpi3mr_ioc *mrioc, 1081 struct bsg_job *job) 1082 { 1083 struct mpi3mr_bsg_out_repost_hdb repost_hdb; 1084 struct diag_buffer_desc *diag_buffer; 1085 uint32_t data_out_sz; 1086 1087 data_out_sz = job->request_payload.payload_len; 1088 1089 if (data_out_sz != sizeof(repost_hdb)) { 1090 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 1091 __func__); 1092 return -EINVAL; 1093 } 1094 if (mrioc->unrecoverable) { 1095 dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", 1096 __func__); 1097 return -EFAULT; 1098 } 1099 if (mrioc->reset_in_progress) { 1100 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); 1101 return -EAGAIN; 1102 } 1103 1104 sg_copy_to_buffer(job->request_payload.sg_list, 1105 job->request_payload.sg_cnt, 1106 &repost_hdb, sizeof(repost_hdb)); 1107 1108 diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, repost_hdb.buf_type); 1109 if ((!diag_buffer) || (!diag_buffer->addr)) { 1110 dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n", 1111 __func__, repost_hdb.buf_type); 1112 return -EINVAL; 1113 } 1114 1115 if (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) { 1116 dprint_bsg_err(mrioc, 1117 "%s: invalid buffer status %d for type %d\n", 1118 __func__, diag_buffer->status, repost_hdb.buf_type); 1119 return -EINVAL; 1120 } 1121 1122 if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) { 1123 dprint_bsg_err(mrioc, "%s: post failed for type %d\n", 1124 __func__, repost_hdb.buf_type); 1125 return -EFAULT; 1126 } 1127 mpi3mr_set_trigger_data_in_hdb(diag_buffer, 1128 MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); 1129 1130 return 0; 1131 } 1132 1133 /** 1134 * mpi3mr_bsg_query_hdb - Handler for query HDB command 1135 * @mrioc: Adapter instance reference 1136 * @job: BSG job pointer 1137 * 1138 * This function prepares and copies the host diagnostic buffer 1139 * entries to the user buffer. 1140 * 1141 * Return: 0 on success and proper error codes on failure 1142 */ 1143 static long mpi3mr_bsg_query_hdb(struct mpi3mr_ioc *mrioc, 1144 struct bsg_job *job) 1145 { 1146 long rval = 0; 1147 struct mpi3mr_bsg_in_hdb_status *hbd_status; 1148 struct mpi3mr_hdb_entry *hbd_status_entry; 1149 u32 length, min_length; 1150 u8 i; 1151 struct diag_buffer_desc *diag_buffer; 1152 uint32_t data_in_sz = 0; 1153 1154 data_in_sz = job->request_payload.payload_len; 1155 1156 length = (sizeof(*hbd_status) + ((MPI3MR_MAX_NUM_HDB - 1) * 1157 sizeof(*hbd_status_entry))); 1158 hbd_status = kmalloc(length, GFP_KERNEL); 1159 if (!hbd_status) 1160 return -ENOMEM; 1161 hbd_status_entry = &hbd_status->entry[0]; 1162 1163 hbd_status->num_hdb_types = MPI3MR_MAX_NUM_HDB; 1164 for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { 1165 diag_buffer = &mrioc->diag_buffers[i]; 1166 hbd_status_entry->buf_type = diag_buffer->type; 1167 hbd_status_entry->status = diag_buffer->status; 1168 hbd_status_entry->trigger_type = diag_buffer->trigger_type; 1169 memcpy(&hbd_status_entry->trigger_data, 1170 &diag_buffer->trigger_data, 1171 sizeof(hbd_status_entry->trigger_data)); 1172 hbd_status_entry->size = (diag_buffer->size / 1024); 1173 hbd_status_entry++; 1174 } 1175 hbd_status->element_trigger_format = 1176 MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA; 1177 1178 if (data_in_sz < 4) { 1179 dprint_bsg_err(mrioc, "%s: invalid size passed\n", __func__); 1180 rval = -EINVAL; 1181 goto out; 1182 } 1183 min_length = min(data_in_sz, length); 1184 if (job->request_payload.payload_len >= min_length) { 1185 sg_copy_from_buffer(job->request_payload.sg_list, 1186 job->request_payload.sg_cnt, 1187 hbd_status, min_length); 1188 rval = 0; 1189 } 1190 out: 1191 kfree(hbd_status); 1192 return rval; 1193 } 1194 1195 1196 /** 1197 * mpi3mr_enable_logdata - Handler for log data enable 1198 * @mrioc: Adapter instance reference 1199 * @job: BSG job reference 1200 * 1201 * This function enables log data caching in the driver if not 1202 * already enabled and return the maximum number of log data 1203 * entries that can be cached in the driver. 1204 * 1205 * Return: 0 on success and proper error codes on failure 1206 */ 1207 static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc, 1208 struct bsg_job *job) 1209 { 1210 struct mpi3mr_logdata_enable logdata_enable; 1211 1212 if (!mrioc->logdata_buf) { 1213 mrioc->logdata_entry_sz = 1214 (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4)) 1215 + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ; 1216 mrioc->logdata_buf_idx = 0; 1217 mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES, 1218 mrioc->logdata_entry_sz, GFP_KERNEL); 1219 1220 if (!mrioc->logdata_buf) 1221 return -ENOMEM; 1222 } 1223 1224 memset(&logdata_enable, 0, sizeof(logdata_enable)); 1225 logdata_enable.max_entries = 1226 MPI3MR_BSG_LOGDATA_MAX_ENTRIES; 1227 if (job->request_payload.payload_len >= sizeof(logdata_enable)) { 1228 sg_copy_from_buffer(job->request_payload.sg_list, 1229 job->request_payload.sg_cnt, 1230 &logdata_enable, sizeof(logdata_enable)); 1231 return 0; 1232 } 1233 1234 return -EINVAL; 1235 } 1236 /** 1237 * mpi3mr_get_logdata - Handler for get log data 1238 * @mrioc: Adapter instance reference 1239 * @job: BSG job pointer 1240 * This function copies the log data entries to the user buffer 1241 * when log caching is enabled in the driver. 1242 * 1243 * Return: 0 on success and proper error codes on failure 1244 */ 1245 static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc, 1246 struct bsg_job *job) 1247 { 1248 u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz; 1249 1250 if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz)) 1251 return -EINVAL; 1252 1253 num_entries = job->request_payload.payload_len / entry_sz; 1254 if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES) 1255 num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES; 1256 sz = num_entries * entry_sz; 1257 1258 if (job->request_payload.payload_len >= sz) { 1259 sg_copy_from_buffer(job->request_payload.sg_list, 1260 job->request_payload.sg_cnt, 1261 mrioc->logdata_buf, sz); 1262 return 0; 1263 } 1264 return -EINVAL; 1265 } 1266 1267 /** 1268 * mpi3mr_bsg_pel_enable - Handler for PEL enable driver 1269 * @mrioc: Adapter instance reference 1270 * @job: BSG job pointer 1271 * 1272 * This function is the handler for PEL enable driver. 1273 * Validates the application given class and locale and if 1274 * requires aborts the existing PEL wait request and/or issues 1275 * new PEL wait request to the firmware and returns. 1276 * 1277 * Return: 0 on success and proper error codes on failure. 1278 */ 1279 static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc, 1280 struct bsg_job *job) 1281 { 1282 long rval = -EINVAL; 1283 struct mpi3mr_bsg_out_pel_enable pel_enable; 1284 u8 issue_pel_wait; 1285 u8 tmp_class; 1286 u16 tmp_locale; 1287 1288 if (job->request_payload.payload_len != sizeof(pel_enable)) { 1289 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 1290 __func__); 1291 return rval; 1292 } 1293 1294 if (mrioc->unrecoverable) { 1295 dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", 1296 __func__); 1297 return -EFAULT; 1298 } 1299 1300 if (mrioc->reset_in_progress) { 1301 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); 1302 return -EAGAIN; 1303 } 1304 1305 if (mrioc->stop_bsgs) { 1306 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); 1307 return -EAGAIN; 1308 } 1309 1310 sg_copy_to_buffer(job->request_payload.sg_list, 1311 job->request_payload.sg_cnt, 1312 &pel_enable, sizeof(pel_enable)); 1313 1314 if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) { 1315 dprint_bsg_err(mrioc, "%s: out of range class %d sent\n", 1316 __func__, pel_enable.pel_class); 1317 rval = 0; 1318 goto out; 1319 } 1320 if (!mrioc->pel_enabled) 1321 issue_pel_wait = 1; 1322 else { 1323 if ((mrioc->pel_class <= pel_enable.pel_class) && 1324 !((mrioc->pel_locale & pel_enable.pel_locale) ^ 1325 pel_enable.pel_locale)) { 1326 issue_pel_wait = 0; 1327 rval = 0; 1328 } else { 1329 pel_enable.pel_locale |= mrioc->pel_locale; 1330 1331 if (mrioc->pel_class < pel_enable.pel_class) 1332 pel_enable.pel_class = mrioc->pel_class; 1333 1334 rval = mpi3mr_bsg_pel_abort(mrioc); 1335 if (rval) { 1336 dprint_bsg_err(mrioc, 1337 "%s: pel_abort failed, status(%ld)\n", 1338 __func__, rval); 1339 goto out; 1340 } 1341 issue_pel_wait = 1; 1342 } 1343 } 1344 if (issue_pel_wait) { 1345 tmp_class = mrioc->pel_class; 1346 tmp_locale = mrioc->pel_locale; 1347 mrioc->pel_class = pel_enable.pel_class; 1348 mrioc->pel_locale = pel_enable.pel_locale; 1349 mrioc->pel_enabled = 1; 1350 rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL); 1351 if (rval) { 1352 mrioc->pel_class = tmp_class; 1353 mrioc->pel_locale = tmp_locale; 1354 mrioc->pel_enabled = 0; 1355 dprint_bsg_err(mrioc, 1356 "%s: pel get sequence number failed, status(%ld)\n", 1357 __func__, rval); 1358 } 1359 } 1360 1361 out: 1362 return rval; 1363 } 1364 /** 1365 * mpi3mr_get_all_tgt_info - Get all target information 1366 * @mrioc: Adapter instance reference 1367 * @job: BSG job reference 1368 * 1369 * This function copies the driver managed target devices device 1370 * handle, persistent ID, bus ID and taret ID to the user 1371 * provided buffer for the specific controller. This function 1372 * also provides the number of devices managed by the driver for 1373 * the specific controller. 1374 * 1375 * Return: 0 on success and proper error codes on failure 1376 */ 1377 static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc, 1378 struct bsg_job *job) 1379 { 1380 u16 num_devices = 0, i = 0, size; 1381 unsigned long flags; 1382 struct mpi3mr_tgt_dev *tgtdev; 1383 struct mpi3mr_device_map_info *devmap_info = NULL; 1384 struct mpi3mr_all_tgt_info *alltgt_info = NULL; 1385 uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0; 1386 1387 if (job->request_payload.payload_len < sizeof(u32)) { 1388 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 1389 __func__); 1390 return -EINVAL; 1391 } 1392 1393 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 1394 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 1395 num_devices++; 1396 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 1397 1398 if ((job->request_payload.payload_len <= sizeof(u64)) || 1399 list_empty(&mrioc->tgtdev_list)) { 1400 sg_copy_from_buffer(job->request_payload.sg_list, 1401 job->request_payload.sg_cnt, 1402 &num_devices, sizeof(num_devices)); 1403 return 0; 1404 } 1405 1406 kern_entrylen = num_devices * sizeof(*devmap_info); 1407 size = sizeof(u64) + kern_entrylen; 1408 alltgt_info = kzalloc(size, GFP_KERNEL); 1409 if (!alltgt_info) 1410 return -ENOMEM; 1411 1412 devmap_info = alltgt_info->dmi; 1413 memset((u8 *)devmap_info, 0xFF, kern_entrylen); 1414 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 1415 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 1416 if (i < num_devices) { 1417 devmap_info[i].handle = tgtdev->dev_handle; 1418 devmap_info[i].perst_id = tgtdev->perst_id; 1419 if (tgtdev->host_exposed && tgtdev->starget) { 1420 devmap_info[i].target_id = tgtdev->starget->id; 1421 devmap_info[i].bus_id = 1422 tgtdev->starget->channel; 1423 } 1424 i++; 1425 } 1426 } 1427 num_devices = i; 1428 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 1429 1430 alltgt_info->num_devices = num_devices; 1431 1432 usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) / 1433 sizeof(*devmap_info); 1434 usr_entrylen *= sizeof(*devmap_info); 1435 min_entrylen = min(usr_entrylen, kern_entrylen); 1436 1437 sg_copy_from_buffer(job->request_payload.sg_list, 1438 job->request_payload.sg_cnt, 1439 alltgt_info, (min_entrylen + sizeof(u64))); 1440 kfree(alltgt_info); 1441 return 0; 1442 } 1443 /** 1444 * mpi3mr_get_change_count - Get topology change count 1445 * @mrioc: Adapter instance reference 1446 * @job: BSG job reference 1447 * 1448 * This function copies the toplogy change count provided by the 1449 * driver in events and cached in the driver to the user 1450 * provided buffer for the specific controller. 1451 * 1452 * Return: 0 on success and proper error codes on failure 1453 */ 1454 static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc, 1455 struct bsg_job *job) 1456 { 1457 struct mpi3mr_change_count chgcnt; 1458 1459 memset(&chgcnt, 0, sizeof(chgcnt)); 1460 chgcnt.change_count = mrioc->change_count; 1461 if (job->request_payload.payload_len >= sizeof(chgcnt)) { 1462 sg_copy_from_buffer(job->request_payload.sg_list, 1463 job->request_payload.sg_cnt, 1464 &chgcnt, sizeof(chgcnt)); 1465 return 0; 1466 } 1467 return -EINVAL; 1468 } 1469 1470 /** 1471 * mpi3mr_bsg_adp_reset - Issue controller reset 1472 * @mrioc: Adapter instance reference 1473 * @job: BSG job reference 1474 * 1475 * This function identifies the user provided reset type and 1476 * issues approporiate reset to the controller and wait for that 1477 * to complete and reinitialize the controller and then returns 1478 * 1479 * Return: 0 on success and proper error codes on failure 1480 */ 1481 static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc, 1482 struct bsg_job *job) 1483 { 1484 long rval = -EINVAL; 1485 u8 save_snapdump; 1486 struct mpi3mr_bsg_adp_reset adpreset; 1487 1488 if (job->request_payload.payload_len != 1489 sizeof(adpreset)) { 1490 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 1491 __func__); 1492 goto out; 1493 } 1494 1495 if (mrioc->unrecoverable || mrioc->block_on_pci_err) 1496 return -EINVAL; 1497 1498 sg_copy_to_buffer(job->request_payload.sg_list, 1499 job->request_payload.sg_cnt, 1500 &adpreset, sizeof(adpreset)); 1501 1502 switch (adpreset.reset_type) { 1503 case MPI3MR_BSG_ADPRESET_SOFT: 1504 save_snapdump = 0; 1505 break; 1506 case MPI3MR_BSG_ADPRESET_DIAG_FAULT: 1507 save_snapdump = 1; 1508 break; 1509 default: 1510 dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n", 1511 __func__, adpreset.reset_type); 1512 goto out; 1513 } 1514 1515 rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP, 1516 save_snapdump); 1517 1518 if (rval) 1519 dprint_bsg_err(mrioc, 1520 "%s: reset handler returned error(%ld) for reset type %d\n", 1521 __func__, rval, adpreset.reset_type); 1522 out: 1523 return rval; 1524 } 1525 1526 /** 1527 * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler 1528 * @mrioc: Adapter instance reference 1529 * @job: BSG job reference 1530 * 1531 * This function provides adapter information for the given 1532 * controller 1533 * 1534 * Return: 0 on success and proper error codes on failure 1535 */ 1536 static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc, 1537 struct bsg_job *job) 1538 { 1539 enum mpi3mr_iocstate ioc_state; 1540 struct mpi3mr_bsg_in_adpinfo adpinfo; 1541 1542 memset(&adpinfo, 0, sizeof(adpinfo)); 1543 adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY; 1544 adpinfo.pci_dev_id = mrioc->pdev->device; 1545 adpinfo.pci_dev_hw_rev = mrioc->pdev->revision; 1546 adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device; 1547 adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor; 1548 adpinfo.pci_bus = mrioc->pdev->bus->number; 1549 adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn); 1550 adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn); 1551 adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus); 1552 adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION; 1553 1554 ioc_state = mpi3mr_get_iocstate(mrioc); 1555 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 1556 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; 1557 else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs)) 1558 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; 1559 else if (ioc_state == MRIOC_STATE_FAULT) 1560 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT; 1561 else 1562 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL; 1563 1564 memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info, 1565 sizeof(adpinfo.driver_info)); 1566 1567 if (job->request_payload.payload_len >= sizeof(adpinfo)) { 1568 sg_copy_from_buffer(job->request_payload.sg_list, 1569 job->request_payload.sg_cnt, 1570 &adpinfo, sizeof(adpinfo)); 1571 return 0; 1572 } 1573 return -EINVAL; 1574 } 1575 1576 /** 1577 * mpi3mr_bsg_process_drv_cmds - Driver Command handler 1578 * @job: BSG job reference 1579 * 1580 * This function is the top level handler for driver commands, 1581 * this does basic validation of the buffer and identifies the 1582 * opcode and switches to correct sub handler. 1583 * 1584 * Return: 0 on success and proper error codes on failure 1585 */ 1586 static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job) 1587 { 1588 long rval = -EINVAL; 1589 struct mpi3mr_ioc *mrioc = NULL; 1590 struct mpi3mr_bsg_packet *bsg_req = NULL; 1591 struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL; 1592 1593 bsg_req = job->request; 1594 drvrcmd = &bsg_req->cmd.drvrcmd; 1595 1596 mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id); 1597 if (!mrioc) 1598 return -ENODEV; 1599 1600 if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) { 1601 rval = mpi3mr_bsg_populate_adpinfo(mrioc, job); 1602 return rval; 1603 } 1604 1605 if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) 1606 return -ERESTARTSYS; 1607 1608 switch (drvrcmd->opcode) { 1609 case MPI3MR_DRVBSG_OPCODE_ADPRESET: 1610 rval = mpi3mr_bsg_adp_reset(mrioc, job); 1611 break; 1612 case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO: 1613 rval = mpi3mr_get_all_tgt_info(mrioc, job); 1614 break; 1615 case MPI3MR_DRVBSG_OPCODE_GETCHGCNT: 1616 rval = mpi3mr_get_change_count(mrioc, job); 1617 break; 1618 case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE: 1619 rval = mpi3mr_enable_logdata(mrioc, job); 1620 break; 1621 case MPI3MR_DRVBSG_OPCODE_GETLOGDATA: 1622 rval = mpi3mr_get_logdata(mrioc, job); 1623 break; 1624 case MPI3MR_DRVBSG_OPCODE_PELENABLE: 1625 rval = mpi3mr_bsg_pel_enable(mrioc, job); 1626 break; 1627 case MPI3MR_DRVBSG_OPCODE_QUERY_HDB: 1628 rval = mpi3mr_bsg_query_hdb(mrioc, job); 1629 break; 1630 case MPI3MR_DRVBSG_OPCODE_REPOST_HDB: 1631 rval = mpi3mr_bsg_repost_hdb(mrioc, job); 1632 break; 1633 case MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB: 1634 rval = mpi3mr_bsg_upload_hdb(mrioc, job); 1635 break; 1636 case MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS: 1637 rval = mpi3mr_bsg_refresh_hdb_triggers(mrioc, job); 1638 break; 1639 case MPI3MR_DRVBSG_OPCODE_UNKNOWN: 1640 default: 1641 pr_err("%s: unsupported driver command opcode %d\n", 1642 MPI3MR_DRIVER_NAME, drvrcmd->opcode); 1643 break; 1644 } 1645 mutex_unlock(&mrioc->bsg_cmds.mutex); 1646 return rval; 1647 } 1648 1649 /** 1650 * mpi3mr_total_num_ioctl_sges - Count number of SGEs required 1651 * @drv_bufs: DMA address of the buffers to be placed in sgl 1652 * @bufcnt: Number of DMA buffers 1653 * 1654 * This function returns total number of data SGEs required 1655 * including zero length SGEs and excluding management request 1656 * and response buffer for the given list of data buffer 1657 * descriptors 1658 * 1659 * Return: Number of SGE elements needed 1660 */ 1661 static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs, 1662 u8 bufcnt) 1663 { 1664 u16 i, sge_count = 0; 1665 1666 for (i = 0; i < bufcnt; i++, drv_bufs++) { 1667 if (drv_bufs->data_dir == DMA_NONE || 1668 drv_bufs->kern_buf) 1669 continue; 1670 sge_count += drv_bufs->num_dma_desc; 1671 if (!drv_bufs->num_dma_desc) 1672 sge_count++; 1673 } 1674 return sge_count; 1675 } 1676 1677 /** 1678 * mpi3mr_bsg_build_sgl - SGL construction for MPI commands 1679 * @mrioc: Adapter instance reference 1680 * @mpi_req: MPI request 1681 * @sgl_offset: offset to start sgl in the MPI request 1682 * @drv_bufs: DMA address of the buffers to be placed in sgl 1683 * @bufcnt: Number of DMA buffers 1684 * @is_rmc: Does the buffer list has management command buffer 1685 * @is_rmr: Does the buffer list has management response buffer 1686 * @num_datasges: Number of data buffers in the list 1687 * 1688 * This function places the DMA address of the given buffers in 1689 * proper format as SGEs in the given MPI request. 1690 * 1691 * Return: 0 on success,-1 on failure 1692 */ 1693 static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req, 1694 u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs, 1695 u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges) 1696 { 1697 struct mpi3_request_header *mpi_header = 1698 (struct mpi3_request_header *)mpi_req; 1699 u8 *sgl = (mpi_req + sgl_offset), count = 0; 1700 struct mpi3_mgmt_passthrough_request *rmgmt_req = 1701 (struct mpi3_mgmt_passthrough_request *)mpi_req; 1702 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 1703 u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag; 1704 u16 available_sges, i, sges_needed; 1705 u32 sge_element_size = sizeof(struct mpi3_sge_common); 1706 bool chain_used = false; 1707 1708 sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 1709 MPI3_SGE_FLAGS_DLAS_SYSTEM; 1710 sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER; 1711 sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST; 1712 last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 1713 MPI3_SGE_FLAGS_DLAS_SYSTEM; 1714 1715 sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt); 1716 1717 if (is_rmc) { 1718 mpi3mr_add_sg_single(&rmgmt_req->command_sgl, 1719 sgl_flags_last, drv_buf_iter->kern_buf_len, 1720 drv_buf_iter->kern_buf_dma); 1721 sgl = (u8 *)drv_buf_iter->kern_buf + 1722 drv_buf_iter->bsg_buf_len; 1723 available_sges = (drv_buf_iter->kern_buf_len - 1724 drv_buf_iter->bsg_buf_len) / sge_element_size; 1725 1726 if (sges_needed > available_sges) 1727 return -1; 1728 1729 chain_used = true; 1730 drv_buf_iter++; 1731 count++; 1732 if (is_rmr) { 1733 mpi3mr_add_sg_single(&rmgmt_req->response_sgl, 1734 sgl_flags_last, drv_buf_iter->kern_buf_len, 1735 drv_buf_iter->kern_buf_dma); 1736 drv_buf_iter++; 1737 count++; 1738 } else 1739 mpi3mr_build_zero_len_sge( 1740 &rmgmt_req->response_sgl); 1741 if (num_datasges) { 1742 i = 0; 1743 goto build_sges; 1744 } 1745 } else { 1746 if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ) 1747 return -1; 1748 available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) / 1749 sge_element_size; 1750 if (!available_sges) 1751 return -1; 1752 } 1753 if (!num_datasges) { 1754 mpi3mr_build_zero_len_sge(sgl); 1755 return 0; 1756 } 1757 if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) { 1758 if ((sges_needed > 2) || (sges_needed > available_sges)) 1759 return -1; 1760 for (; count < bufcnt; count++, drv_buf_iter++) { 1761 if (drv_buf_iter->data_dir == DMA_NONE || 1762 !drv_buf_iter->num_dma_desc) 1763 continue; 1764 mpi3mr_add_sg_single(sgl, sgl_flags_last, 1765 drv_buf_iter->dma_desc[0].size, 1766 drv_buf_iter->dma_desc[0].dma_addr); 1767 sgl += sge_element_size; 1768 } 1769 return 0; 1770 } 1771 i = 0; 1772 1773 build_sges: 1774 for (; count < bufcnt; count++, drv_buf_iter++) { 1775 if (drv_buf_iter->data_dir == DMA_NONE) 1776 continue; 1777 if (!drv_buf_iter->num_dma_desc) { 1778 if (chain_used && !available_sges) 1779 return -1; 1780 if (!chain_used && (available_sges == 1) && 1781 (sges_needed > 1)) 1782 goto setup_chain; 1783 flag = sgl_flag_eob; 1784 if (num_datasges == 1) 1785 flag = sgl_flags_last; 1786 mpi3mr_add_sg_single(sgl, flag, 0, 0); 1787 sgl += sge_element_size; 1788 sges_needed--; 1789 available_sges--; 1790 num_datasges--; 1791 continue; 1792 } 1793 for (; i < drv_buf_iter->num_dma_desc; i++) { 1794 if (chain_used && !available_sges) 1795 return -1; 1796 if (!chain_used && (available_sges == 1) && 1797 (sges_needed > 1)) 1798 goto setup_chain; 1799 flag = sgl_flags; 1800 if (i == (drv_buf_iter->num_dma_desc - 1)) { 1801 if (num_datasges == 1) 1802 flag = sgl_flags_last; 1803 else 1804 flag = sgl_flag_eob; 1805 } 1806 1807 mpi3mr_add_sg_single(sgl, flag, 1808 drv_buf_iter->dma_desc[i].size, 1809 drv_buf_iter->dma_desc[i].dma_addr); 1810 sgl += sge_element_size; 1811 available_sges--; 1812 sges_needed--; 1813 } 1814 num_datasges--; 1815 i = 0; 1816 } 1817 return 0; 1818 1819 setup_chain: 1820 available_sges = mrioc->ioctl_chain_sge.size / sge_element_size; 1821 if (sges_needed > available_sges) 1822 return -1; 1823 mpi3mr_add_sg_single(sgl, last_chain_sgl_flag, 1824 (sges_needed * sge_element_size), 1825 mrioc->ioctl_chain_sge.dma_addr); 1826 memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size); 1827 sgl = (u8 *)mrioc->ioctl_chain_sge.addr; 1828 chain_used = true; 1829 goto build_sges; 1830 } 1831 1832 /** 1833 * mpi3mr_get_nvme_data_fmt - returns the NVMe data format 1834 * @nvme_encap_request: NVMe encapsulated MPI request 1835 * 1836 * This function returns the type of the data format specified 1837 * in user provided NVMe command in NVMe encapsulated request. 1838 * 1839 * Return: Data format of the NVMe command (PRP/SGL etc) 1840 */ 1841 static unsigned int mpi3mr_get_nvme_data_fmt( 1842 struct mpi3_nvme_encapsulated_request *nvme_encap_request) 1843 { 1844 u8 format = 0; 1845 1846 format = ((nvme_encap_request->command[0] & 0xc000) >> 14); 1847 return format; 1848 1849 } 1850 1851 /** 1852 * mpi3mr_build_nvme_sgl - SGL constructor for NVME 1853 * encapsulated request 1854 * @mrioc: Adapter instance reference 1855 * @nvme_encap_request: NVMe encapsulated MPI request 1856 * @drv_bufs: DMA address of the buffers to be placed in sgl 1857 * @bufcnt: Number of DMA buffers 1858 * 1859 * This function places the DMA address of the given buffers in 1860 * proper format as SGEs in the given NVMe encapsulated request. 1861 * 1862 * Return: 0 on success, -1 on failure 1863 */ 1864 static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc, 1865 struct mpi3_nvme_encapsulated_request *nvme_encap_request, 1866 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) 1867 { 1868 struct mpi3mr_nvme_pt_sge *nvme_sgl; 1869 __le64 sgl_dma; 1870 u8 count; 1871 size_t length = 0; 1872 u16 available_sges = 0, i; 1873 u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge); 1874 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 1875 u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << 1876 mrioc->facts.sge_mod_shift) << 32); 1877 u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << 1878 mrioc->facts.sge_mod_shift) << 32; 1879 u32 size; 1880 1881 nvme_sgl = (struct mpi3mr_nvme_pt_sge *) 1882 ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET); 1883 1884 /* 1885 * Not all commands require a data transfer. If no data, just return 1886 * without constructing any sgl. 1887 */ 1888 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 1889 if (drv_buf_iter->data_dir == DMA_NONE) 1890 continue; 1891 length = drv_buf_iter->kern_buf_len; 1892 break; 1893 } 1894 if (!length || !drv_buf_iter->num_dma_desc) 1895 return 0; 1896 1897 if (drv_buf_iter->num_dma_desc == 1) { 1898 available_sges = 1; 1899 goto build_sges; 1900 } 1901 1902 sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr); 1903 if (sgl_dma & sgemod_mask) { 1904 dprint_bsg_err(mrioc, 1905 "%s: SGL chain address collides with SGE modifier\n", 1906 __func__); 1907 return -1; 1908 } 1909 1910 sgl_dma &= ~sgemod_mask; 1911 sgl_dma |= sgemod_val; 1912 1913 memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size); 1914 available_sges = mrioc->ioctl_chain_sge.size / sge_element_size; 1915 if (available_sges < drv_buf_iter->num_dma_desc) 1916 return -1; 1917 memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge)); 1918 nvme_sgl->base_addr = sgl_dma; 1919 size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge); 1920 nvme_sgl->length = cpu_to_le32(size); 1921 nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT; 1922 nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr; 1923 1924 build_sges: 1925 for (i = 0; i < drv_buf_iter->num_dma_desc; i++) { 1926 sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr); 1927 if (sgl_dma & sgemod_mask) { 1928 dprint_bsg_err(mrioc, 1929 "%s: SGL address collides with SGE modifier\n", 1930 __func__); 1931 return -1; 1932 } 1933 1934 sgl_dma &= ~sgemod_mask; 1935 sgl_dma |= sgemod_val; 1936 1937 nvme_sgl->base_addr = sgl_dma; 1938 nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size); 1939 nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT; 1940 nvme_sgl++; 1941 available_sges--; 1942 } 1943 1944 return 0; 1945 } 1946 1947 /** 1948 * mpi3mr_build_nvme_prp - PRP constructor for NVME 1949 * encapsulated request 1950 * @mrioc: Adapter instance reference 1951 * @nvme_encap_request: NVMe encapsulated MPI request 1952 * @drv_bufs: DMA address of the buffers to be placed in SGL 1953 * @bufcnt: Number of DMA buffers 1954 * 1955 * This function places the DMA address of the given buffers in 1956 * proper format as PRP entries in the given NVMe encapsulated 1957 * request. 1958 * 1959 * Return: 0 on success, -1 on failure 1960 */ 1961 static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc, 1962 struct mpi3_nvme_encapsulated_request *nvme_encap_request, 1963 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) 1964 { 1965 int prp_size = MPI3MR_NVME_PRP_SIZE; 1966 __le64 *prp_entry, *prp1_entry, *prp2_entry; 1967 __le64 *prp_page; 1968 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; 1969 u32 offset, entry_len, dev_pgsz; 1970 u32 page_mask_result, page_mask; 1971 size_t length = 0, desc_len; 1972 u8 count; 1973 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 1974 u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << 1975 mrioc->facts.sge_mod_shift) << 32); 1976 u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << 1977 mrioc->facts.sge_mod_shift) << 32; 1978 u16 dev_handle = nvme_encap_request->dev_handle; 1979 struct mpi3mr_tgt_dev *tgtdev; 1980 u16 desc_count = 0; 1981 1982 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1983 if (!tgtdev) { 1984 dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n", 1985 __func__, dev_handle); 1986 return -1; 1987 } 1988 1989 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { 1990 dprint_bsg_err(mrioc, 1991 "%s: NVMe device page size is zero for handle 0x%04x\n", 1992 __func__, dev_handle); 1993 mpi3mr_tgtdev_put(tgtdev); 1994 return -1; 1995 } 1996 1997 dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); 1998 mpi3mr_tgtdev_put(tgtdev); 1999 page_mask = dev_pgsz - 1; 2000 2001 if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) { 2002 dprint_bsg_err(mrioc, 2003 "%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n", 2004 __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle); 2005 return -1; 2006 } 2007 2008 if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) { 2009 dprint_bsg_err(mrioc, 2010 "%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n", 2011 __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle); 2012 return -1; 2013 } 2014 2015 /* 2016 * Not all commands require a data transfer. If no data, just return 2017 * without constructing any PRP. 2018 */ 2019 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 2020 if (drv_buf_iter->data_dir == DMA_NONE) 2021 continue; 2022 length = drv_buf_iter->kern_buf_len; 2023 break; 2024 } 2025 2026 if (!length || !drv_buf_iter->num_dma_desc) 2027 return 0; 2028 2029 for (count = 0; count < drv_buf_iter->num_dma_desc; count++) { 2030 dma_addr = drv_buf_iter->dma_desc[count].dma_addr; 2031 if (dma_addr & page_mask) { 2032 dprint_bsg_err(mrioc, 2033 "%s:dma_addr %pad is not aligned with page size 0x%x\n", 2034 __func__, &dma_addr, dev_pgsz); 2035 return -1; 2036 } 2037 } 2038 2039 dma_addr = drv_buf_iter->dma_desc[0].dma_addr; 2040 desc_len = drv_buf_iter->dma_desc[0].size; 2041 2042 mrioc->prp_sz = 0; 2043 mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev, 2044 dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL); 2045 2046 if (!mrioc->prp_list_virt) 2047 return -1; 2048 mrioc->prp_sz = dev_pgsz; 2049 2050 /* 2051 * Set pointers to PRP1 and PRP2, which are in the NVMe command. 2052 * PRP1 is located at a 24 byte offset from the start of the NVMe 2053 * command. Then set the current PRP entry pointer to PRP1. 2054 */ 2055 prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + 2056 MPI3MR_NVME_CMD_PRP1_OFFSET); 2057 prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + 2058 MPI3MR_NVME_CMD_PRP2_OFFSET); 2059 prp_entry = prp1_entry; 2060 /* 2061 * For the PRP entries, use the specially allocated buffer of 2062 * contiguous memory. 2063 */ 2064 prp_page = (__le64 *)mrioc->prp_list_virt; 2065 prp_page_dma = mrioc->prp_list_dma; 2066 2067 /* 2068 * Check if we are within 1 entry of a page boundary we don't 2069 * want our first entry to be a PRP List entry. 2070 */ 2071 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; 2072 if (!page_mask_result) { 2073 dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n", 2074 __func__); 2075 goto err_out; 2076 } 2077 2078 /* 2079 * Set PRP physical pointer, which initially points to the current PRP 2080 * DMA memory page. 2081 */ 2082 prp_entry_dma = prp_page_dma; 2083 2084 2085 /* Loop while the length is not zero. */ 2086 while (length) { 2087 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 2088 if (!page_mask_result && (length > dev_pgsz)) { 2089 dprint_bsg_err(mrioc, 2090 "%s: single PRP page is not sufficient\n", 2091 __func__); 2092 goto err_out; 2093 } 2094 2095 /* Need to handle if entry will be part of a page. */ 2096 offset = dma_addr & page_mask; 2097 entry_len = dev_pgsz - offset; 2098 2099 if (prp_entry == prp1_entry) { 2100 /* 2101 * Must fill in the first PRP pointer (PRP1) before 2102 * moving on. 2103 */ 2104 *prp1_entry = cpu_to_le64(dma_addr); 2105 if (*prp1_entry & sgemod_mask) { 2106 dprint_bsg_err(mrioc, 2107 "%s: PRP1 address collides with SGE modifier\n", 2108 __func__); 2109 goto err_out; 2110 } 2111 *prp1_entry &= ~sgemod_mask; 2112 *prp1_entry |= sgemod_val; 2113 2114 /* 2115 * Now point to the second PRP entry within the 2116 * command (PRP2). 2117 */ 2118 prp_entry = prp2_entry; 2119 } else if (prp_entry == prp2_entry) { 2120 /* 2121 * Should the PRP2 entry be a PRP List pointer or just 2122 * a regular PRP pointer? If there is more than one 2123 * more page of data, must use a PRP List pointer. 2124 */ 2125 if (length > dev_pgsz) { 2126 /* 2127 * PRP2 will contain a PRP List pointer because 2128 * more PRP's are needed with this command. The 2129 * list will start at the beginning of the 2130 * contiguous buffer. 2131 */ 2132 *prp2_entry = cpu_to_le64(prp_entry_dma); 2133 if (*prp2_entry & sgemod_mask) { 2134 dprint_bsg_err(mrioc, 2135 "%s: PRP list address collides with SGE modifier\n", 2136 __func__); 2137 goto err_out; 2138 } 2139 *prp2_entry &= ~sgemod_mask; 2140 *prp2_entry |= sgemod_val; 2141 2142 /* 2143 * The next PRP Entry will be the start of the 2144 * first PRP List. 2145 */ 2146 prp_entry = prp_page; 2147 continue; 2148 } else { 2149 /* 2150 * After this, the PRP Entries are complete. 2151 * This command uses 2 PRP's and no PRP list. 2152 */ 2153 *prp2_entry = cpu_to_le64(dma_addr); 2154 if (*prp2_entry & sgemod_mask) { 2155 dprint_bsg_err(mrioc, 2156 "%s: PRP2 collides with SGE modifier\n", 2157 __func__); 2158 goto err_out; 2159 } 2160 *prp2_entry &= ~sgemod_mask; 2161 *prp2_entry |= sgemod_val; 2162 } 2163 } else { 2164 /* 2165 * Put entry in list and bump the addresses. 2166 * 2167 * After PRP1 and PRP2 are filled in, this will fill in 2168 * all remaining PRP entries in a PRP List, one per 2169 * each time through the loop. 2170 */ 2171 *prp_entry = cpu_to_le64(dma_addr); 2172 if (*prp_entry & sgemod_mask) { 2173 dprint_bsg_err(mrioc, 2174 "%s: PRP address collides with SGE modifier\n", 2175 __func__); 2176 goto err_out; 2177 } 2178 *prp_entry &= ~sgemod_mask; 2179 *prp_entry |= sgemod_val; 2180 prp_entry++; 2181 prp_entry_dma += prp_size; 2182 } 2183 2184 /* decrement length accounting for last partial page. */ 2185 if (entry_len >= length) { 2186 length = 0; 2187 } else { 2188 if (entry_len <= desc_len) { 2189 dma_addr += entry_len; 2190 desc_len -= entry_len; 2191 } 2192 if (!desc_len) { 2193 if ((++desc_count) >= 2194 drv_buf_iter->num_dma_desc) { 2195 dprint_bsg_err(mrioc, 2196 "%s: Invalid len %zd while building PRP\n", 2197 __func__, length); 2198 goto err_out; 2199 } 2200 dma_addr = 2201 drv_buf_iter->dma_desc[desc_count].dma_addr; 2202 desc_len = 2203 drv_buf_iter->dma_desc[desc_count].size; 2204 } 2205 length -= entry_len; 2206 } 2207 } 2208 2209 return 0; 2210 err_out: 2211 if (mrioc->prp_list_virt) { 2212 dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz, 2213 mrioc->prp_list_virt, mrioc->prp_list_dma); 2214 mrioc->prp_list_virt = NULL; 2215 } 2216 return -1; 2217 } 2218 2219 /** 2220 * mpi3mr_map_data_buffer_dma - build dma descriptors for data 2221 * buffers 2222 * @mrioc: Adapter instance reference 2223 * @drv_buf: buffer map descriptor 2224 * @desc_count: Number of already consumed dma descriptors 2225 * 2226 * This function computes how many pre-allocated DMA descriptors 2227 * are required for the given data buffer and if those number of 2228 * descriptors are free, then setup the mapping of the scattered 2229 * DMA address to the given data buffer, if the data direction 2230 * of the buffer is DMA_TO_DEVICE then the actual data is copied to 2231 * the DMA buffers 2232 * 2233 * Return: 0 on success, -1 on failure 2234 */ 2235 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc, 2236 struct mpi3mr_buf_map *drv_buf, 2237 u16 desc_count) 2238 { 2239 u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE; 2240 u32 buf_len = drv_buf->kern_buf_len, copied_len = 0; 2241 2242 if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE) 2243 needed_desc++; 2244 if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) { 2245 dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n", 2246 __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE); 2247 return -1; 2248 } 2249 drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc, 2250 GFP_KERNEL); 2251 if (!drv_buf->dma_desc) 2252 return -1; 2253 for (i = 0; i < needed_desc; i++, desc_count++) { 2254 drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr; 2255 drv_buf->dma_desc[i].dma_addr = 2256 mrioc->ioctl_sge[desc_count].dma_addr; 2257 if (buf_len < mrioc->ioctl_sge[desc_count].size) 2258 drv_buf->dma_desc[i].size = buf_len; 2259 else 2260 drv_buf->dma_desc[i].size = 2261 mrioc->ioctl_sge[desc_count].size; 2262 buf_len -= drv_buf->dma_desc[i].size; 2263 memset(drv_buf->dma_desc[i].addr, 0, 2264 mrioc->ioctl_sge[desc_count].size); 2265 if (drv_buf->data_dir == DMA_TO_DEVICE) { 2266 memcpy(drv_buf->dma_desc[i].addr, 2267 drv_buf->bsg_buf + copied_len, 2268 drv_buf->dma_desc[i].size); 2269 copied_len += drv_buf->dma_desc[i].size; 2270 } 2271 } 2272 drv_buf->num_dma_desc = needed_desc; 2273 return 0; 2274 } 2275 /** 2276 * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler 2277 * @job: BSG job reference 2278 * 2279 * This function is the top level handler for MPI Pass through 2280 * command, this does basic validation of the input data buffers, 2281 * identifies the given buffer types and MPI command, allocates 2282 * DMAable memory for user given buffers, construstcs SGL 2283 * properly and passes the command to the firmware. 2284 * 2285 * Once the MPI command is completed the driver copies the data 2286 * if any and reply, sense information to user provided buffers. 2287 * If the command is timed out then issues controller reset 2288 * prior to returning. 2289 * 2290 * Return: 0 on success and proper error codes on failure 2291 */ 2292 2293 static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) 2294 { 2295 long rval = -EINVAL; 2296 struct mpi3mr_ioc *mrioc = NULL; 2297 u8 *mpi_req = NULL, *sense_buff_k = NULL; 2298 u8 mpi_msg_size = 0; 2299 struct mpi3mr_bsg_packet *bsg_req = NULL; 2300 struct mpi3mr_bsg_mptcmd *karg; 2301 struct mpi3mr_buf_entry *buf_entries = NULL; 2302 struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL; 2303 u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0; 2304 u8 din_cnt = 0, dout_cnt = 0; 2305 u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF; 2306 u8 block_io = 0, nvme_fmt = 0, resp_code = 0; 2307 struct mpi3_request_header *mpi_header = NULL; 2308 struct mpi3_status_reply_descriptor *status_desc; 2309 struct mpi3_scsi_task_mgmt_request *tm_req; 2310 u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen; 2311 u16 dev_handle; 2312 struct mpi3mr_tgt_dev *tgtdev; 2313 struct mpi3mr_stgt_priv_data *stgt_priv = NULL; 2314 struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL; 2315 u32 din_size = 0, dout_size = 0; 2316 u8 *din_buf = NULL, *dout_buf = NULL; 2317 u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL; 2318 u16 rmc_size = 0, desc_count = 0; 2319 2320 bsg_req = job->request; 2321 karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd; 2322 2323 mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id); 2324 if (!mrioc) 2325 return -ENODEV; 2326 2327 if (!mrioc->ioctl_sges_allocated) { 2328 dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n", 2329 __func__); 2330 return -ENOMEM; 2331 } 2332 2333 if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT) 2334 karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT; 2335 2336 mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL); 2337 if (!mpi_req) 2338 return -ENOMEM; 2339 mpi_header = (struct mpi3_request_header *)mpi_req; 2340 2341 bufcnt = karg->buf_entry_list.num_of_entries; 2342 drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL); 2343 if (!drv_bufs) { 2344 rval = -ENOMEM; 2345 goto out; 2346 } 2347 2348 dout_buf = kzalloc(job->request_payload.payload_len, 2349 GFP_KERNEL); 2350 if (!dout_buf) { 2351 rval = -ENOMEM; 2352 goto out; 2353 } 2354 2355 din_buf = kzalloc(job->reply_payload.payload_len, 2356 GFP_KERNEL); 2357 if (!din_buf) { 2358 rval = -ENOMEM; 2359 goto out; 2360 } 2361 2362 sg_copy_to_buffer(job->request_payload.sg_list, 2363 job->request_payload.sg_cnt, 2364 dout_buf, job->request_payload.payload_len); 2365 2366 buf_entries = karg->buf_entry_list.buf_entry; 2367 sgl_din_iter = din_buf; 2368 sgl_dout_iter = dout_buf; 2369 drv_buf_iter = drv_bufs; 2370 2371 for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) { 2372 2373 switch (buf_entries->buf_type) { 2374 case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD: 2375 sgl_iter = sgl_dout_iter; 2376 sgl_dout_iter += buf_entries->buf_len; 2377 drv_buf_iter->data_dir = DMA_TO_DEVICE; 2378 is_rmcb = 1; 2379 if ((count != 0) || !buf_entries->buf_len) 2380 invalid_be = 1; 2381 break; 2382 case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP: 2383 sgl_iter = sgl_din_iter; 2384 sgl_din_iter += buf_entries->buf_len; 2385 drv_buf_iter->data_dir = DMA_FROM_DEVICE; 2386 is_rmrb = 1; 2387 if (count != 1 || !is_rmcb || !buf_entries->buf_len) 2388 invalid_be = 1; 2389 break; 2390 case MPI3MR_BSG_BUFTYPE_DATA_IN: 2391 sgl_iter = sgl_din_iter; 2392 sgl_din_iter += buf_entries->buf_len; 2393 drv_buf_iter->data_dir = DMA_FROM_DEVICE; 2394 din_cnt++; 2395 din_size += buf_entries->buf_len; 2396 if ((din_cnt > 1) && !is_rmcb) 2397 invalid_be = 1; 2398 break; 2399 case MPI3MR_BSG_BUFTYPE_DATA_OUT: 2400 sgl_iter = sgl_dout_iter; 2401 sgl_dout_iter += buf_entries->buf_len; 2402 drv_buf_iter->data_dir = DMA_TO_DEVICE; 2403 dout_cnt++; 2404 dout_size += buf_entries->buf_len; 2405 if ((dout_cnt > 1) && !is_rmcb) 2406 invalid_be = 1; 2407 break; 2408 case MPI3MR_BSG_BUFTYPE_MPI_REPLY: 2409 sgl_iter = sgl_din_iter; 2410 sgl_din_iter += buf_entries->buf_len; 2411 drv_buf_iter->data_dir = DMA_NONE; 2412 mpirep_offset = count; 2413 if (!buf_entries->buf_len) 2414 invalid_be = 1; 2415 break; 2416 case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE: 2417 sgl_iter = sgl_din_iter; 2418 sgl_din_iter += buf_entries->buf_len; 2419 drv_buf_iter->data_dir = DMA_NONE; 2420 erb_offset = count; 2421 if (!buf_entries->buf_len) 2422 invalid_be = 1; 2423 break; 2424 case MPI3MR_BSG_BUFTYPE_MPI_REQUEST: 2425 sgl_iter = sgl_dout_iter; 2426 sgl_dout_iter += buf_entries->buf_len; 2427 drv_buf_iter->data_dir = DMA_NONE; 2428 mpi_msg_size = buf_entries->buf_len; 2429 if ((!mpi_msg_size || (mpi_msg_size % 4)) || 2430 (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) { 2431 dprint_bsg_err(mrioc, "%s: invalid MPI message size\n", 2432 __func__); 2433 rval = -EINVAL; 2434 goto out; 2435 } 2436 memcpy(mpi_req, sgl_iter, buf_entries->buf_len); 2437 break; 2438 default: 2439 invalid_be = 1; 2440 break; 2441 } 2442 if (invalid_be) { 2443 dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n", 2444 __func__); 2445 rval = -EINVAL; 2446 goto out; 2447 } 2448 2449 if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) { 2450 dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n", 2451 __func__); 2452 rval = -EINVAL; 2453 goto out; 2454 } 2455 if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) { 2456 dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n", 2457 __func__); 2458 rval = -EINVAL; 2459 goto out; 2460 } 2461 2462 drv_buf_iter->bsg_buf = sgl_iter; 2463 drv_buf_iter->bsg_buf_len = buf_entries->buf_len; 2464 } 2465 2466 if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) { 2467 dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n", 2468 __func__, __LINE__, mpi_header->function, din_size, 2469 dout_size); 2470 rval = -EINVAL; 2471 goto out; 2472 } 2473 2474 if (din_size > MPI3MR_MAX_APP_XFER_SIZE) { 2475 dprint_bsg_err(mrioc, 2476 "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n", 2477 __func__, __LINE__, mpi_header->function, din_size); 2478 rval = -EINVAL; 2479 goto out; 2480 } 2481 if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) { 2482 dprint_bsg_err(mrioc, 2483 "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n", 2484 __func__, __LINE__, mpi_header->function, dout_size); 2485 rval = -EINVAL; 2486 goto out; 2487 } 2488 2489 if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) { 2490 if (din_size > MPI3MR_IOCTL_SGE_SIZE || 2491 dout_size > MPI3MR_IOCTL_SGE_SIZE) { 2492 dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n", 2493 __func__, __LINE__, din_cnt, dout_cnt, din_size, 2494 dout_size); 2495 rval = -EINVAL; 2496 goto out; 2497 } 2498 } 2499 2500 drv_buf_iter = drv_bufs; 2501 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 2502 if (drv_buf_iter->data_dir == DMA_NONE) 2503 continue; 2504 2505 drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len; 2506 if (is_rmcb && !count) { 2507 drv_buf_iter->kern_buf_len = 2508 mrioc->ioctl_chain_sge.size; 2509 drv_buf_iter->kern_buf = 2510 mrioc->ioctl_chain_sge.addr; 2511 drv_buf_iter->kern_buf_dma = 2512 mrioc->ioctl_chain_sge.dma_addr; 2513 drv_buf_iter->dma_desc = NULL; 2514 drv_buf_iter->num_dma_desc = 0; 2515 memset(drv_buf_iter->kern_buf, 0, 2516 drv_buf_iter->kern_buf_len); 2517 tmplen = min(drv_buf_iter->kern_buf_len, 2518 drv_buf_iter->bsg_buf_len); 2519 rmc_size = tmplen; 2520 memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen); 2521 } else if (is_rmrb && (count == 1)) { 2522 drv_buf_iter->kern_buf_len = 2523 mrioc->ioctl_resp_sge.size; 2524 drv_buf_iter->kern_buf = 2525 mrioc->ioctl_resp_sge.addr; 2526 drv_buf_iter->kern_buf_dma = 2527 mrioc->ioctl_resp_sge.dma_addr; 2528 drv_buf_iter->dma_desc = NULL; 2529 drv_buf_iter->num_dma_desc = 0; 2530 memset(drv_buf_iter->kern_buf, 0, 2531 drv_buf_iter->kern_buf_len); 2532 tmplen = min(drv_buf_iter->kern_buf_len, 2533 drv_buf_iter->bsg_buf_len); 2534 drv_buf_iter->kern_buf_len = tmplen; 2535 memset(drv_buf_iter->bsg_buf, 0, 2536 drv_buf_iter->bsg_buf_len); 2537 } else { 2538 if (!drv_buf_iter->kern_buf_len) 2539 continue; 2540 if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) { 2541 rval = -ENOMEM; 2542 dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n", 2543 __func__, __LINE__); 2544 goto out; 2545 } 2546 desc_count += drv_buf_iter->num_dma_desc; 2547 } 2548 } 2549 2550 if (erb_offset != 0xFF) { 2551 sense_buff_k = kzalloc(erbsz, GFP_KERNEL); 2552 if (!sense_buff_k) { 2553 rval = -ENOMEM; 2554 goto out; 2555 } 2556 } 2557 2558 if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) { 2559 rval = -ERESTARTSYS; 2560 goto out; 2561 } 2562 if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) { 2563 rval = -EAGAIN; 2564 dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); 2565 mutex_unlock(&mrioc->bsg_cmds.mutex); 2566 goto out; 2567 } 2568 if (mrioc->unrecoverable) { 2569 dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", 2570 __func__); 2571 rval = -EFAULT; 2572 mutex_unlock(&mrioc->bsg_cmds.mutex); 2573 goto out; 2574 } 2575 if (mrioc->reset_in_progress) { 2576 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); 2577 rval = -EAGAIN; 2578 mutex_unlock(&mrioc->bsg_cmds.mutex); 2579 goto out; 2580 } 2581 if (mrioc->stop_bsgs || mrioc->block_on_pci_err) { 2582 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); 2583 rval = -EAGAIN; 2584 mutex_unlock(&mrioc->bsg_cmds.mutex); 2585 goto out; 2586 } 2587 2588 if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) { 2589 nvme_fmt = mpi3mr_get_nvme_data_fmt( 2590 (struct mpi3_nvme_encapsulated_request *)mpi_req); 2591 if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) { 2592 if (mpi3mr_build_nvme_prp(mrioc, 2593 (struct mpi3_nvme_encapsulated_request *)mpi_req, 2594 drv_bufs, bufcnt)) { 2595 rval = -ENOMEM; 2596 mutex_unlock(&mrioc->bsg_cmds.mutex); 2597 goto out; 2598 } 2599 } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 || 2600 nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) { 2601 if (mpi3mr_build_nvme_sgl(mrioc, 2602 (struct mpi3_nvme_encapsulated_request *)mpi_req, 2603 drv_bufs, bufcnt)) { 2604 rval = -EINVAL; 2605 mutex_unlock(&mrioc->bsg_cmds.mutex); 2606 goto out; 2607 } 2608 } else { 2609 dprint_bsg_err(mrioc, 2610 "%s:invalid NVMe command format\n", __func__); 2611 rval = -EINVAL; 2612 mutex_unlock(&mrioc->bsg_cmds.mutex); 2613 goto out; 2614 } 2615 } else { 2616 if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size, 2617 drv_bufs, bufcnt, is_rmcb, is_rmrb, 2618 (dout_cnt + din_cnt))) { 2619 dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__); 2620 rval = -EAGAIN; 2621 mutex_unlock(&mrioc->bsg_cmds.mutex); 2622 goto out; 2623 } 2624 } 2625 2626 if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) { 2627 tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req; 2628 if (tm_req->task_type != 2629 MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 2630 dev_handle = tm_req->dev_handle; 2631 block_io = 1; 2632 } 2633 } 2634 if (block_io) { 2635 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2636 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) { 2637 stgt_priv = (struct mpi3mr_stgt_priv_data *) 2638 tgtdev->starget->hostdata; 2639 atomic_inc(&stgt_priv->block_io); 2640 mpi3mr_tgtdev_put(tgtdev); 2641 } 2642 } 2643 2644 mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING; 2645 mrioc->bsg_cmds.is_waiting = 1; 2646 mrioc->bsg_cmds.callback = NULL; 2647 mrioc->bsg_cmds.is_sense = 0; 2648 mrioc->bsg_cmds.sensebuf = sense_buff_k; 2649 memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz); 2650 mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS); 2651 if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) { 2652 dprint_bsg_info(mrioc, 2653 "%s: posting bsg request to the controller\n", __func__); 2654 dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, 2655 "bsg_mpi3_req"); 2656 if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { 2657 drv_buf_iter = &drv_bufs[0]; 2658 dprint_dump(drv_buf_iter->kern_buf, 2659 rmc_size, "mpi3_mgmt_req"); 2660 } 2661 } 2662 2663 init_completion(&mrioc->bsg_cmds.done); 2664 rval = mpi3mr_admin_request_post(mrioc, mpi_req, 2665 MPI3MR_ADMIN_REQ_FRAME_SZ, 0); 2666 2667 2668 if (rval) { 2669 mrioc->bsg_cmds.is_waiting = 0; 2670 dprint_bsg_err(mrioc, 2671 "%s: posting bsg request is failed\n", __func__); 2672 rval = -EAGAIN; 2673 goto out_unlock; 2674 } 2675 wait_for_completion_timeout(&mrioc->bsg_cmds.done, 2676 (karg->timeout * HZ)); 2677 if (block_io && stgt_priv) 2678 atomic_dec(&stgt_priv->block_io); 2679 if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) { 2680 mrioc->bsg_cmds.is_waiting = 0; 2681 rval = -EAGAIN; 2682 if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET) 2683 goto out_unlock; 2684 if (((mpi_header->function != MPI3_FUNCTION_SCSI_IO) && 2685 (mpi_header->function != MPI3_FUNCTION_NVME_ENCAPSULATED)) 2686 || (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR)) { 2687 ioc_info(mrioc, "%s: bsg request timedout after %d seconds\n", 2688 __func__, karg->timeout); 2689 if (!(mrioc->logging_level & MPI3_DEBUG_BSG_INFO)) { 2690 dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, 2691 "bsg_mpi3_req"); 2692 if (mpi_header->function == 2693 MPI3_FUNCTION_MGMT_PASSTHROUGH) { 2694 drv_buf_iter = &drv_bufs[0]; 2695 dprint_dump(drv_buf_iter->kern_buf, 2696 rmc_size, "mpi3_mgmt_req"); 2697 } 2698 } 2699 } 2700 if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) || 2701 (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) { 2702 dprint_bsg_err(mrioc, "%s: bsg request timedout after %d seconds,\n" 2703 "issuing target reset to (0x%04x)\n", __func__, 2704 karg->timeout, mpi_header->function_dependent); 2705 mpi3mr_issue_tm(mrioc, 2706 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 2707 mpi_header->function_dependent, 0, 2708 MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT, 2709 &mrioc->host_tm_cmds, &resp_code, NULL); 2710 } 2711 if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) && 2712 !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)) 2713 mpi3mr_soft_reset_handler(mrioc, 2714 MPI3MR_RESET_FROM_APP_TIMEOUT, 1); 2715 goto out_unlock; 2716 } 2717 dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__); 2718 2719 if (mrioc->prp_list_virt) { 2720 dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz, 2721 mrioc->prp_list_virt, mrioc->prp_list_dma); 2722 mrioc->prp_list_virt = NULL; 2723 } 2724 2725 if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 2726 != MPI3_IOCSTATUS_SUCCESS) { 2727 dprint_bsg_info(mrioc, 2728 "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n", 2729 __func__, 2730 (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2731 mrioc->bsg_cmds.ioc_loginfo); 2732 } 2733 2734 if ((mpirep_offset != 0xFF) && 2735 drv_bufs[mpirep_offset].bsg_buf_len) { 2736 drv_buf_iter = &drv_bufs[mpirep_offset]; 2737 drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) + 2738 mrioc->reply_sz); 2739 bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL); 2740 2741 if (!bsg_reply_buf) { 2742 rval = -ENOMEM; 2743 goto out_unlock; 2744 } 2745 if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) { 2746 bsg_reply_buf->mpi_reply_type = 2747 MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS; 2748 memcpy(bsg_reply_buf->reply_buf, 2749 mrioc->bsg_cmds.reply, mrioc->reply_sz); 2750 } else { 2751 bsg_reply_buf->mpi_reply_type = 2752 MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS; 2753 status_desc = (struct mpi3_status_reply_descriptor *) 2754 bsg_reply_buf->reply_buf; 2755 status_desc->ioc_status = mrioc->bsg_cmds.ioc_status; 2756 status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo; 2757 } 2758 tmplen = min(drv_buf_iter->kern_buf_len, 2759 drv_buf_iter->bsg_buf_len); 2760 memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen); 2761 } 2762 2763 if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf && 2764 mrioc->bsg_cmds.is_sense) { 2765 drv_buf_iter = &drv_bufs[erb_offset]; 2766 tmplen = min(erbsz, drv_buf_iter->bsg_buf_len); 2767 memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen); 2768 } 2769 2770 drv_buf_iter = drv_bufs; 2771 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 2772 if (drv_buf_iter->data_dir == DMA_NONE) 2773 continue; 2774 if ((count == 1) && is_rmrb) { 2775 memcpy(drv_buf_iter->bsg_buf, 2776 drv_buf_iter->kern_buf, 2777 drv_buf_iter->kern_buf_len); 2778 } else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) { 2779 tmplen = 0; 2780 for (desc_count = 0; 2781 desc_count < drv_buf_iter->num_dma_desc; 2782 desc_count++) { 2783 memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen), 2784 drv_buf_iter->dma_desc[desc_count].addr, 2785 drv_buf_iter->dma_desc[desc_count].size); 2786 tmplen += 2787 drv_buf_iter->dma_desc[desc_count].size; 2788 } 2789 } 2790 } 2791 2792 out_unlock: 2793 if (din_buf) { 2794 job->reply_payload_rcv_len = 2795 sg_copy_from_buffer(job->reply_payload.sg_list, 2796 job->reply_payload.sg_cnt, 2797 din_buf, job->reply_payload.payload_len); 2798 } 2799 mrioc->bsg_cmds.is_sense = 0; 2800 mrioc->bsg_cmds.sensebuf = NULL; 2801 mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED; 2802 mutex_unlock(&mrioc->bsg_cmds.mutex); 2803 out: 2804 kfree(sense_buff_k); 2805 kfree(dout_buf); 2806 kfree(din_buf); 2807 kfree(mpi_req); 2808 if (drv_bufs) { 2809 drv_buf_iter = drv_bufs; 2810 for (count = 0; count < bufcnt; count++, drv_buf_iter++) 2811 kfree(drv_buf_iter->dma_desc); 2812 kfree(drv_bufs); 2813 } 2814 kfree(bsg_reply_buf); 2815 return rval; 2816 } 2817 2818 /** 2819 * mpi3mr_app_save_logdata - Save Log Data events 2820 * @mrioc: Adapter instance reference 2821 * @event_data: event data associated with log data event 2822 * @event_data_size: event data size to copy 2823 * 2824 * If log data event caching is enabled by the applicatiobns, 2825 * then this function saves the log data in the circular queue 2826 * and Sends async signal SIGIO to indicate there is an async 2827 * event from the firmware to the event monitoring applications. 2828 * 2829 * Return:Nothing 2830 */ 2831 void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data, 2832 u16 event_data_size) 2833 { 2834 u32 index = mrioc->logdata_buf_idx, sz; 2835 struct mpi3mr_logdata_entry *entry; 2836 2837 if (!(mrioc->logdata_buf)) 2838 return; 2839 2840 entry = (struct mpi3mr_logdata_entry *) 2841 (mrioc->logdata_buf + (index * mrioc->logdata_entry_sz)); 2842 entry->valid_entry = 1; 2843 sz = min(mrioc->logdata_entry_sz, event_data_size); 2844 memcpy(entry->data, event_data, sz); 2845 mrioc->logdata_buf_idx = 2846 ((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES); 2847 atomic64_inc(&event_counter); 2848 } 2849 2850 /** 2851 * mpi3mr_bsg_request - bsg request entry point 2852 * @job: BSG job reference 2853 * 2854 * This is driver's entry point for bsg requests 2855 * 2856 * Return: 0 on success and proper error codes on failure 2857 */ 2858 static int mpi3mr_bsg_request(struct bsg_job *job) 2859 { 2860 long rval = -EINVAL; 2861 unsigned int reply_payload_rcv_len = 0; 2862 2863 struct mpi3mr_bsg_packet *bsg_req = job->request; 2864 2865 switch (bsg_req->cmd_type) { 2866 case MPI3MR_DRV_CMD: 2867 rval = mpi3mr_bsg_process_drv_cmds(job); 2868 break; 2869 case MPI3MR_MPT_CMD: 2870 rval = mpi3mr_bsg_process_mpt_cmds(job); 2871 break; 2872 default: 2873 pr_err("%s: unsupported BSG command(0x%08x)\n", 2874 MPI3MR_DRIVER_NAME, bsg_req->cmd_type); 2875 break; 2876 } 2877 2878 bsg_job_done(job, rval, reply_payload_rcv_len); 2879 2880 return 0; 2881 } 2882 2883 /** 2884 * mpi3mr_bsg_exit - de-registration from bsg layer 2885 * @mrioc: Adapter instance reference 2886 * 2887 * This will be called during driver unload and all 2888 * bsg resources allocated during load will be freed. 2889 * 2890 * Return:Nothing 2891 */ 2892 void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc) 2893 { 2894 struct device *bsg_dev = &mrioc->bsg_dev; 2895 if (!mrioc->bsg_queue) 2896 return; 2897 2898 bsg_remove_queue(mrioc->bsg_queue); 2899 mrioc->bsg_queue = NULL; 2900 2901 device_del(bsg_dev); 2902 put_device(bsg_dev); 2903 } 2904 2905 /** 2906 * mpi3mr_bsg_node_release -release bsg device node 2907 * @dev: bsg device node 2908 * 2909 * decrements bsg dev parent reference count 2910 * 2911 * Return:Nothing 2912 */ 2913 static void mpi3mr_bsg_node_release(struct device *dev) 2914 { 2915 put_device(dev->parent); 2916 } 2917 2918 /** 2919 * mpi3mr_bsg_init - registration with bsg layer 2920 * @mrioc: Adapter instance reference 2921 * 2922 * This will be called during driver load and it will 2923 * register driver with bsg layer 2924 * 2925 * Return:Nothing 2926 */ 2927 void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc) 2928 { 2929 struct device *bsg_dev = &mrioc->bsg_dev; 2930 struct device *parent = &mrioc->shost->shost_gendev; 2931 struct queue_limits lim = { 2932 .max_hw_sectors = MPI3MR_MAX_APP_XFER_SECTORS, 2933 .max_segments = MPI3MR_MAX_APP_XFER_SEGMENTS, 2934 }; 2935 2936 device_initialize(bsg_dev); 2937 2938 bsg_dev->parent = get_device(parent); 2939 bsg_dev->release = mpi3mr_bsg_node_release; 2940 2941 dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id); 2942 2943 if (device_add(bsg_dev)) { 2944 ioc_err(mrioc, "%s: bsg device add failed\n", 2945 dev_name(bsg_dev)); 2946 put_device(bsg_dev); 2947 return; 2948 } 2949 2950 mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim, 2951 mpi3mr_bsg_request, NULL, 0); 2952 if (IS_ERR(mrioc->bsg_queue)) { 2953 ioc_err(mrioc, "%s: bsg registration failed\n", 2954 dev_name(bsg_dev)); 2955 device_del(bsg_dev); 2956 put_device(bsg_dev); 2957 } 2958 } 2959 2960 /** 2961 * version_fw_show - SysFS callback for firmware version read 2962 * @dev: class device 2963 * @attr: Device attributes 2964 * @buf: Buffer to copy 2965 * 2966 * Return: sysfs_emit() return after copying firmware version 2967 */ 2968 static ssize_t 2969 version_fw_show(struct device *dev, struct device_attribute *attr, 2970 char *buf) 2971 { 2972 struct Scsi_Host *shost = class_to_shost(dev); 2973 struct mpi3mr_ioc *mrioc = shost_priv(shost); 2974 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 2975 2976 return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n", 2977 fwver->gen_major, fwver->gen_minor, fwver->ph_major, 2978 fwver->ph_minor, fwver->cust_id, fwver->build_num); 2979 } 2980 static DEVICE_ATTR_RO(version_fw); 2981 2982 /** 2983 * fw_queue_depth_show - SysFS callback for firmware max cmds 2984 * @dev: class device 2985 * @attr: Device attributes 2986 * @buf: Buffer to copy 2987 * 2988 * Return: sysfs_emit() return after copying firmware max commands 2989 */ 2990 static ssize_t 2991 fw_queue_depth_show(struct device *dev, struct device_attribute *attr, 2992 char *buf) 2993 { 2994 struct Scsi_Host *shost = class_to_shost(dev); 2995 struct mpi3mr_ioc *mrioc = shost_priv(shost); 2996 2997 return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs); 2998 } 2999 static DEVICE_ATTR_RO(fw_queue_depth); 3000 3001 /** 3002 * op_req_q_count_show - SysFS callback for request queue count 3003 * @dev: class device 3004 * @attr: Device attributes 3005 * @buf: Buffer to copy 3006 * 3007 * Return: sysfs_emit() return after copying request queue count 3008 */ 3009 static ssize_t 3010 op_req_q_count_show(struct device *dev, struct device_attribute *attr, 3011 char *buf) 3012 { 3013 struct Scsi_Host *shost = class_to_shost(dev); 3014 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3015 3016 return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q); 3017 } 3018 static DEVICE_ATTR_RO(op_req_q_count); 3019 3020 /** 3021 * reply_queue_count_show - SysFS callback for reply queue count 3022 * @dev: class device 3023 * @attr: Device attributes 3024 * @buf: Buffer to copy 3025 * 3026 * Return: sysfs_emit() return after copying reply queue count 3027 */ 3028 static ssize_t 3029 reply_queue_count_show(struct device *dev, struct device_attribute *attr, 3030 char *buf) 3031 { 3032 struct Scsi_Host *shost = class_to_shost(dev); 3033 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3034 3035 return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q); 3036 } 3037 3038 static DEVICE_ATTR_RO(reply_queue_count); 3039 3040 /** 3041 * logging_level_show - Show controller debug level 3042 * @dev: class device 3043 * @attr: Device attributes 3044 * @buf: Buffer to copy 3045 * 3046 * A sysfs 'read/write' shost attribute, to show the current 3047 * debug log level used by the driver for the specific 3048 * controller. 3049 * 3050 * Return: sysfs_emit() return 3051 */ 3052 static ssize_t 3053 logging_level_show(struct device *dev, 3054 struct device_attribute *attr, char *buf) 3055 3056 { 3057 struct Scsi_Host *shost = class_to_shost(dev); 3058 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3059 3060 return sysfs_emit(buf, "%08xh\n", mrioc->logging_level); 3061 } 3062 3063 /** 3064 * logging_level_store- Change controller debug level 3065 * @dev: class device 3066 * @attr: Device attributes 3067 * @buf: Buffer to copy 3068 * @count: size of the buffer 3069 * 3070 * A sysfs 'read/write' shost attribute, to change the current 3071 * debug log level used by the driver for the specific 3072 * controller. 3073 * 3074 * Return: strlen() return 3075 */ 3076 static ssize_t 3077 logging_level_store(struct device *dev, 3078 struct device_attribute *attr, 3079 const char *buf, size_t count) 3080 { 3081 struct Scsi_Host *shost = class_to_shost(dev); 3082 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3083 int val = 0; 3084 3085 if (kstrtoint(buf, 0, &val) != 0) 3086 return -EINVAL; 3087 3088 mrioc->logging_level = val; 3089 ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level); 3090 return strlen(buf); 3091 } 3092 static DEVICE_ATTR_RW(logging_level); 3093 3094 /** 3095 * adp_state_show() - SysFS callback for adapter state show 3096 * @dev: class device 3097 * @attr: Device attributes 3098 * @buf: Buffer to copy 3099 * 3100 * Return: sysfs_emit() return after copying adapter state 3101 */ 3102 static ssize_t 3103 adp_state_show(struct device *dev, struct device_attribute *attr, 3104 char *buf) 3105 { 3106 struct Scsi_Host *shost = class_to_shost(dev); 3107 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3108 enum mpi3mr_iocstate ioc_state; 3109 uint8_t adp_state; 3110 3111 ioc_state = mpi3mr_get_iocstate(mrioc); 3112 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 3113 adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; 3114 else if (mrioc->reset_in_progress || mrioc->stop_bsgs || 3115 mrioc->block_on_pci_err) 3116 adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; 3117 else if (ioc_state == MRIOC_STATE_FAULT) 3118 adp_state = MPI3MR_BSG_ADPSTATE_FAULT; 3119 else 3120 adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL; 3121 3122 return sysfs_emit(buf, "%u\n", adp_state); 3123 } 3124 3125 static DEVICE_ATTR_RO(adp_state); 3126 3127 static struct attribute *mpi3mr_host_attrs[] = { 3128 &dev_attr_version_fw.attr, 3129 &dev_attr_fw_queue_depth.attr, 3130 &dev_attr_op_req_q_count.attr, 3131 &dev_attr_reply_queue_count.attr, 3132 &dev_attr_logging_level.attr, 3133 &dev_attr_adp_state.attr, 3134 NULL, 3135 }; 3136 3137 static const struct attribute_group mpi3mr_host_attr_group = { 3138 .attrs = mpi3mr_host_attrs 3139 }; 3140 3141 const struct attribute_group *mpi3mr_host_groups[] = { 3142 &mpi3mr_host_attr_group, 3143 NULL, 3144 }; 3145 3146 3147 /* 3148 * SCSI Device attributes under sysfs 3149 */ 3150 3151 /** 3152 * sas_address_show - SysFS callback for dev SASaddress display 3153 * @dev: class device 3154 * @attr: Device attributes 3155 * @buf: Buffer to copy 3156 * 3157 * Return: sysfs_emit() return after copying SAS address of the 3158 * specific SAS/SATA end device. 3159 */ 3160 static ssize_t 3161 sas_address_show(struct device *dev, struct device_attribute *attr, 3162 char *buf) 3163 { 3164 struct scsi_device *sdev = to_scsi_device(dev); 3165 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3166 struct mpi3mr_stgt_priv_data *tgt_priv_data; 3167 struct mpi3mr_tgt_dev *tgtdev; 3168 3169 sdev_priv_data = sdev->hostdata; 3170 if (!sdev_priv_data) 3171 return 0; 3172 3173 tgt_priv_data = sdev_priv_data->tgt_priv_data; 3174 if (!tgt_priv_data) 3175 return 0; 3176 tgtdev = tgt_priv_data->tgt_dev; 3177 if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) 3178 return 0; 3179 return sysfs_emit(buf, "0x%016llx\n", 3180 (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address); 3181 } 3182 3183 static DEVICE_ATTR_RO(sas_address); 3184 3185 /** 3186 * device_handle_show - SysFS callback for device handle display 3187 * @dev: class device 3188 * @attr: Device attributes 3189 * @buf: Buffer to copy 3190 * 3191 * Return: sysfs_emit() return after copying firmware internal 3192 * device handle of the specific device. 3193 */ 3194 static ssize_t 3195 device_handle_show(struct device *dev, struct device_attribute *attr, 3196 char *buf) 3197 { 3198 struct scsi_device *sdev = to_scsi_device(dev); 3199 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3200 struct mpi3mr_stgt_priv_data *tgt_priv_data; 3201 struct mpi3mr_tgt_dev *tgtdev; 3202 3203 sdev_priv_data = sdev->hostdata; 3204 if (!sdev_priv_data) 3205 return 0; 3206 3207 tgt_priv_data = sdev_priv_data->tgt_priv_data; 3208 if (!tgt_priv_data) 3209 return 0; 3210 tgtdev = tgt_priv_data->tgt_dev; 3211 if (!tgtdev) 3212 return 0; 3213 return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle); 3214 } 3215 3216 static DEVICE_ATTR_RO(device_handle); 3217 3218 /** 3219 * persistent_id_show - SysFS callback for persisten ID display 3220 * @dev: class device 3221 * @attr: Device attributes 3222 * @buf: Buffer to copy 3223 * 3224 * Return: sysfs_emit() return after copying persistent ID of the 3225 * of the specific device. 3226 */ 3227 static ssize_t 3228 persistent_id_show(struct device *dev, struct device_attribute *attr, 3229 char *buf) 3230 { 3231 struct scsi_device *sdev = to_scsi_device(dev); 3232 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3233 struct mpi3mr_stgt_priv_data *tgt_priv_data; 3234 struct mpi3mr_tgt_dev *tgtdev; 3235 3236 sdev_priv_data = sdev->hostdata; 3237 if (!sdev_priv_data) 3238 return 0; 3239 3240 tgt_priv_data = sdev_priv_data->tgt_priv_data; 3241 if (!tgt_priv_data) 3242 return 0; 3243 tgtdev = tgt_priv_data->tgt_dev; 3244 if (!tgtdev) 3245 return 0; 3246 return sysfs_emit(buf, "%d\n", tgtdev->perst_id); 3247 } 3248 static DEVICE_ATTR_RO(persistent_id); 3249 3250 /** 3251 * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority 3252 * @dev: pointer to embedded device 3253 * @attr: sas_ncq_prio_supported attribute descriptor 3254 * @buf: the buffer returned 3255 * 3256 * A sysfs 'read-only' sdev attribute, only works with SATA devices 3257 */ 3258 static ssize_t 3259 sas_ncq_prio_supported_show(struct device *dev, 3260 struct device_attribute *attr, char *buf) 3261 { 3262 struct scsi_device *sdev = to_scsi_device(dev); 3263 3264 return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); 3265 } 3266 static DEVICE_ATTR_RO(sas_ncq_prio_supported); 3267 3268 /** 3269 * sas_ncq_prio_enable_show - send prioritized io commands to device 3270 * @dev: pointer to embedded device 3271 * @attr: sas_ncq_prio_enable attribute descriptor 3272 * @buf: the buffer returned 3273 * 3274 * A sysfs 'read/write' sdev attribute, only works with SATA devices 3275 */ 3276 static ssize_t 3277 sas_ncq_prio_enable_show(struct device *dev, 3278 struct device_attribute *attr, char *buf) 3279 { 3280 struct scsi_device *sdev = to_scsi_device(dev); 3281 struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; 3282 3283 if (!sdev_priv_data) 3284 return 0; 3285 3286 return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable); 3287 } 3288 3289 static ssize_t 3290 sas_ncq_prio_enable_store(struct device *dev, 3291 struct device_attribute *attr, 3292 const char *buf, size_t count) 3293 { 3294 struct scsi_device *sdev = to_scsi_device(dev); 3295 struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; 3296 bool ncq_prio_enable = 0; 3297 3298 if (kstrtobool(buf, &ncq_prio_enable)) 3299 return -EINVAL; 3300 3301 if (!sas_ata_ncq_prio_supported(sdev)) 3302 return -EINVAL; 3303 3304 sdev_priv_data->ncq_prio_enable = ncq_prio_enable; 3305 3306 return strlen(buf); 3307 } 3308 static DEVICE_ATTR_RW(sas_ncq_prio_enable); 3309 3310 static struct attribute *mpi3mr_dev_attrs[] = { 3311 &dev_attr_sas_address.attr, 3312 &dev_attr_device_handle.attr, 3313 &dev_attr_persistent_id.attr, 3314 &dev_attr_sas_ncq_prio_supported.attr, 3315 &dev_attr_sas_ncq_prio_enable.attr, 3316 NULL, 3317 }; 3318 3319 static const struct attribute_group mpi3mr_dev_attr_group = { 3320 .attrs = mpi3mr_dev_attrs 3321 }; 3322 3323 const struct attribute_group *mpi3mr_dev_groups[] = { 3324 &mpi3mr_dev_attr_group, 3325 NULL, 3326 }; 3327