// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/bsg-lib.h>
#include <uapi/scsi/scsi_bsg_mpi3mr.h>

/**
 * mpi3mr_alloc_trace_buffer - Allocate trace buffer
 * @mrioc: Adapter instance reference
 * @trace_size: Trace buffer size
 *
 * Allocate a DMA-coherent trace buffer.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
{
	struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];

	diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
	    trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
	if (diag_buffer->addr) {
		dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
		return 0;
	}
	return -1;
}

/**
 * mpi3mr_alloc_diag_bufs - Allocate memory for diag buffers
 * @mrioc: Adapter instance reference
 *
 * This function checks whether the driver-defined buffer sizes
 * are greater than the IOCFacts-provided controller local
 * buffer sizes and, if so, allocates the specific buffer using
 * the sizes read from driver page 1.
 *
 * Return: Nothing.
 */
void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	struct diag_buffer_desc *diag_buffer;
	struct mpi3_driver_page1 driver_pg1;
	u32 trace_dec_size, trace_min_size, fw_dec_size, fw_min_size,
		trace_size, fw_size;
	u16 pg_sz = sizeof(driver_pg1);
	int retval = 0;
	bool retry = false;

	if (mrioc->diag_buffers[0].addr || mrioc->diag_buffers[1].addr)
		return;

	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
	if (retval) {
		ioc_warn(mrioc,
		    "%s: driver page 1 read failed, allocating trace\n"
		    "and firmware diag buffers of default size\n", __func__);
		trace_size = fw_size = MPI3MR_DEFAULT_HDB_MAX_SZ;
		trace_dec_size = fw_dec_size = MPI3MR_DEFAULT_HDB_DEC_SZ;
		trace_min_size = fw_min_size = MPI3MR_DEFAULT_HDB_MIN_SZ;

	} else {
		trace_size = driver_pg1.host_diag_trace_max_size * 1024;
		trace_dec_size = driver_pg1.host_diag_trace_decrement_size
			* 1024;
		trace_min_size = driver_pg1.host_diag_trace_min_size * 1024;
		fw_size = driver_pg1.host_diag_fw_max_size * 1024;
		fw_dec_size = driver_pg1.host_diag_fw_decrement_size * 1024;
		fw_min_size = driver_pg1.host_diag_fw_min_size * 1024;
		dprint_init(mrioc,
		    "%s:trace diag buffer sizes read from driver\n"
		    "page1: maximum size = %dKB, decrement size = %dKB\n"
		    ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_trace_max_size,
		    driver_pg1.host_diag_trace_decrement_size,
		    driver_pg1.host_diag_trace_min_size);
		dprint_init(mrioc,
		    "%s:firmware diag buffer sizes read from driver\n"
		    "page1: maximum size = %dKB, decrement size = %dKB\n"
		    ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_fw_max_size,
		    driver_pg1.host_diag_fw_decrement_size,
		    driver_pg1.host_diag_fw_min_size);
		if ((trace_size == 0) && (fw_size == 0))
			return;
	}

retry_trace:
	diag_buffer = &mrioc->diag_buffers[0];
	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_TRACE;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
	if ((mrioc->facts.diag_trace_sz < trace_size) && (trace_size >=
	    trace_min_size)) {
		if (!retry)
			dprint_init(mrioc,
			    "trying to allocate trace diag buffer of size = %dKB\n",
			    trace_size / 1024);
		if (get_order(trace_size) > MAX_PAGE_ORDER ||
		    mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
			retry = true;
			trace_size -= trace_dec_size;
			dprint_init(mrioc, "trace diag buffer allocation failed\n"
			    "retrying smaller size %dKB\n", trace_size / 1024);
			goto retry_trace;
		} else
			diag_buffer->size = trace_size;
	}

	retry = false;
retry_fw:

	diag_buffer = &mrioc->diag_buffers[1];

	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_FW;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
	if ((mrioc->facts.diag_fw_sz < fw_size) && (fw_size >= fw_min_size)) {
		if (get_order(fw_size) <= MAX_PAGE_ORDER) {
			diag_buffer->addr
				= dma_alloc_coherent(&mrioc->pdev->dev, fw_size,
						     &diag_buffer->dma_addr,
						     GFP_KERNEL);
		}
		if (!retry)
			dprint_init(mrioc,
			    "%s:trying to allocate firmware diag buffer of size = %dKB\n",
			    __func__, fw_size / 1024);
		if (diag_buffer->addr) {
			dprint_init(mrioc, "%s:firmware diag buffer allocated successfully\n",
			    __func__);
			diag_buffer->size = fw_size;
		} else {
			retry = true;
			fw_size -= fw_dec_size;
			dprint_init(mrioc, "%s:firmware diag buffer allocation failed,\n"
			    "retrying smaller size %dKB\n",
			    __func__, fw_size / 1024);
			goto retry_fw;
		}
	}
}
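/*
 * A worked illustration of the shrink-and-retry policy above, using
 * made-up numbers rather than the real MPI3MR_DEFAULT_HDB_* values:
 * with a maximum of 8MB, a decrement of 2MB and a minimum of 2MB, the
 * allocation is attempted at 8MB, 6MB, 4MB and finally 2MB. The first
 * size that both fits within MAX_PAGE_ORDER and can be DMA-mapped is
 * kept; once the size drops below the minimum, the buffer is simply
 * left unallocated.
 */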
/**
 * mpi3mr_issue_diag_buf_post - Send diag buffer post req
 * @mrioc: Adapter instance reference
 * @diag_buffer: Diagnostic buffer descriptor
 *
 * Issue diagnostic buffer post MPI request through admin queue
 * and wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
	struct diag_buffer_desc *diag_buffer)
{
	struct mpi3_diag_buffer_post_request diag_buf_post_req;
	u8 prev_status;
	int retval = 0;

	memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->init_cmds.mutex);
		return -1;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	diag_buf_post_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	diag_buf_post_req.function = MPI3_FUNCTION_DIAG_BUFFER_POST;
	diag_buf_post_req.type = diag_buffer->type;
	diag_buf_post_req.address = cpu_to_le64(diag_buffer->dma_addr);
	diag_buf_post_req.length = cpu_to_le32(diag_buffer->size);

	dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
	    diag_buffer->type);
	prev_status = diag_buffer->status;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_post_req,
	    sizeof(diag_buf_post_req), 1);
	if (retval) {
		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
		    __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->init_cmds.is_waiting = 0;
		dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, diag_buffer->type,
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	dprint_bsg_info(mrioc, "%s: diag buffer type %d posted successfully\n",
	    __func__, diag_buffer->type);

out_unlock:
	if (retval)
		diag_buffer->status = prev_status;
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
	return retval;
}
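/*
 * Note on the rollback above: the descriptor is optimistically marked
 * MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED before the admin request is
 * issued and is restored to its previous status on every failure path,
 * so the cached status never claims a buffer the controller does not
 * actually hold.
 */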
/**
 * mpi3mr_post_diag_bufs - Post diag buffers to the controller
 * @mrioc: Adapter instance reference
 *
 * This function calls the helper function to post both trace
 * and firmware buffers to the controller.
 *
 * Return: 0 on success, -1 on failure.
 */
int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	u8 i;
	struct diag_buffer_desc *diag_buffer;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		if (!(diag_buffer->addr))
			continue;
		if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer))
			return -1;
	}
	return 0;
}

/**
 * mpi3mr_issue_diag_buf_release - Send diag buffer release req
 * @mrioc: Adapter instance reference
 * @diag_buffer: Diagnostic buffer descriptor
 *
 * Issue diagnostic buffer manage MPI request with release
 * action request through admin queue and wait for the
 * completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc,
	struct diag_buffer_desc *diag_buffer)
{
	struct mpi3_diag_buffer_manage_request diag_buf_manage_req;
	int retval = 0;

	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		return retval;

	memset(&diag_buf_manage_req, 0, sizeof(diag_buf_manage_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		dprint_reset(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->init_cmds.mutex);
		return -1;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	diag_buf_manage_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	diag_buf_manage_req.function = MPI3_FUNCTION_DIAG_BUFFER_MANAGE;
	diag_buf_manage_req.type = diag_buffer->type;
	diag_buf_manage_req.action = MPI3_DIAG_BUFFER_ACTION_RELEASE;

	dprint_reset(mrioc, "%s: releasing diag buffer type %d\n", __func__,
	    diag_buffer->type);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_manage_req,
	    sizeof(diag_buf_manage_req), 1);
	if (retval) {
		dprint_reset(mrioc, "%s: admin request post failed\n", __func__);
		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
		    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->init_cmds.is_waiting = 0;
		dprint_reset(mrioc, "%s: command timed out\n", __func__);
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_reset(mrioc,
		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, diag_buffer->type,
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	dprint_reset(mrioc, "%s: diag buffer type %d released successfully\n",
	    __func__, diag_buffer->type);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
	return retval;
}
/**
 * mpi3mr_process_trigger - Generic HDB Trigger handler
 * @mrioc: Adapter instance reference
 * @trigger_type: Trigger type
 * @trigger_data: Trigger data
 * @trigger_flags: Trigger flags
 *
 * This function checks the validity of HDBs and triggers and,
 * based on the trigger information, creates an event to be
 * processed in the firmware event worker thread.
 *
 * This function should be called with the trigger spinlock held.
 *
 * Return: Nothing
 */
static void mpi3mr_process_trigger(struct mpi3mr_ioc *mrioc, u8 trigger_type,
	union mpi3mr_trigger_data *trigger_data, u8 trigger_flags)
{
	struct trigger_event_data event_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	u64 global_trigger;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);
	if (trace_hdb &&
	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		trace_hdb = NULL;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);

	if (fw_hdb &&
	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		fw_hdb = NULL;

	if (mrioc->snapdump_trigger_active || (mrioc->fw_release_trigger_active
	    && mrioc->trace_release_trigger_active) ||
	    (!trace_hdb && !fw_hdb) || (!mrioc->driver_pg2) ||
	    ((trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
	     && (!mrioc->driver_pg2->num_triggers)))
		return;

	memset(&event_data, 0, sizeof(event_data));
	event_data.trigger_type = trigger_type;
	memcpy(&event_data.trigger_specific_data, trigger_data,
	    sizeof(*trigger_data));
	global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);

	if (global_trigger & MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED) {
		event_data.snapdump = true;
		event_data.trace_hdb = trace_hdb;
		event_data.fw_hdb = fw_hdb;
		mrioc->snapdump_trigger_active = true;
	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_GLOBAL) {
		if ((trace_hdb) && (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE) &&
		    (!mrioc->trace_release_trigger_active)) {
			event_data.trace_hdb = trace_hdb;
			mrioc->trace_release_trigger_active = true;
		}
		if ((fw_hdb) && (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE) &&
		    (!mrioc->fw_release_trigger_active)) {
			event_data.fw_hdb = fw_hdb;
			mrioc->fw_release_trigger_active = true;
		}
	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) {
		if ((trace_hdb) && (trigger_flags &
		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE) &&
		    (!mrioc->trace_release_trigger_active)) {
			event_data.trace_hdb = trace_hdb;
			mrioc->trace_release_trigger_active = true;
		}
		if ((fw_hdb) && (trigger_flags &
		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE) &&
		    (!mrioc->fw_release_trigger_active)) {
			event_data.fw_hdb = fw_hdb;
			mrioc->fw_release_trigger_active = true;
		}
	}

	if (event_data.trace_hdb || event_data.fw_hdb)
		mpi3mr_hdb_trigger_data_event(mrioc, &event_data);
}
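/*
 * The early return in mpi3mr_process_trigger() drops a trigger when:
 * a snapdump trigger is already active, both the trace and firmware
 * release triggers are already active, no posted (unreleased) HDB
 * exists, driver page 2 was never cached, or an element trigger fires
 * while page 2 defines no trigger elements.
 */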
/**
 * mpi3mr_global_trigger - Global HDB trigger handler
 * @mrioc: Adapter instance reference
 * @trigger_data: Trigger data
 *
 * This function checks whether the given global trigger is
 * enabled in driver page 2 and, if so, calls the generic
 * trigger handler to queue an event for HDB release.
 *
 * Return: Nothing
 */
void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data)
{
	unsigned long flags;
	union mpi3mr_trigger_data trigger_specific_data;

	spin_lock_irqsave(&mrioc->trigger_lock, flags);
	if (le64_to_cpu(mrioc->driver_pg2->global_trigger) & trigger_data) {
		memset(&trigger_specific_data, 0,
		    sizeof(trigger_specific_data));
		trigger_specific_data.global = trigger_data;
		mpi3mr_process_trigger(mrioc, MPI3MR_HDB_TRIGGER_TYPE_GLOBAL,
		    &trigger_specific_data, 0);
	}
	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
}

/**
 * mpi3mr_scsisense_trigger - SCSI sense HDB trigger handler
 * @mrioc: Adapter instance reference
 * @sensekey: Sense Key
 * @asc: Additional Sense Code
 * @ascq: Additional Sense Code Qualifier
 *
 * This function compares SCSI sense trigger values with driver
 * page 2 values and calls the generic trigger handler to
 * release HDBs if a match is found.
 *
 * Return: Nothing
 */
void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 sensekey, u8 asc,
	u8 ascq)
{
	struct mpi3_driver2_trigger_scsi_sense *scsi_sense_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->scsisense_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		scsi_sense_trigger = (struct mpi3_driver2_trigger_scsi_sense *)
			mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;
		for (i = 0; i < num_triggers; i++, scsi_sense_trigger++) {
			if (scsi_sense_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE)
				continue;
			if (!(scsi_sense_trigger->sense_key ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL
			      || scsi_sense_trigger->sense_key == sensekey))
				continue;
			if (!(scsi_sense_trigger->asc ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL ||
			    scsi_sense_trigger->asc == asc))
				continue;
			if (!(scsi_sense_trigger->ascq ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL ||
			    scsi_sense_trigger->ascq == ascq))
				continue;
			trigger_flags = scsi_sense_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)scsi_sense_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}

/**
 * mpi3mr_event_trigger - MPI event HDB trigger handler
 * @mrioc: Adapter instance reference
 * @event: MPI Event
 *
 * This function compares event trigger values with driver page
 * 2 values and calls the generic trigger handler to release
 * HDBs if a match is found.
 *
 * Return: Nothing
 */
void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event)
{
	struct mpi3_driver2_trigger_event *event_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->event_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		event_trigger = (struct mpi3_driver2_trigger_event *)
			mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;

		for (i = 0; i < num_triggers; i++, event_trigger++) {
			if (event_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_EVENT)
				continue;
			if (event_trigger->event != event)
				continue;
			trigger_flags = event_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)event_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}
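/*
 * For SCSI sense triggers above, each of sense key, ASC and ASCQ
 * matches either the exact value or the corresponding
 * MPI3_DRIVER2_TRIGGER_SCSI_SENSE_*_MATCH_ALL wildcard; only the first
 * matching element is processed, and all matching runs under the
 * trigger spinlock.
 */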
/**
 * mpi3mr_reply_trigger - MPI Reply HDB trigger handler
 * @mrioc: Adapter instance reference
 * @ioc_status: Masked value of IOC Status from MPI Reply
 * @ioc_loginfo: IOC Log Info from MPI Reply
 *
 * This function compares IOC status and IOC log info trigger
 * values with driver page 2 values and calls the generic
 * trigger handler to release HDBs if a match is found.
 *
 * Return: Nothing
 */
void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 ioc_status,
	u32 ioc_loginfo)
{
	struct mpi3_driver2_trigger_reply *reply_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->reply_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		reply_trigger = (struct mpi3_driver2_trigger_reply *)
			mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;
		for (i = 0; i < num_triggers; i++, reply_trigger++) {
			if (reply_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_REPLY)
				continue;
			if ((le16_to_cpu(reply_trigger->ioc_status) !=
			     ioc_status)
			    && (le16_to_cpu(reply_trigger->ioc_status) !=
			    MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL))
				continue;
			if ((le32_to_cpu(reply_trigger->ioc_log_info) !=
			    (le32_to_cpu(reply_trigger->ioc_log_info_mask) &
			     ioc_loginfo)))
				continue;
			trigger_flags = reply_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)reply_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}

/**
 * mpi3mr_get_num_trigger - Gets number of HDB triggers
 * @mrioc: Adapter instance reference
 * @num_triggers: Number of triggers
 * @page_action: Page action
 *
 * This function reads the number of triggers from driver
 * page 2.
 *
 * Return: 0 on success and proper error codes on failure
 */
static int mpi3mr_get_num_trigger(struct mpi3mr_ioc *mrioc, u8 *num_triggers,
	u8 page_action)
{
	struct mpi3_driver_page2 drvr_page2;
	int retval = 0;

	*num_triggers = 0;

	retval = mpi3mr_cfg_get_driver_pg2(mrioc, &drvr_page2,
	    sizeof(struct mpi3_driver_page2), page_action);

	if (retval) {
		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
		return retval;
	}
	*num_triggers = drvr_page2.num_triggers;
	return retval;
}

/**
 * mpi3mr_refresh_trigger - Handler for Refresh trigger BSG
 * @mrioc: Adapter instance reference
 * @page_action: Page action
 *
 * This function reads driver page 2 from the controller for the
 * given page action, caches it in the driver's memory and
 * updates the HDB trigger values.
 *
 * Return: 0 on success and proper error codes on failure
 */
int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_action)
{
	u16 pg_sz = sizeof(struct mpi3_driver_page2);
	struct mpi3_driver_page2 *drvr_page2 = NULL;
	u8 trigger_type, num_triggers;
	int retval;
	int i = 0;
	unsigned long flags;

	retval = mpi3mr_get_num_trigger(mrioc, &num_triggers, page_action);

	if (retval)
		goto out;

	pg_sz = offsetof(struct mpi3_driver_page2, trigger) +
	    (num_triggers * sizeof(union mpi3_driver2_trigger_element));
	drvr_page2 = kzalloc(pg_sz, GFP_KERNEL);
	if (!drvr_page2) {
		retval = -ENOMEM;
		goto out;
	}

	retval = mpi3mr_cfg_get_driver_pg2(mrioc, drvr_page2, pg_sz, page_action);
	if (retval) {
		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
		kfree(drvr_page2);
		goto out;
	}
	spin_lock_irqsave(&mrioc->trigger_lock, flags);
	kfree(mrioc->driver_pg2);
	mrioc->driver_pg2 = drvr_page2;
	mrioc->reply_trigger_present = false;
	mrioc->event_trigger_present = false;
	mrioc->scsisense_trigger_present = false;

	for (i = 0; (i < mrioc->driver_pg2->num_triggers); i++) {
		trigger_type = mrioc->driver_pg2->trigger[i].event.type;
		switch (trigger_type) {
		case MPI3_DRIVER2_TRIGGER_TYPE_REPLY:
			mrioc->reply_trigger_present = true;
			break;
		case MPI3_DRIVER2_TRIGGER_TYPE_EVENT:
			mrioc->event_trigger_present = true;
			break;
		case MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE:
			mrioc->scsisense_trigger_present = true;
			break;
		default:
			break;
		}
	}
	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
out:
	return retval;
}
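/*
 * Driver page 2 is intentionally read twice in
 * mpi3mr_refresh_trigger(): a fixed-size read first retrieves
 * num_triggers, which then sizes a second read covering the
 * variable-length trigger element array.
 */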
/**
 * mpi3mr_release_diag_bufs - Release diag buffers
 * @mrioc: Adapter instance reference
 * @skip_rel_action: Skip release action and set buffer state
 *
 * This function calls the helper function to release both trace
 * and firmware buffers from the controller.
 *
 * Return: None
 */
void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action)
{
	u8 i;
	struct diag_buffer_desc *diag_buffer;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		if (!(diag_buffer->addr))
			continue;
		if (diag_buffer->status == MPI3MR_HDB_BUFSTATUS_RELEASED)
			continue;
		if (!skip_rel_action)
			mpi3mr_issue_diag_buf_release(mrioc, diag_buffer);
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
		atomic64_inc(&event_counter);
	}
}

/**
 * mpi3mr_set_trigger_data_in_hdb - Updates HDB trigger type and
 * trigger data
 *
 * @hdb: HDB pointer
 * @type: Trigger type
 * @trigger_data: Pointer to trigger data information
 * @force: Trigger overwrite flag
 *
 * Updates trigger type and trigger data based on the parameters
 * passed to this function.
 *
 * Return: Nothing
 */
void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb,
	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
{
	if ((!force) && (hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN))
		return;
	hdb->trigger_type = type;
	if (!trigger_data)
		memset(&hdb->trigger_data, 0, sizeof(*trigger_data));
	else
		memcpy(&hdb->trigger_data, trigger_data, sizeof(*trigger_data));
}
/**
 * mpi3mr_set_trigger_data_in_all_hdb - Updates HDB trigger type
 * and trigger data for all HDBs
 *
 * @mrioc: Adapter instance reference
 * @type: Trigger type
 * @trigger_data: Pointer to trigger data information
 * @force: Trigger overwrite flag
 *
 * Updates trigger type and trigger data based on the parameters
 * passed to this function.
 *
 * Return: Nothing
 */
void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc,
	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
{
	struct diag_buffer_desc *hdb = NULL;

	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_TRACE);
	if (hdb)
		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
	if (hdb)
		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
}

/**
 * mpi3mr_hdbstatuschg_evt_th - HDB status change evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Modifies the status of the applicable diag buffer descriptors.
 *
 * Return: Nothing
 */
void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_diag_buffer_status_change *evtdata;
	struct diag_buffer_desc *diag_buffer;

	evtdata = (struct mpi3_event_data_diag_buffer_status_change *)
		event_reply->event_data;

	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, evtdata->type);
	if (!diag_buffer)
		return;
	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		return;
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED:
	{
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		atomic64_inc(&event_counter);
		break;
	}
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED:
	{
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
		break;
	}
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED:
	{
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED;
		break;
	}
	default:
		dprint_event_th(mrioc, "%s: unknown reason_code(%d)\n",
		    __func__, evtdata->reason_code);
		break;
	}
}
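/*
 * The reason-code handling above maps controller notifications onto
 * buffer states: RC_RELEASED marks the buffer released and records a
 * firmware-initiated release trigger, while RC_PAUSED and RC_RESUMED
 * only toggle between the posted-paused and posted-unpaused states.
 */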
/**
 * mpi3mr_diag_buffer_for_type - returns buffer desc for type
 * @mrioc: Adapter instance reference
 * @buf_type: Diagnostic buffer type
 *
 * Identifies the matching diag descriptor from mrioc for the
 * given diag buffer type.
 *
 * Return: diag buffer descriptor on success, NULL on failures.
 */
struct diag_buffer_desc *
mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc, u8 buf_type)
{
	u8 i;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		if (mrioc->diag_buffers[i].type == buf_type)
			return &mrioc->diag_buffers[i];
	}
	return NULL;
}

/**
 * mpi3mr_bsg_pel_abort - sends PEL abort request
 * @mrioc: Adapter instance reference
 *
 * This function sends a PEL abort request to the firmware
 * through the admin request queue.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_pel_req_action_abort pel_abort_req;
	struct mpi3_pel_reply *pel_reply;
	int retval = 0;
	u16 pe_log_status;

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -1;
	}
	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -1;
	}

	memset(&pel_abort_req, 0, sizeof(pel_abort_req));
	mutex_lock(&mrioc->pel_abort_cmd.mutex);
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->pel_abort_cmd.mutex);
		return -1;
	}
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
	mrioc->pel_abort_cmd.is_waiting = 1;
	mrioc->pel_abort_cmd.callback = NULL;
	pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
	pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
	pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);

	mrioc->pel_abort_requested = 1;
	init_completion(&mrioc->pel_abort_cmd.done);
	retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
	    sizeof(pel_abort_req), 0);
	if (retval) {
		retval = -1;
		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
		    __func__);
		mrioc->pel_abort_requested = 0;
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->pel_abort_cmd.is_waiting = 0;
		dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
		if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, (mrioc->pel_abort_cmd.ioc_status &
		    MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->pel_abort_cmd.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
		pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
		pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
		if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
			dprint_bsg_err(mrioc,
			    "%s: command failed, pel_status(0x%04x)\n",
			    __func__, pe_log_status);
			retval = -1;
		}
	}

out_unlock:
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->pel_abort_cmd.mutex);
	return retval;
}
/**
 * mpi3mr_bsg_verify_adapter - verify adapter number is valid
 * @ioc_number: Adapter number
 *
 * This function returns the adapter instance pointer for the
 * given adapter number. If the adapter number does not match
 * any adapter in the driver's list, NULL is returned.
 *
 * Return: adapter instance reference
 */
static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
{
	struct mpi3mr_ioc *mrioc = NULL;

	spin_lock(&mrioc_list_lock);
	list_for_each_entry(mrioc, &mrioc_list, list) {
		if (mrioc->id == ioc_number) {
			spin_unlock(&mrioc_list_lock);
			return mrioc;
		}
	}
	spin_unlock(&mrioc_list_lock);
	return NULL;
}

/**
 * mpi3mr_bsg_refresh_hdb_triggers - Refresh HDB trigger data
 * @mrioc: Adapter instance reference
 * @job: BSG Job pointer
 *
 * This function reads the controller trigger config page as
 * defined by the input page type and refreshes the driver's
 * local trigger information structures with the controller's
 * config page data.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_bsg_refresh_hdb_triggers(struct mpi3mr_ioc *mrioc,
				struct bsg_job *job)
{
	struct mpi3mr_bsg_out_refresh_hdb_triggers refresh_triggers;
	uint32_t data_out_sz;
	u8 page_action;
	long rval = -EINVAL;

	data_out_sz = job->request_payload.payload_len;

	if (data_out_sz != sizeof(refresh_triggers)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return rval;
	}

	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &refresh_triggers, sizeof(refresh_triggers));

	switch (refresh_triggers.page_type) {
	case MPI3MR_HDB_REFRESH_TYPE_CURRENT:
		page_action = MPI3_CONFIG_ACTION_READ_CURRENT;
		break;
	case MPI3MR_HDB_REFRESH_TYPE_DEFAULT:
		page_action = MPI3_CONFIG_ACTION_READ_DEFAULT;
		break;
	case MPI3MR_HDB_REFRESH_TYPE_PERSISTENT:
		page_action = MPI3_CONFIG_ACTION_READ_PERSISTENT;
		break;
	default:
		dprint_bsg_err(mrioc,
		    "%s: unsupported refresh trigger, page_type %d\n",
		    __func__, refresh_triggers.page_type);
		return rval;
	}
	rval = mpi3mr_refresh_trigger(mrioc, page_action);

	return rval;
}
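/*
 * Refresh type to config page action mapping used above: CURRENT maps
 * to READ_CURRENT, DEFAULT to READ_DEFAULT and PERSISTENT to
 * READ_PERSISTENT; any other page_type is rejected with -EINVAL before
 * the controller is touched.
 */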
/**
 * mpi3mr_bsg_upload_hdb - Upload a specific HDB to user space
 * @mrioc: Adapter instance reference
 * @job: BSG Job pointer
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_upload_hdb(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_bsg_out_upload_hdb upload_hdb;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_out_size;
	uint32_t data_in_size;

	data_out_size = job->request_payload.payload_len;
	data_in_size = job->reply_payload.payload_len;

	if (data_out_size != sizeof(upload_hdb)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &upload_hdb, sizeof(upload_hdb));

	if ((!upload_hdb.length) || (data_in_size != upload_hdb.length)) {
		dprint_bsg_err(mrioc, "%s: invalid length argument\n",
		    __func__);
		return -EINVAL;
	}
	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, upload_hdb.buf_type);
	if ((!diag_buffer) || (!diag_buffer->addr)) {
		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
		    __func__, upload_hdb.buf_type);
		return -EINVAL;
	}

	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) {
		dprint_bsg_err(mrioc,
		    "%s: invalid buffer status %d for type %d\n",
		    __func__, diag_buffer->status, upload_hdb.buf_type);
		return -EINVAL;
	}

	if ((upload_hdb.start_offset + upload_hdb.length) > diag_buffer->size) {
		dprint_bsg_err(mrioc,
		    "%s: invalid start offset %d, length %d for type %d\n",
		    __func__, upload_hdb.start_offset, upload_hdb.length,
		    upload_hdb.buf_type);
		return -EINVAL;
	}
	sg_copy_from_buffer(job->reply_payload.sg_list,
	    job->reply_payload.sg_cnt,
	    (diag_buffer->addr + upload_hdb.start_offset),
	    data_in_size);
	return 0;
}
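/*
 * The upload window above is validated in three steps: the buffer must
 * exist and be host-allocated, it must be in the released or
 * posted-paused state (i.e. the firmware is no longer writing to it),
 * and start_offset plus length must lie entirely within the buffer
 * before the region is copied into the BSG reply payload.
 */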
/**
 * mpi3mr_bsg_repost_hdb - Re-post HDB
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function retrieves the HDB descriptor corresponding to a
 * given buffer type and, if the HDB is in the released status,
 * posts the HDB with the firmware.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_repost_hdb(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_bsg_out_repost_hdb repost_hdb;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_out_sz;

	data_out_sz = job->request_payload.payload_len;

	if (data_out_sz != sizeof(repost_hdb)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}
	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &repost_hdb, sizeof(repost_hdb));

	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, repost_hdb.buf_type);
	if ((!diag_buffer) || (!diag_buffer->addr)) {
		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
		    __func__, repost_hdb.buf_type);
		return -EINVAL;
	}

	if (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) {
		dprint_bsg_err(mrioc,
		    "%s: invalid buffer status %d for type %d\n",
		    __func__, diag_buffer->status, repost_hdb.buf_type);
		return -EINVAL;
	}

	if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) {
		dprint_bsg_err(mrioc, "%s: post failed for type %d\n",
		    __func__, repost_hdb.buf_type);
		return -EFAULT;
	}
	mpi3mr_set_trigger_data_in_hdb(diag_buffer,
	    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);

	return 0;
}
/**
 * mpi3mr_bsg_query_hdb - Handler for query HDB command
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function prepares and copies the host diagnostic buffer
 * entries to the user buffer.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_query_hdb(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = 0;
	struct mpi3mr_bsg_in_hdb_status *hbd_status;
	struct mpi3mr_hdb_entry *hbd_status_entry;
	u32 length, min_length;
	u8 i;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_in_sz = 0;

	data_in_sz = job->request_payload.payload_len;

	length = (sizeof(*hbd_status) + ((MPI3MR_MAX_NUM_HDB - 1) *
	    sizeof(*hbd_status_entry)));
	hbd_status = kmalloc(length, GFP_KERNEL);
	if (!hbd_status)
		return -ENOMEM;
	hbd_status_entry = &hbd_status->entry[0];

	hbd_status->num_hdb_types = MPI3MR_MAX_NUM_HDB;
	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		hbd_status_entry->buf_type = diag_buffer->type;
		hbd_status_entry->status = diag_buffer->status;
		hbd_status_entry->trigger_type = diag_buffer->trigger_type;
		memcpy(&hbd_status_entry->trigger_data,
		    &diag_buffer->trigger_data,
		    sizeof(hbd_status_entry->trigger_data));
		hbd_status_entry->size = (diag_buffer->size / 1024);
		hbd_status_entry++;
	}
	hbd_status->element_trigger_format =
		MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA;

	if (data_in_sz < 4) {
		dprint_bsg_err(mrioc, "%s: invalid size passed\n", __func__);
		rval = -EINVAL;
		goto out;
	}
	min_length = min(data_in_sz, length);
	if (job->request_payload.payload_len >= min_length) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    hbd_status, min_length);
		rval = 0;
	}
out:
	kfree(hbd_status);
	return rval;
}


/**
 * mpi3mr_enable_logdata - Handler for log data enable
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function enables log data caching in the driver if it is
 * not already enabled and returns the maximum number of log data
 * entries that can be cached in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_logdata_enable logdata_enable;

	if (!mrioc->logdata_buf) {
		mrioc->logdata_entry_sz =
		    (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
		    + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
		mrioc->logdata_buf_idx = 0;
		mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
		    mrioc->logdata_entry_sz, GFP_KERNEL);

		if (!mrioc->logdata_buf)
			return -ENOMEM;
	}

	memset(&logdata_enable, 0, sizeof(logdata_enable));
	logdata_enable.max_entries =
		MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    &logdata_enable, sizeof(logdata_enable));
		return 0;
	}

	return -EINVAL;
}
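/*
 * Each cached log data entry above is sized as the controller reply
 * size minus the fixed portion of the event notification reply (the
 * "- 4" keeps room for the first event data word) plus
 * MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ for the driver-added header, so a
 * single entry can hold the largest log data event the controller may
 * deliver.
 */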
/**
 * mpi3mr_get_logdata - Handler for get log data
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function copies the log data entries to the user buffer
 * when log caching is enabled in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;

	if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
		return -EINVAL;

	num_entries = job->request_payload.payload_len / entry_sz;
	if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
		num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	sz = num_entries * entry_sz;

	if (job->request_payload.payload_len >= sz) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    mrioc->logdata_buf, sz);
		return 0;
	}
	return -EINVAL;
}

/**
 * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function is the handler for PEL enable driver. It
 * validates the application-given class and locale and, if
 * required, aborts the existing PEL wait request and/or issues
 * a new PEL wait request to the firmware and returns.
 *
 * Return: 0 on success and proper error codes on failure.
 */
static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
				  struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_bsg_out_pel_enable pel_enable;
	u8 issue_pel_wait;
	u8 tmp_class;
	u16 tmp_locale;

	if (job->request_payload.payload_len != sizeof(pel_enable)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return rval;
	}

	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	if (mrioc->stop_bsgs) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &pel_enable, sizeof(pel_enable));

	if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
		dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
		    __func__, pel_enable.pel_class);
		rval = 0;
		goto out;
	}
	if (!mrioc->pel_enabled)
		issue_pel_wait = 1;
	else {
		if ((mrioc->pel_class <= pel_enable.pel_class) &&
		    !((mrioc->pel_locale & pel_enable.pel_locale) ^
		      pel_enable.pel_locale)) {
			issue_pel_wait = 0;
			rval = 0;
		} else {
			pel_enable.pel_locale |= mrioc->pel_locale;

			if (mrioc->pel_class < pel_enable.pel_class)
				pel_enable.pel_class = mrioc->pel_class;

			rval = mpi3mr_bsg_pel_abort(mrioc);
			if (rval) {
				dprint_bsg_err(mrioc,
				    "%s: pel_abort failed, status(%ld)\n",
				    __func__, rval);
				goto out;
			}
			issue_pel_wait = 1;
		}
	}
	if (issue_pel_wait) {
		tmp_class = mrioc->pel_class;
		tmp_locale = mrioc->pel_locale;
		mrioc->pel_class = pel_enable.pel_class;
		mrioc->pel_locale = pel_enable.pel_locale;
		mrioc->pel_enabled = 1;
		rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
		if (rval) {
			mrioc->pel_class = tmp_class;
			mrioc->pel_locale = tmp_locale;
			mrioc->pel_enabled = 0;
			dprint_bsg_err(mrioc,
			    "%s: pel get sequence number failed, status(%ld)\n",
			    __func__, rval);
		}
	}

out:
	return rval;
}
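/*
 * When PEL is already enabled, mpi3mr_bsg_pel_enable() only reissues
 * the wait when the requested class/locale pair is not already
 * covered: the cached locale bits are OR-ed into the new request and
 * the numerically lower of the two classes is kept, so the firmware
 * ends up watching the union of all application requests.
 */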
/**
 * mpi3mr_get_all_tgt_info - Get all target information
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the driver-managed target devices' device
 * handle, persistent ID, bus ID and target ID to the user
 * provided buffer for the specific controller. This function
 * also provides the number of devices managed by the driver for
 * the specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_devices = 0, i = 0, size;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_device_map_info *devmap_info = NULL;
	struct mpi3mr_all_tgt_info *alltgt_info = NULL;
	uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;

	if (job->request_payload.payload_len < sizeof(u32)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		num_devices++;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if ((job->request_payload.payload_len <= sizeof(u64)) ||
	    list_empty(&mrioc->tgtdev_list)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &num_devices, sizeof(num_devices));
		return 0;
	}

	kern_entrylen = num_devices * sizeof(*devmap_info);
	size = sizeof(u64) + kern_entrylen;
	alltgt_info = kzalloc(size, GFP_KERNEL);
	if (!alltgt_info)
		return -ENOMEM;

	devmap_info = alltgt_info->dmi;
	memset((u8 *)devmap_info, 0xFF, kern_entrylen);
	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (i < num_devices) {
			devmap_info[i].handle = tgtdev->dev_handle;
			devmap_info[i].perst_id = tgtdev->perst_id;
			if (tgtdev->host_exposed && tgtdev->starget) {
				devmap_info[i].target_id = tgtdev->starget->id;
				devmap_info[i].bus_id =
				    tgtdev->starget->channel;
			}
			i++;
		}
	}
	num_devices = i;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	alltgt_info->num_devices = num_devices;

	usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
		sizeof(*devmap_info);
	usr_entrylen *= sizeof(*devmap_info);
	min_entrylen = min(usr_entrylen, kern_entrylen);

	sg_copy_from_buffer(job->request_payload.sg_list,
			    job->request_payload.sg_cnt,
			    alltgt_info, (min_entrylen + sizeof(u64)));
	kfree(alltgt_info);
	return 0;
}
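/*
 * Layout of the buffer returned above: a u64-sized header whose
 * leading field carries num_devices, followed by one
 * mpi3mr_device_map_info entry per target. Entries are pre-filled with
 * 0xFF so consumers can tell unpopulated fields (e.g. devices not
 * exposed to the SCSI midlayer) apart from valid IDs.
 */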
/**
 * mpi3mr_get_change_count - Get topology change count
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the topology change count provided by the
 * driver in events and cached in the driver to the user
 * provided buffer for the specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_change_count chgcnt;

	memset(&chgcnt, 0, sizeof(chgcnt));
	chgcnt.change_count = mrioc->change_count;
	if (job->request_payload.payload_len >= sizeof(chgcnt)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &chgcnt, sizeof(chgcnt));
		return 0;
	}
	return -EINVAL;
}

/**
 * mpi3mr_bsg_adp_reset - Issue controller reset
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function identifies the user-provided reset type, issues
 * the appropriate reset to the controller, waits for it to
 * complete, reinitializes the controller and then returns.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = -EINVAL;
	u8 save_snapdump;
	struct mpi3mr_bsg_adp_reset adpreset;

	if (job->request_payload.payload_len !=
	    sizeof(adpreset)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		goto out;
	}

	if (mrioc->unrecoverable || mrioc->block_on_pci_err)
		return -EINVAL;

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &adpreset, sizeof(adpreset));

	switch (adpreset.reset_type) {
	case MPI3MR_BSG_ADPRESET_SOFT:
		save_snapdump = 0;
		break;
	case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
		save_snapdump = 1;
		break;
	default:
		dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
		    __func__, adpreset.reset_type);
		goto out;
	}

	rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
	    save_snapdump);

	if (rval)
		dprint_bsg_err(mrioc,
		    "%s: reset handler returned error(%ld) for reset type %d\n",
		    __func__, rval, adpreset.reset_type);
out:
	return rval;
}
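/*
 * Reset type selection above: MPI3MR_BSG_ADPRESET_SOFT performs the
 * reset without saving a snapdump, while MPI3MR_BSG_ADPRESET_DIAG_FAULT
 * asks the reset handler to save one; both funnel into
 * mpi3mr_soft_reset_handler() with MPI3MR_RESET_FROM_APP as the reason.
 */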
/**
 * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function provides adapter information for the given
 * controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	enum mpi3mr_iocstate ioc_state;
	struct mpi3mr_bsg_in_adpinfo adpinfo;

	memset(&adpinfo, 0, sizeof(adpinfo));
	adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
	adpinfo.pci_dev_id = mrioc->pdev->device;
	adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
	adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
	adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
	adpinfo.pci_bus = mrioc->pdev->bus->number;
	adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
	adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
	adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
	adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
	    sizeof(adpinfo.driver_info));

	if (job->request_payload.payload_len >= sizeof(adpinfo)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &adpinfo, sizeof(adpinfo));
		return 0;
	}
	return -EINVAL;
}

/**
 * mpi3mr_bsg_process_drv_cmds - Driver Command handler
 * @job: BSG job reference
 *
 * This function is the top level handler for driver commands;
 * it does basic validation of the buffer, identifies the opcode
 * and switches to the correct sub handler.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_ioc *mrioc = NULL;
	struct mpi3mr_bsg_packet *bsg_req = NULL;
	struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;

	bsg_req = job->request;
	drvrcmd = &bsg_req->cmd.drvrcmd;

	mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
	if (!mrioc)
		return -ENODEV;

	if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
		rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
		return rval;
	}

	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
		return -ERESTARTSYS;

	switch (drvrcmd->opcode) {
	case MPI3MR_DRVBSG_OPCODE_ADPRESET:
		rval = mpi3mr_bsg_adp_reset(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
		rval = mpi3mr_get_all_tgt_info(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
		rval = mpi3mr_get_change_count(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
		rval = mpi3mr_enable_logdata(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
		rval = mpi3mr_get_logdata(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_PELENABLE:
		rval = mpi3mr_bsg_pel_enable(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_QUERY_HDB:
		rval = mpi3mr_bsg_query_hdb(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_REPOST_HDB:
		rval = mpi3mr_bsg_repost_hdb(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB:
		rval = mpi3mr_bsg_upload_hdb(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS:
		rval = mpi3mr_bsg_refresh_hdb_triggers(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
	default:
		pr_err("%s: unsupported driver command opcode %d\n",
		    MPI3MR_DRIVER_NAME, drvrcmd->opcode);
		break;
	}
	mutex_unlock(&mrioc->bsg_cmds.mutex);
	return rval;
}
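/*
 * MPI3MR_DRVBSG_OPCODE_ADPINFO is served above before taking
 * mrioc->bsg_cmds.mutex so adapter information remains queryable even
 * while another BSG command holds the mutex; every other opcode is
 * serialized through it.
 */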
/**
 * mpi3mr_total_num_ioctl_sges - Count number of SGEs required
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 *
 * This function returns the total number of data SGEs required,
 * including zero length SGEs and excluding the management
 * request and response buffers, for the given list of data
 * buffer descriptors.
 *
 * Return: Number of SGE elements needed
 */
static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs,
					      u8 bufcnt)
{
	u16 i, sge_count = 0;

	for (i = 0; i < bufcnt; i++, drv_bufs++) {
		if (drv_bufs->data_dir == DMA_NONE ||
		    drv_bufs->kern_buf)
			continue;
		sge_count += drv_bufs->num_dma_desc;
		if (!drv_bufs->num_dma_desc)
			sge_count++;
	}
	return sge_count;
}

/**
 * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
 * @mrioc: Adapter instance reference
 * @mpi_req: MPI request
 * @sgl_offset: offset to start sgl in the MPI request
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 * @is_rmc: Indicates whether the buffer list has a management
 *	    command buffer
 * @is_rmr: Indicates whether the buffer list has a management
 *	    response buffer
 * @num_datasges: Number of data buffers in the list
 *
 * This function places the DMA address of the given buffers in
 * proper format as SGEs in the given MPI request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req,
	u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs,
	u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges)
{
	struct mpi3_request_header *mpi_header =
		(struct mpi3_request_header *)mpi_req;
	u8 *sgl = (mpi_req + sgl_offset), count = 0;
	struct mpi3_mgmt_passthrough_request *rmgmt_req =
	    (struct mpi3_mgmt_passthrough_request *)mpi_req;
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag;
	u16 available_sges, i, sges_needed;
	u32 sge_element_size = sizeof(struct mpi3_sge_common);
	bool chain_used = false;

	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
		MPI3_SGE_FLAGS_DLAS_SYSTEM;
	sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
	sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt);

	if (is_rmc) {
		mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
		    sgl_flags_last, drv_buf_iter->kern_buf_len,
		    drv_buf_iter->kern_buf_dma);
		sgl = (u8 *)drv_buf_iter->kern_buf +
			drv_buf_iter->bsg_buf_len;
		available_sges = (drv_buf_iter->kern_buf_len -
		    drv_buf_iter->bsg_buf_len) / sge_element_size;

		if (sges_needed > available_sges)
			return -1;

		chain_used = true;
		drv_buf_iter++;
		count++;
		if (is_rmr) {
			mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
			    sgl_flags_last, drv_buf_iter->kern_buf_len,
			    drv_buf_iter->kern_buf_dma);
			drv_buf_iter++;
			count++;
		} else
			mpi3mr_build_zero_len_sge(
			    &rmgmt_req->response_sgl);
		if (num_datasges) {
			i = 0;
			goto build_sges;
		}
	} else {
		if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ)
			return -1;
		available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
		    sge_element_size;
		if (!available_sges)
			return -1;
	}
	if (!num_datasges) {
		mpi3mr_build_zero_len_sge(sgl);
		return 0;
	}
	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
		if ((sges_needed > 2) || (sges_needed > available_sges))
			return -1;
		for (; count < bufcnt; count++, drv_buf_iter++) {
			if (drv_buf_iter->data_dir == DMA_NONE ||
			    !drv_buf_iter->num_dma_desc)
				continue;
			mpi3mr_add_sg_single(sgl, sgl_flags_last,
			    drv_buf_iter->dma_desc[0].size,
			    drv_buf_iter->dma_desc[0].dma_addr);
			sgl += sge_element_size;
		}
		return 0;
	}
	i = 0;

build_sges:
	for (; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		if (!drv_buf_iter->num_dma_desc) {
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flag_eob;
			if (num_datasges == 1)
				flag = sgl_flags_last;
			mpi3mr_add_sg_single(sgl, flag, 0, 0);
			sgl += sge_element_size;
			sges_needed--;
			available_sges--;
			num_datasges--;
			continue;
		}
		for (; i < drv_buf_iter->num_dma_desc; i++) {
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags;
			if (i == (drv_buf_iter->num_dma_desc - 1)) {
				if (num_datasges == 1)
					flag = sgl_flags_last;
				else
					flag = sgl_flag_eob;
			}

			mpi3mr_add_sg_single(sgl, flag,
			    drv_buf_iter->dma_desc[i].size,
			    drv_buf_iter->dma_desc[i].dma_addr);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
		}
		num_datasges--;
		i = 0;
	}
	return 0;

setup_chain:
	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
	if (sges_needed > available_sges)
		return -1;
	mpi3mr_add_sg_single(sgl, last_chain_sgl_flag,
	    (sges_needed * sge_element_size),
	    mrioc->ioctl_chain_sge.dma_addr);
	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
	sgl = (u8 *)mrioc->ioctl_chain_sge.addr;
	chain_used = true;
	goto build_sges;
}
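/*
 * Chain fallback in the SGL builder above: when the request frame has
 * exactly one SGE slot left but more data SGEs are pending, that slot
 * becomes a LAST_CHAIN element pointing at the preallocated
 * mrioc->ioctl_chain_sge region and the remaining SGEs are written
 * there instead. The chain region is used at most once per request;
 * running out of space inside it fails the build.
 */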
count++, drv_buf_iter++) { 1780 if (drv_buf_iter->data_dir == DMA_NONE) 1781 continue; 1782 if (!drv_buf_iter->num_dma_desc) { 1783 if (chain_used && !available_sges) 1784 return -1; 1785 if (!chain_used && (available_sges == 1) && 1786 (sges_needed > 1)) 1787 goto setup_chain; 1788 flag = sgl_flag_eob; 1789 if (num_datasges == 1) 1790 flag = sgl_flags_last; 1791 mpi3mr_add_sg_single(sgl, flag, 0, 0); 1792 sgl += sge_element_size; 1793 sges_needed--; 1794 available_sges--; 1795 num_datasges--; 1796 continue; 1797 } 1798 for (; i < drv_buf_iter->num_dma_desc; i++) { 1799 if (chain_used && !available_sges) 1800 return -1; 1801 if (!chain_used && (available_sges == 1) && 1802 (sges_needed > 1)) 1803 goto setup_chain; 1804 flag = sgl_flags; 1805 if (i == (drv_buf_iter->num_dma_desc - 1)) { 1806 if (num_datasges == 1) 1807 flag = sgl_flags_last; 1808 else 1809 flag = sgl_flag_eob; 1810 } 1811 1812 mpi3mr_add_sg_single(sgl, flag, 1813 drv_buf_iter->dma_desc[i].size, 1814 drv_buf_iter->dma_desc[i].dma_addr); 1815 sgl += sge_element_size; 1816 available_sges--; 1817 sges_needed--; 1818 } 1819 num_datasges--; 1820 i = 0; 1821 } 1822 return 0; 1823 1824 setup_chain: 1825 available_sges = mrioc->ioctl_chain_sge.size / sge_element_size; 1826 if (sges_needed > available_sges) 1827 return -1; 1828 mpi3mr_add_sg_single(sgl, last_chain_sgl_flag, 1829 (sges_needed * sge_element_size), 1830 mrioc->ioctl_chain_sge.dma_addr); 1831 memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size); 1832 sgl = (u8 *)mrioc->ioctl_chain_sge.addr; 1833 chain_used = true; 1834 goto build_sges; 1835 } 1836 1837 /** 1838 * mpi3mr_get_nvme_data_fmt - returns the NVMe data format 1839 * @nvme_encap_request: NVMe encapsulated MPI request 1840 * 1841 * This function returns the type of the data format specified 1842 * in user provided NVMe command in NVMe encapsulated request. 1843 * 1844 * Return: Data format of the NVMe command (PRP/SGL etc) 1845 */ 1846 static unsigned int mpi3mr_get_nvme_data_fmt( 1847 struct mpi3_nvme_encapsulated_request *nvme_encap_request) 1848 { 1849 u8 format = 0; 1850 1851 format = ((nvme_encap_request->command[0] & 0xc000) >> 14); 1852 return format; 1853 1854 } 1855 1856 /** 1857 * mpi3mr_build_nvme_sgl - SGL constructor for NVME 1858 * encapsulated request 1859 * @mrioc: Adapter instance reference 1860 * @nvme_encap_request: NVMe encapsulated MPI request 1861 * @drv_bufs: DMA address of the buffers to be placed in sgl 1862 * @bufcnt: Number of DMA buffers 1863 * 1864 * This function places the DMA address of the given buffers in 1865 * proper format as SGEs in the given NVMe encapsulated request. 1866 * 1867 * Return: 0 on success, -1 on failure 1868 */ 1869 static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc, 1870 struct mpi3_nvme_encapsulated_request *nvme_encap_request, 1871 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) 1872 { 1873 struct mpi3mr_nvme_pt_sge *nvme_sgl; 1874 __le64 sgl_dma; 1875 u8 count; 1876 size_t length = 0; 1877 u16 available_sges = 0, i; 1878 u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge); 1879 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 1880 u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << 1881 mrioc->facts.sge_mod_shift) << 32); 1882 u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << 1883 mrioc->facts.sge_mod_shift) << 32; 1884 u32 size; 1885 1886 nvme_sgl = (struct mpi3mr_nvme_pt_sge *) 1887 ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET); 1888 1889 /* 1890 * Not all commands require a data transfer. 
If no data, just return 1891 * without constructing any sgl. 1892 */ 1893 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 1894 if (drv_buf_iter->data_dir == DMA_NONE) 1895 continue; 1896 length = drv_buf_iter->kern_buf_len; 1897 break; 1898 } 1899 if (!length || !drv_buf_iter->num_dma_desc) 1900 return 0; 1901 1902 if (drv_buf_iter->num_dma_desc == 1) { 1903 available_sges = 1; 1904 goto build_sges; 1905 } 1906 1907 sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr); 1908 if (sgl_dma & sgemod_mask) { 1909 dprint_bsg_err(mrioc, 1910 "%s: SGL chain address collides with SGE modifier\n", 1911 __func__); 1912 return -1; 1913 } 1914 1915 sgl_dma &= ~sgemod_mask; 1916 sgl_dma |= sgemod_val; 1917 1918 memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size); 1919 available_sges = mrioc->ioctl_chain_sge.size / sge_element_size; 1920 if (available_sges < drv_buf_iter->num_dma_desc) 1921 return -1; 1922 memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge)); 1923 nvme_sgl->base_addr = sgl_dma; 1924 size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge); 1925 nvme_sgl->length = cpu_to_le32(size); 1926 nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT; 1927 nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr; 1928 1929 build_sges: 1930 for (i = 0; i < drv_buf_iter->num_dma_desc; i++) { 1931 sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr); 1932 if (sgl_dma & sgemod_mask) { 1933 dprint_bsg_err(mrioc, 1934 "%s: SGL address collides with SGE modifier\n", 1935 __func__); 1936 return -1; 1937 } 1938 1939 sgl_dma &= ~sgemod_mask; 1940 sgl_dma |= sgemod_val; 1941 1942 nvme_sgl->base_addr = sgl_dma; 1943 nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size); 1944 nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT; 1945 nvme_sgl++; 1946 available_sges--; 1947 } 1948 1949 return 0; 1950 } 1951 1952 /** 1953 * mpi3mr_build_nvme_prp - PRP constructor for NVME 1954 * encapsulated request 1955 * @mrioc: Adapter instance reference 1956 * @nvme_encap_request: NVMe encapsulated MPI request 1957 * @drv_bufs: DMA address of the buffers to be placed in SGL 1958 * @bufcnt: Number of DMA buffers 1959 * 1960 * This function places the DMA address of the given buffers in 1961 * proper format as PRP entries in the given NVMe encapsulated 1962 * request. 
1963 * 1964 * Return: 0 on success, -1 on failure 1965 */ 1966 static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc, 1967 struct mpi3_nvme_encapsulated_request *nvme_encap_request, 1968 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) 1969 { 1970 int prp_size = MPI3MR_NVME_PRP_SIZE; 1971 __le64 *prp_entry, *prp1_entry, *prp2_entry; 1972 __le64 *prp_page; 1973 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; 1974 u32 offset, entry_len, dev_pgsz; 1975 u32 page_mask_result, page_mask; 1976 size_t length = 0, desc_len; 1977 u8 count; 1978 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 1979 u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << 1980 mrioc->facts.sge_mod_shift) << 32); 1981 u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << 1982 mrioc->facts.sge_mod_shift) << 32; 1983 u16 dev_handle = nvme_encap_request->dev_handle; 1984 struct mpi3mr_tgt_dev *tgtdev; 1985 u16 desc_count = 0; 1986 1987 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1988 if (!tgtdev) { 1989 dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n", 1990 __func__, dev_handle); 1991 return -1; 1992 } 1993 1994 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { 1995 dprint_bsg_err(mrioc, 1996 "%s: NVMe device page size is zero for handle 0x%04x\n", 1997 __func__, dev_handle); 1998 mpi3mr_tgtdev_put(tgtdev); 1999 return -1; 2000 } 2001 2002 dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); 2003 mpi3mr_tgtdev_put(tgtdev); 2004 page_mask = dev_pgsz - 1; 2005 2006 if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) { 2007 dprint_bsg_err(mrioc, 2008 "%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n", 2009 __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle); 2010 return -1; 2011 } 2012 2013 if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) { 2014 dprint_bsg_err(mrioc, 2015 "%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n", 2016 __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle); 2017 return -1; 2018 } 2019 2020 /* 2021 * Not all commands require a data transfer. If no data, just return 2022 * without constructing any PRP. 2023 */ 2024 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 2025 if (drv_buf_iter->data_dir == DMA_NONE) 2026 continue; 2027 length = drv_buf_iter->kern_buf_len; 2028 break; 2029 } 2030 2031 if (!length || !drv_buf_iter->num_dma_desc) 2032 return 0; 2033 2034 for (count = 0; count < drv_buf_iter->num_dma_desc; count++) { 2035 dma_addr = drv_buf_iter->dma_desc[count].dma_addr; 2036 if (dma_addr & page_mask) { 2037 dprint_bsg_err(mrioc, 2038 "%s:dma_addr %pad is not aligned with page size 0x%x\n", 2039 __func__, &dma_addr, dev_pgsz); 2040 return -1; 2041 } 2042 } 2043 2044 dma_addr = drv_buf_iter->dma_desc[0].dma_addr; 2045 desc_len = drv_buf_iter->dma_desc[0].size; 2046 2047 mrioc->prp_sz = 0; 2048 mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev, 2049 dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL); 2050 2051 if (!mrioc->prp_list_virt) 2052 return -1; 2053 mrioc->prp_sz = dev_pgsz; 2054 2055 /* 2056 * Set pointers to PRP1 and PRP2, which are in the NVMe command. 2057 * PRP1 is located at a 24 byte offset from the start of the NVMe 2058 * command. Then set the current PRP entry pointer to PRP1. 
2059 */ 2060 prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + 2061 MPI3MR_NVME_CMD_PRP1_OFFSET); 2062 prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + 2063 MPI3MR_NVME_CMD_PRP2_OFFSET); 2064 prp_entry = prp1_entry; 2065 /* 2066 * For the PRP entries, use the specially allocated buffer of 2067 * contiguous memory. 2068 */ 2069 prp_page = (__le64 *)mrioc->prp_list_virt; 2070 prp_page_dma = mrioc->prp_list_dma; 2071 2072 /* 2073 * Check if we are within 1 entry of a page boundary we don't 2074 * want our first entry to be a PRP List entry. 2075 */ 2076 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; 2077 if (!page_mask_result) { 2078 dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n", 2079 __func__); 2080 goto err_out; 2081 } 2082 2083 /* 2084 * Set PRP physical pointer, which initially points to the current PRP 2085 * DMA memory page. 2086 */ 2087 prp_entry_dma = prp_page_dma; 2088 2089 2090 /* Loop while the length is not zero. */ 2091 while (length) { 2092 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 2093 if (!page_mask_result && (length > dev_pgsz)) { 2094 dprint_bsg_err(mrioc, 2095 "%s: single PRP page is not sufficient\n", 2096 __func__); 2097 goto err_out; 2098 } 2099 2100 /* Need to handle if entry will be part of a page. */ 2101 offset = dma_addr & page_mask; 2102 entry_len = dev_pgsz - offset; 2103 2104 if (prp_entry == prp1_entry) { 2105 /* 2106 * Must fill in the first PRP pointer (PRP1) before 2107 * moving on. 2108 */ 2109 *prp1_entry = cpu_to_le64(dma_addr); 2110 if (*prp1_entry & sgemod_mask) { 2111 dprint_bsg_err(mrioc, 2112 "%s: PRP1 address collides with SGE modifier\n", 2113 __func__); 2114 goto err_out; 2115 } 2116 *prp1_entry &= ~sgemod_mask; 2117 *prp1_entry |= sgemod_val; 2118 2119 /* 2120 * Now point to the second PRP entry within the 2121 * command (PRP2). 2122 */ 2123 prp_entry = prp2_entry; 2124 } else if (prp_entry == prp2_entry) { 2125 /* 2126 * Should the PRP2 entry be a PRP List pointer or just 2127 * a regular PRP pointer? If there is more than one 2128 * more page of data, must use a PRP List pointer. 2129 */ 2130 if (length > dev_pgsz) { 2131 /* 2132 * PRP2 will contain a PRP List pointer because 2133 * more PRP's are needed with this command. The 2134 * list will start at the beginning of the 2135 * contiguous buffer. 2136 */ 2137 *prp2_entry = cpu_to_le64(prp_entry_dma); 2138 if (*prp2_entry & sgemod_mask) { 2139 dprint_bsg_err(mrioc, 2140 "%s: PRP list address collides with SGE modifier\n", 2141 __func__); 2142 goto err_out; 2143 } 2144 *prp2_entry &= ~sgemod_mask; 2145 *prp2_entry |= sgemod_val; 2146 2147 /* 2148 * The next PRP Entry will be the start of the 2149 * first PRP List. 2150 */ 2151 prp_entry = prp_page; 2152 continue; 2153 } else { 2154 /* 2155 * After this, the PRP Entries are complete. 2156 * This command uses 2 PRP's and no PRP list. 2157 */ 2158 *prp2_entry = cpu_to_le64(dma_addr); 2159 if (*prp2_entry & sgemod_mask) { 2160 dprint_bsg_err(mrioc, 2161 "%s: PRP2 collides with SGE modifier\n", 2162 __func__); 2163 goto err_out; 2164 } 2165 *prp2_entry &= ~sgemod_mask; 2166 *prp2_entry |= sgemod_val; 2167 } 2168 } else { 2169 /* 2170 * Put entry in list and bump the addresses. 2171 * 2172 * After PRP1 and PRP2 are filled in, this will fill in 2173 * all remaining PRP entries in a PRP List, one per 2174 * each time through the loop. 
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			if (*prp_entry & sgemod_mask) {
				dprint_bsg_err(mrioc,
				    "%s: PRP address collides with SGE modifier\n",
				    __func__);
				goto err_out;
			}
			*prp_entry &= ~sgemod_mask;
			*prp_entry |= sgemod_val;
			prp_entry++;
			prp_entry_dma += prp_size;
		}

		/* Decrement length, accounting for the last partial page. */
		if (entry_len >= length) {
			length = 0;
		} else {
			if (entry_len <= desc_len) {
				dma_addr += entry_len;
				desc_len -= entry_len;
			}
			if (!desc_len) {
				if ((++desc_count) >=
				    drv_buf_iter->num_dma_desc) {
					dprint_bsg_err(mrioc,
					    "%s: Invalid len %zd while building PRP\n",
					    __func__, length);
					goto err_out;
				}
				dma_addr =
				    drv_buf_iter->dma_desc[desc_count].dma_addr;
				desc_len =
				    drv_buf_iter->dma_desc[desc_count].size;
			}
			length -= entry_len;
		}
	}

	return 0;
err_out:
	if (mrioc->prp_list_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
		    mrioc->prp_list_virt, mrioc->prp_list_dma);
		mrioc->prp_list_virt = NULL;
	}
	return -1;
}

/**
 * mpi3mr_map_data_buffer_dma - build dma descriptors for data
 * buffers
 * @mrioc: Adapter instance reference
 * @drv_buf: buffer map descriptor
 * @desc_count: Number of already consumed dma descriptors
 *
 * This function computes how many pre-allocated DMA descriptors
 * are required for the given data buffer and, if that many
 * descriptors are free, sets up the mapping of the scattered DMA
 * addresses to the given data buffer. If the data direction of
 * the buffer is DMA_TO_DEVICE, the actual data is copied to the
 * DMA buffers.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc,
				      struct mpi3mr_buf_map *drv_buf,
				      u16 desc_count)
{
	u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE;
	u32 buf_len = drv_buf->kern_buf_len, copied_len = 0;

	if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
		needed_desc++;
	if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
		dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n",
		    __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
		return -1;
	}
	drv_buf->dma_desc = kcalloc(needed_desc, sizeof(*drv_buf->dma_desc),
				    GFP_KERNEL);
	if (!drv_buf->dma_desc)
		return -1;
	for (i = 0; i < needed_desc; i++, desc_count++) {
		drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr;
		drv_buf->dma_desc[i].dma_addr =
		    mrioc->ioctl_sge[desc_count].dma_addr;
		if (buf_len < mrioc->ioctl_sge[desc_count].size)
			drv_buf->dma_desc[i].size = buf_len;
		else
			drv_buf->dma_desc[i].size =
			    mrioc->ioctl_sge[desc_count].size;
		buf_len -= drv_buf->dma_desc[i].size;
		memset(drv_buf->dma_desc[i].addr, 0,
		    mrioc->ioctl_sge[desc_count].size);
		if (drv_buf->data_dir == DMA_TO_DEVICE) {
			memcpy(drv_buf->dma_desc[i].addr,
			    drv_buf->bsg_buf + copied_len,
			    drv_buf->dma_desc[i].size);
			copied_len += drv_buf->dma_desc[i].size;
		}
	}
	drv_buf->num_dma_desc = needed_desc;
	return 0;
}

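/*
 * Descriptor-count arithmetic for mpi3mr_map_data_buffer_dma(), with
 * illustrative numbers (the real segment size is set elsewhere via
 * MPI3MR_IOCTL_SGE_SIZE): with 8 KiB pre-allocated segments, a 20 KiB
 * buffer needs two full descriptors plus one for the 4 KiB remainder,
 * so needed_desc = 3; the call fails up front unless at least three of
 * the MPI3MR_NUM_IOCTL_SGE segments are still unconsumed.
 */
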
/**
 * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
 * @job: BSG job reference
 *
 * This function is the top-level handler for MPI pass-through
 * commands. It does basic validation of the input data buffers,
 * identifies the given buffer types and the MPI command, allocates
 * DMA-able memory for the user-given buffers, constructs the SGL
 * properly, and passes the command to the firmware.
 *
 * Once the MPI command is completed, the driver copies the data,
 * if any, along with the reply and sense information, to the
 * user-provided buffers. If the command times out, a controller
 * reset is issued prior to returning.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_ioc *mrioc = NULL;
	u8 *mpi_req = NULL, *sense_buff_k = NULL;
	u8 mpi_msg_size = 0;
	struct mpi3mr_bsg_packet *bsg_req = NULL;
	struct mpi3mr_bsg_mptcmd *karg;
	struct mpi3mr_buf_entry *buf_entries = NULL;
	struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
	u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0;
	u8 din_cnt = 0, dout_cnt = 0;
	u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
	u8 block_io = 0, nvme_fmt = 0, resp_code = 0;
	struct mpi3_request_header *mpi_header = NULL;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_scsi_task_mgmt_request *tm_req;
	u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
	u16 dev_handle;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
	struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
	u32 din_size = 0, dout_size = 0;
	u8 *din_buf = NULL, *dout_buf = NULL;
	u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
	u16 rmc_size = 0, desc_count = 0;

	bsg_req = job->request;
	karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;

	mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
	if (!mrioc)
		return -ENODEV;

	if (!mrioc->ioctl_sges_allocated) {
		dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
		    __func__);
		return -ENOMEM;
	}

	if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
		karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;

	mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
	if (!mpi_req)
		return -ENOMEM;
	mpi_header = (struct mpi3_request_header *)mpi_req;

	bufcnt = karg->buf_entry_list.num_of_entries;
	drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
	if (!drv_bufs) {
		rval = -ENOMEM;
		goto out;
	}

	dout_buf = kzalloc(job->request_payload.payload_len,
	    GFP_KERNEL);
	if (!dout_buf) {
		rval = -ENOMEM;
		goto out;
	}

	din_buf = kzalloc(job->reply_payload.payload_len,
	    GFP_KERNEL);
	if (!din_buf) {
		rval = -ENOMEM;
		goto out;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    dout_buf, job->request_payload.payload_len);

	buf_entries = karg->buf_entry_list.buf_entry;
	sgl_din_iter = din_buf;
	sgl_dout_iter = dout_buf;
	drv_buf_iter = drv_bufs;

	for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
		switch (buf_entries->buf_type) {
		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_TO_DEVICE;
			is_rmcb = 1;
			if ((count != 0) || !buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
			is_rmrb = 1;
			if (count != 1 || !is_rmcb || !buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_DATA_IN:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
			din_cnt++;
			din_size += buf_entries->buf_len;
			if ((din_cnt > 1) && !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_DATA_OUT:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_TO_DEVICE;
			dout_cnt++;
			dout_size += buf_entries->buf_len;
			if ((dout_cnt > 1) && !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			mpirep_offset = count;
			if (!buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			erb_offset = count;
			if (!buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			mpi_msg_size = buf_entries->buf_len;
			if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
			    (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
				dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
				    __func__);
				rval = -EINVAL;
				goto out;
			}
			memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
			break;
		default:
			invalid_be = 1;
			break;
		}
		if (invalid_be) {
			dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}

		if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
			dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}
		if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
			dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}

		drv_buf_iter->bsg_buf = sgl_iter;
		drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
	}

	if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) {
		dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
		    __func__, __LINE__, mpi_header->function, din_size,
		    dout_size);
		rval = -EINVAL;
		goto out;
	}

	if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
		    __func__, __LINE__, mpi_header->function, din_size);
		rval = -EINVAL;
		goto out;
	}
	if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
		    __func__, __LINE__, mpi_header->function, dout_size);
		rval = -EINVAL;
		goto out;
	}

	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
		if (din_size > MPI3MR_IOCTL_SGE_SIZE ||
		    dout_size > MPI3MR_IOCTL_SGE_SIZE) {
			dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
			    __func__, __LINE__, din_cnt, dout_cnt, din_size,
			    dout_size);
			rval = -EINVAL;
			goto out;
		}
	}

	drv_buf_iter = drv_bufs;
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;

		drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
		if (is_rmcb && !count) {
			drv_buf_iter->kern_buf_len =
			    mrioc->ioctl_chain_sge.size;
			drv_buf_iter->kern_buf =
			    mrioc->ioctl_chain_sge.addr;
			drv_buf_iter->kern_buf_dma =
			    mrioc->ioctl_chain_sge.dma_addr;
			drv_buf_iter->dma_desc = NULL;
			drv_buf_iter->num_dma_desc = 0;
			memset(drv_buf_iter->kern_buf, 0,
			    drv_buf_iter->kern_buf_len);
			tmplen = min(drv_buf_iter->kern_buf_len,
			    drv_buf_iter->bsg_buf_len);
			rmc_size = tmplen;
			memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
		} else if (is_rmrb && (count == 1)) {
			drv_buf_iter->kern_buf_len =
			    mrioc->ioctl_resp_sge.size;
			drv_buf_iter->kern_buf =
			    mrioc->ioctl_resp_sge.addr;
			drv_buf_iter->kern_buf_dma =
			    mrioc->ioctl_resp_sge.dma_addr;
			drv_buf_iter->dma_desc = NULL;
			drv_buf_iter->num_dma_desc = 0;
			memset(drv_buf_iter->kern_buf, 0,
			    drv_buf_iter->kern_buf_len);
			tmplen = min(drv_buf_iter->kern_buf_len,
			    drv_buf_iter->bsg_buf_len);
			drv_buf_iter->kern_buf_len = tmplen;
			memset(drv_buf_iter->bsg_buf, 0,
			    drv_buf_iter->bsg_buf_len);
		} else {
			if (!drv_buf_iter->kern_buf_len)
				continue;
			if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
				rval = -ENOMEM;
				dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
				    __func__, __LINE__);
				goto out;
			}
			desc_count += drv_buf_iter->num_dma_desc;
		}
	}

	if (erb_offset != 0xFF) {
		sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
		if (!sense_buff_k) {
			rval = -ENOMEM;
			goto out;
		}
	}

	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
		rval = -ERESTARTSYS;
		goto out;
	}
	if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
		rval = -EAGAIN;
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		rval = -EFAULT;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		rval = -EAGAIN;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		rval = -EAGAIN;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}

	if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
		nvme_fmt = mpi3mr_get_nvme_data_fmt(
			(struct mpi3_nvme_encapsulated_request *)mpi_req);
		if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
			if (mpi3mr_build_nvme_prp(mrioc,
			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
			    drv_bufs, bufcnt)) {
				rval = -ENOMEM;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				goto out;
			}
		} else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
			nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
			if (mpi3mr_build_nvme_sgl(mrioc,
			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
			    drv_bufs, bufcnt)) {
				rval = -EINVAL;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				goto out;
			}
		} else {
			dprint_bsg_err(mrioc,
			    "%s: invalid NVMe command format\n", __func__);
			rval = -EINVAL;
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			goto out;
		}
	} else {
		if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size,
			drv_bufs, bufcnt, is_rmcb, is_rmrb,
			(dout_cnt + din_cnt))) {
			dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__);
			rval = -EAGAIN;
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			goto out;
		}
	}

	if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
		tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
		if (tm_req->task_type !=
		    MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			dev_handle = tm_req->dev_handle;
			block_io = 1;
		}
	}
	if (block_io) {
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
			stgt_priv = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
			atomic_inc(&stgt_priv->block_io);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->bsg_cmds.is_waiting = 1;
	mrioc->bsg_cmds.callback = NULL;
	mrioc->bsg_cmds.is_sense = 0;
	mrioc->bsg_cmds.sensebuf = sense_buff_k;
	memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
	mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
	if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
		dprint_bsg_info(mrioc,
		    "%s: posting bsg request to the controller\n", __func__);
		dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
		    "bsg_mpi3_req");
		if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
			drv_buf_iter = &drv_bufs[0];
			dprint_dump(drv_buf_iter->kern_buf,
			    rmc_size, "mpi3_mgmt_req");
		}
	}

	init_completion(&mrioc->bsg_cmds.done);
	rval = mpi3mr_admin_request_post(mrioc, mpi_req,
	    MPI3MR_ADMIN_REQ_FRAME_SZ, 0);

	if (rval) {
		mrioc->bsg_cmds.is_waiting = 0;
		dprint_bsg_err(mrioc,
		    "%s: posting bsg request failed\n", __func__);
		rval = -EAGAIN;
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->bsg_cmds.done,
	    (karg->timeout * HZ));
	if (block_io && stgt_priv)
		atomic_dec(&stgt_priv->block_io);
	if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->bsg_cmds.is_waiting = 0;
		rval = -EAGAIN;
		if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
			goto out_unlock;
		if (((mpi_header->function != MPI3_FUNCTION_SCSI_IO) &&
		    (mpi_header->function != MPI3_FUNCTION_NVME_ENCAPSULATED))
		    || (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR)) {
			ioc_info(mrioc, "%s: bsg request timed out after %d seconds\n",
			    __func__, karg->timeout);
			if (!(mrioc->logging_level & MPI3_DEBUG_BSG_INFO)) {
				dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
				    "bsg_mpi3_req");
				if (mpi_header->function ==
				    MPI3_FUNCTION_MGMT_PASSTHROUGH) {
					drv_buf_iter = &drv_bufs[0];
					dprint_dump(drv_buf_iter->kern_buf,
					    rmc_size, "mpi3_mgmt_req");
				}
			}
		}
		if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
		    (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) {
			dprint_bsg_err(mrioc, "%s: bsg request timed out after %d seconds,\n"
			    "issuing target reset to (0x%04x)\n", __func__,
			    karg->timeout, mpi_header->function_dependent);
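			/*
			 * Try a target reset first so that only the device
			 * the timed-out command was addressed to is
			 * disturbed; the soft reset below remains the
			 * heavier fallback if the command still has not
			 * completed.
			 */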
			mpi3mr_issue_tm(mrioc,
			    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
			    mpi_header->function_dependent, 0,
			    MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
			    &mrioc->host_tm_cmds, &resp_code, NULL);
		}
		if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
		    !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
		goto out_unlock;
	}
	dprint_bsg_info(mrioc, "%s: bsg request completed\n", __func__);

	if (mrioc->prp_list_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
		    mrioc->prp_list_virt, mrioc->prp_list_dma);
		mrioc->prp_list_virt = NULL;
	}

	if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_info(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__,
		    (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->bsg_cmds.ioc_loginfo);
	}

	if ((mpirep_offset != 0xFF) &&
	    drv_bufs[mpirep_offset].bsg_buf_len) {
		drv_buf_iter = &drv_bufs[mpirep_offset];
		drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) +
					   mrioc->reply_sz);
		bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);

		if (!bsg_reply_buf) {
			rval = -ENOMEM;
			goto out_unlock;
		}
		if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
			bsg_reply_buf->mpi_reply_type =
				MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
			memcpy(bsg_reply_buf->reply_buf,
			    mrioc->bsg_cmds.reply, mrioc->reply_sz);
		} else {
			bsg_reply_buf->mpi_reply_type =
				MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
			status_desc = (struct mpi3_status_reply_descriptor *)
			    bsg_reply_buf->reply_buf;
			status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
			status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
		}
		tmplen = min(drv_buf_iter->kern_buf_len,
			drv_buf_iter->bsg_buf_len);
		memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
	}

	if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
	    mrioc->bsg_cmds.is_sense) {
		drv_buf_iter = &drv_bufs[erb_offset];
		tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
		memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
	}

	drv_buf_iter = drv_bufs;
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		if ((count == 1) && is_rmrb) {
			memcpy(drv_buf_iter->bsg_buf,
			    drv_buf_iter->kern_buf,
			    drv_buf_iter->kern_buf_len);
		} else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
			tmplen = 0;
			for (desc_count = 0;
			    desc_count < drv_buf_iter->num_dma_desc;
			    desc_count++) {
				memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen),
				    drv_buf_iter->dma_desc[desc_count].addr,
				    drv_buf_iter->dma_desc[desc_count].size);
				tmplen +=
				    drv_buf_iter->dma_desc[desc_count].size;
			}
		}
	}

out_unlock:
	if (din_buf) {
		job->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    din_buf, job->reply_payload.payload_len);
	}
	mrioc->bsg_cmds.is_sense = 0;
	mrioc->bsg_cmds.sensebuf = NULL;
	mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->bsg_cmds.mutex);
out:
	kfree(sense_buff_k);
	kfree(dout_buf);
	kfree(din_buf);
	kfree(mpi_req);
	if (drv_bufs) {
		drv_buf_iter = drv_bufs;
		for (count = 0; count < bufcnt; count++, drv_buf_iter++)
			kfree(drv_buf_iter->dma_desc);
		kfree(drv_bufs);
	}
	kfree(bsg_reply_buf);
	return rval;
}

/**
 * mpi3mr_app_save_logdata - Save Log Data events
 * @mrioc: Adapter instance reference
 * @event_data: event data associated with log data event
 * @event_data_size: event data size to copy
 *
 * If log data event caching is enabled by the applications,
 * this function saves the log data in the circular queue and
 * sends the async signal SIGIO to the event monitoring
 * applications to indicate there is an async event from the
 * firmware.
 *
 * Return: Nothing
 */
void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
	u16 event_data_size)
{
	u32 index = mrioc->logdata_buf_idx, sz;
	struct mpi3mr_logdata_entry *entry;

	if (!(mrioc->logdata_buf))
		return;

	entry = (struct mpi3mr_logdata_entry *)
		(mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
	entry->valid_entry = 1;
	sz = min(mrioc->logdata_entry_sz, event_data_size);
	memcpy(entry->data, event_data, sz);
	mrioc->logdata_buf_idx =
		((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
	atomic64_inc(&event_counter);
}

/**
 * mpi3mr_bsg_request - bsg request entry point
 * @job: BSG job reference
 *
 * This is the driver's entry point for bsg requests.
 *
 * Return: 0 on success and proper error codes on failure
 */
static int mpi3mr_bsg_request(struct bsg_job *job)
{
	long rval = -EINVAL;
	unsigned int reply_payload_rcv_len = 0;
	struct mpi3mr_bsg_packet *bsg_req = job->request;

	switch (bsg_req->cmd_type) {
	case MPI3MR_DRV_CMD:
		rval = mpi3mr_bsg_process_drv_cmds(job);
		break;
	case MPI3MR_MPT_CMD:
		rval = mpi3mr_bsg_process_mpt_cmds(job);
		break;
	default:
		pr_err("%s: unsupported BSG command(0x%08x)\n",
		    MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
		break;
	}

	bsg_job_done(job, rval, reply_payload_rcv_len);

	return 0;
}

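/*
 * A possible consumer flow for the log data path above (a sketch, not
 * part of the driver): an application first enables caching through
 * MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE, arranges to receive SIGIO, and
 * on each signal drains the queued entries with
 * MPI3MR_DRVBSG_OPCODE_GETLOGDATA until no valid entries remain.
 */
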
/**
 * mpi3mr_bsg_exit - de-registration from bsg layer
 * @mrioc: Adapter instance reference
 *
 * This will be called during driver unload and all
 * bsg resources allocated during load will be freed.
 *
 * Return: Nothing
 */
void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;

	if (!mrioc->bsg_queue)
		return;

	bsg_remove_queue(mrioc->bsg_queue);
	mrioc->bsg_queue = NULL;

	device_del(bsg_dev);
	put_device(bsg_dev);
}

/**
 * mpi3mr_bsg_node_release - release bsg device node
 * @dev: bsg device node
 *
 * Decrements the bsg device's parent reference count.
 *
 * Return: Nothing
 */
static void mpi3mr_bsg_node_release(struct device *dev)
{
	put_device(dev->parent);
}

/**
 * mpi3mr_bsg_init - registration with bsg layer
 * @mrioc: Adapter instance reference
 *
 * This will be called during driver load and it will
 * register the driver with the bsg layer.
 *
 * Return: Nothing
 */
void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;
	struct device *parent = &mrioc->shost->shost_gendev;
	struct queue_limits lim = {
		.max_hw_sectors		= MPI3MR_MAX_APP_XFER_SECTORS,
		.max_segments		= MPI3MR_MAX_APP_XFER_SEGMENTS,
	};

	device_initialize(bsg_dev);

	bsg_dev->parent = get_device(parent);
	bsg_dev->release = mpi3mr_bsg_node_release;

	dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);

	if (device_add(bsg_dev)) {
		ioc_err(mrioc, "%s: bsg device add failed\n",
		    dev_name(bsg_dev));
		put_device(bsg_dev);
		return;
	}

	mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
			mpi3mr_bsg_request, NULL, 0);
	if (IS_ERR(mrioc->bsg_queue)) {
		ioc_err(mrioc, "%s: bsg registration failed\n",
		    dev_name(bsg_dev));
		device_del(bsg_dev);
		put_device(bsg_dev);
	}
}

/**
 * version_fw_show - SysFS callback for firmware version read
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware version
 */
static ssize_t
version_fw_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;

	return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
}
static DEVICE_ATTR_RO(version_fw);

/**
 * fw_queue_depth_show - SysFS callback for firmware max cmds
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware max commands
 */
static ssize_t
fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
}
static DEVICE_ATTR_RO(fw_queue_depth);

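/*
 * The read-only host attributes in this file surface under the
 * Scsi_Host sysfs directory. An illustrative read (the host number and
 * the value shown are system-specific examples):
 *
 *	# cat /sys/class/scsi_host/host0/version_fw
 *	8.1.0.0.00000-00000
 */
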
/**
 * op_req_q_count_show - SysFS callback for request queue count
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying request queue count
 */
static ssize_t
op_req_q_count_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
}
static DEVICE_ATTR_RO(op_req_q_count);

/**
 * reply_queue_count_show - SysFS callback for reply queue count
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying reply queue count
 */
static ssize_t
reply_queue_count_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
}
static DEVICE_ATTR_RO(reply_queue_count);

/**
 * logging_level_show - Show controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * A sysfs 'read/write' shost attribute, to show the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: sysfs_emit() return
 */
static ssize_t
logging_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
}

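/*
 * Illustrative interaction with the logging_level attribute handled by
 * the show callback above and the store callback below (0x10 is an
 * arbitrary example value; the individual bits map to the driver's
 * debug flags, such as MPI3_DEBUG_BSG_INFO used elsewhere in this
 * file):
 *
 *	# echo 0x10 > /sys/class/scsi_host/host0/logging_level
 *	# cat /sys/class/scsi_host/host0/logging_level
 *	00000010h
 */
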
/**
 * logging_level_store - Change controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 * @count: size of the buffer
 *
 * A sysfs 'read/write' shost attribute, to change the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: strlen() return
 */
static ssize_t
logging_level_store(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int val = 0;

	if (kstrtoint(buf, 0, &val) != 0)
		return -EINVAL;

	mrioc->logging_level = val;
	ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
	return strlen(buf);
}
static DEVICE_ATTR_RW(logging_level);

/**
 * adp_state_show() - SysFS callback for adapter state show
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying adapter state
 */
static ssize_t
adp_state_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	enum mpi3mr_iocstate ioc_state;
	uint8_t adp_state;

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if (mrioc->reset_in_progress || mrioc->stop_bsgs ||
		 mrioc->block_on_pci_err)
		adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	return sysfs_emit(buf, "%u\n", adp_state);
}
static DEVICE_ATTR_RO(adp_state);

static struct attribute *mpi3mr_host_attrs[] = {
	&dev_attr_version_fw.attr,
	&dev_attr_fw_queue_depth.attr,
	&dev_attr_op_req_q_count.attr,
	&dev_attr_reply_queue_count.attr,
	&dev_attr_logging_level.attr,
	&dev_attr_adp_state.attr,
	NULL,
};

static const struct attribute_group mpi3mr_host_attr_group = {
	.attrs = mpi3mr_host_attrs
};

const struct attribute_group *mpi3mr_host_groups[] = {
	&mpi3mr_host_attr_group,
	NULL,
};

/*
 * SCSI Device attributes under sysfs
 */

/**
 * sas_address_show - SysFS callback for dev SAS address display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying SAS address of the
 * specific SAS/SATA end device.
 */
static ssize_t
sas_address_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
		return 0;
	return sysfs_emit(buf, "0x%016llx\n",
	    (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
}
static DEVICE_ATTR_RO(sas_address);

/**
 * device_handle_show - SysFS callback for device handle display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware internal
 * device handle of the specific device.
 */
static ssize_t
device_handle_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;
	return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
}
static DEVICE_ATTR_RO(device_handle);

/**
 * persistent_id_show - SysFS callback for persistent ID display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying the persistent ID
 * of the specific device.
 */
static ssize_t
persistent_id_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;
	return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
}
static DEVICE_ATTR_RO(persistent_id);

/**
 * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
 * @dev: pointer to embedded device
 * @attr: sas_ncq_prio_supported attribute descriptor
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' sdev attribute; only works with SATA devices.
 */
static ssize_t
sas_ncq_prio_supported_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);

/**
 * sas_ncq_prio_enable_show - send prioritized io commands to device
 * @dev: pointer to embedded device
 * @attr: sas_ncq_prio_enable attribute descriptor
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' sdev attribute; only works with SATA devices.
 */
static ssize_t
sas_ncq_prio_enable_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;

	if (!sdev_priv_data)
		return 0;

	return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
}

static ssize_t
sas_ncq_prio_enable_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	bool ncq_prio_enable = false;

	if (kstrtobool(buf, &ncq_prio_enable))
		return -EINVAL;

	if (!sas_ata_ncq_prio_supported(sdev))
		return -EINVAL;

	sdev_priv_data->ncq_prio_enable = ncq_prio_enable;

	return strlen(buf);
}
static DEVICE_ATTR_RW(sas_ncq_prio_enable);

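/*
 * Illustrative use of the two NCQ priority attributes above (the
 * device path is system-specific):
 *
 *	# cat /sys/block/sda/device/sas_ncq_prio_supported
 *	1
 *	# echo 1 > /sys/block/sda/device/sas_ncq_prio_enable
 *
 * The store callback rejects the write with -EINVAL when the device
 * does not support NCQ priority.
 */
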
static struct attribute *mpi3mr_dev_attrs[] = {
	&dev_attr_sas_address.attr,
	&dev_attr_device_handle.attr,
	&dev_attr_persistent_id.attr,
	&dev_attr_sas_ncq_prio_supported.attr,
	&dev_attr_sas_ncq_prio_enable.attr,
	NULL,
};

static const struct attribute_group mpi3mr_dev_attr_group = {
	.attrs = mpi3mr_dev_attrs
};

const struct attribute_group *mpi3mr_dev_groups[] = {
	&mpi3mr_dev_attr_group,
	NULL,
};
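/*
 * Note on wiring (a sketch of how these tables are typically consumed;
 * the actual hookup lives in the driver's SCSI host template, not in
 * this file): the SCSI midlayer publishes the groups through the
 * template fields, e.g.
 *
 *	static const struct scsi_host_template mpi3mr_driver_template = {
 *		...
 *		.shost_groups	= mpi3mr_host_groups,
 *		.sdev_groups	= mpi3mr_dev_groups,
 *	};
 *
 * so every attribute defined above appears automatically for each
 * adapter and each SCSI device the driver exposes.
 */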