/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Provide IO object allocation.
 */

/*!
 * @defgroup io_alloc IO allocation
 */

#include "ocs.h"
#include "ocs_scsi.h"
#include "ocs_els.h"
#include "ocs_utils.h"

void ocs_mgmt_io_list(ocs_textbuf_t *textbuf, void *io);
void ocs_mgmt_io_get_all(ocs_textbuf_t *textbuf, void *io);
int ocs_mgmt_io_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *io);

static ocs_mgmt_functions_t io_mgmt_functions = {
	.get_list_handler = ocs_mgmt_io_list,
	.get_handler = ocs_mgmt_io_get,
	.get_all_handler = ocs_mgmt_io_get_all,
};

/**
 * @brief IO pool.
 *
 * Structure encapsulating a pool of IO objects.
 */

struct ocs_io_pool_s {
	ocs_t *ocs;			/* Pointer to device object */
	ocs_lock_t lock;		/* IO pool lock */
	uint32_t io_num_ios;		/* Total IOs allocated */
	ocs_pool_t *pool;
};

/**
 * @brief Create a pool of IO objects.
 *
 * @par Description
 * This function allocates memory in larger, fixed-size chunks called
 * "slabs". It calculates the number of IO objects that fit within each
 * slab and the number of slabs required to satisfy the requested number
 * of IOs. Each slab is allocated, and every IO object within it is added
 * to the free list. Individual command, response, and SGL DMA buffers are
 * allocated for each IO.
 *
 *             "Slabs"
 *          +----------------+
 *          |                |
 *       +----------------+  |
 *       |       IO       |  |
 *       +----------------+  |
 *       |      ...       |  |
 *       +----------------+__+
 *       |       IO       |
 *       +----------------+
 *
 * @param ocs Driver instance's software context.
 * @param num_io Number of IO contexts to allocate.
 * @param num_sgl Number of SGL entries to allocate for each IO.
 *
 * @return Returns a pointer to a new ocs_io_pool_t on success,
 * or NULL on failure.
 */

ocs_io_pool_t *
ocs_io_pool_create(ocs_t *ocs, uint32_t num_io, uint32_t num_sgl)
{
	uint32_t i = 0;
	int32_t rc = -1;
	ocs_io_pool_t *io_pool;

	/* Allocate the IO pool */
	io_pool = ocs_malloc(ocs, sizeof(*io_pool), OCS_M_ZERO | OCS_M_NOWAIT);
	if (io_pool == NULL) {
		ocs_log_err(ocs, "allocate of IO pool failed\n");
		return NULL;
	}

	io_pool->ocs = ocs;
	io_pool->io_num_ios = num_io;

	/* Initialize IO pool lock */
	ocs_lock_init(ocs, &io_pool->lock, "io_pool lock[%d]", ocs->instance_index);

	io_pool->pool = ocs_pool_alloc(ocs, sizeof(ocs_io_t), io_pool->io_num_ios, FALSE);
	if (io_pool->pool == NULL) {
		ocs_log_err(ocs, "ocs_pool_alloc failed\n");
		ocs_lock_free(&io_pool->lock);
		ocs_free(ocs, io_pool, sizeof(*io_pool));
		return NULL;
	}

	for (i = 0; i < io_pool->io_num_ios; i++) {
		ocs_io_t *io = ocs_pool_get_instance(io_pool->pool, i);

		io->tag = i;
		io->instance_index = i;
		io->ocs = ocs;

		/* Allocate a command DMA buffer */
		if (ocs->enable_ini) {
			rc = ocs_dma_alloc(ocs, &io->cmdbuf, SCSI_CMD_BUF_LENGTH, OCS_MIN_DMA_ALIGNMENT);
			if (rc) {
				ocs_log_err(ocs, "ocs_dma_alloc cmdbuf failed\n");
				ocs_io_pool_free(io_pool);
				return NULL;
			}
		}

		/* Allocate a response buffer */
		rc = ocs_dma_alloc(ocs, &io->rspbuf, SCSI_RSP_BUF_LENGTH, OCS_MIN_DMA_ALIGNMENT);
		if (rc) {
			ocs_log_err(ocs, "ocs_dma_alloc rspbuf failed\n");
			ocs_io_pool_free(io_pool);
			return NULL;
		}

		/* Allocate SGL */
		io->sgl = ocs_malloc(ocs, sizeof(*io->sgl) * num_sgl, OCS_M_NOWAIT | OCS_M_ZERO);
		if (io->sgl == NULL) {
			ocs_log_err(ocs, "malloc sgl's failed\n");
			ocs_io_pool_free(io_pool);
			return NULL;
		}
		io->sgl_allocated = num_sgl;
		io->sgl_count = 0;

		/* Make IO backend calls to initialize the IO */
		ocs_scsi_tgt_io_init(io);
		ocs_scsi_ini_io_init(io);

		rc = ocs_dma_alloc(ocs, &io->els_req, OCS_ELS_REQ_LEN, OCS_MIN_DMA_ALIGNMENT);
		if (rc) {
			ocs_log_err(ocs, "ocs_dma_alloc els_req failed\n");
			ocs_io_pool_free(io_pool);
			return NULL;
		}

		rc = ocs_dma_alloc(ocs, &io->els_rsp, OCS_ELS_GID_PT_RSP_LEN, OCS_MIN_DMA_ALIGNMENT);
		if (rc) {
			ocs_log_err(ocs, "ocs_dma_alloc els_rsp failed\n");
			ocs_io_pool_free(io_pool);
			return NULL;
		}
	}

	return io_pool;
}
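
/*
 * Illustrative usage sketch, not taken from this driver's actual call sites
 * (pool creation and teardown normally happen in the transport attach/detach
 * paths). The IO and SGL counts below are hypothetical values chosen for the
 * example only.
 *
 *	ocs_io_pool_t *pool = ocs_io_pool_create(ocs, 1024, 128);
 *	if (pool != NULL) {
 *		ocs_io_t *io = ocs_io_pool_io_alloc(pool);
 *		if (io != NULL) {
 *			... use the IO context for a command ...
 *			ocs_io_pool_io_free(pool, io);
 *		}
 *		ocs_io_pool_free(pool);
 *	}
 */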

/**
 * @brief Free the IO object pool.
 *
 * @par Description
 * The pool of IO objects is freed, along with each IO's DMA buffers and SGL.
 *
 * @param io_pool Pointer to IO pool object.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_io_pool_free(ocs_io_pool_t *io_pool)
{
	ocs_t *ocs;
	uint32_t i;
	ocs_io_t *io;

	if (io_pool != NULL) {
		ocs = io_pool->ocs;
		for (i = 0; i < io_pool->io_num_ios; i++) {
			io = ocs_pool_get_instance(io_pool->pool, i);
			if (!io)
				continue;

			ocs_scsi_tgt_io_exit(io);
			ocs_scsi_ini_io_exit(io);

			if (io->sgl) {
				ocs_free(ocs, io->sgl, sizeof(*io->sgl) * io->sgl_allocated);
			}
			ocs_dma_free(ocs, &io->cmdbuf);
			ocs_dma_free(ocs, &io->rspbuf);
			ocs_dma_free(ocs, &io->els_req);
			ocs_dma_free(ocs, &io->els_rsp);
		}

		if (io_pool->pool != NULL) {
			ocs_pool_free(io_pool->pool);
		}
		ocs_lock_free(&io_pool->lock);
		ocs_free(ocs, io_pool, sizeof(*io_pool));
		ocs->xport->io_pool = NULL;
	}

	return 0;
}

/**
 * @brief Return the number of IO contexts allocated in the pool.
 *
 * @param io_pool Pointer to IO pool object.
 *
 * @return Returns the total number of IOs in the pool.
 */
uint32_t
ocs_io_pool_allocated(ocs_io_pool_t *io_pool)
{
	return io_pool->io_num_ios;
}

/**
 * @ingroup io_alloc
 * @brief Allocate an object used to track an IO.
 *
 * @param io_pool Pointer to the IO pool.
 *
 * @return Returns the pointer to a new object, or NULL if none are available.
 */
ocs_io_t *
ocs_io_pool_io_alloc(ocs_io_pool_t *io_pool)
{
	ocs_io_t *io = NULL;
	ocs_t *ocs;

	ocs_assert(io_pool, NULL);

	ocs = io_pool->ocs;

	ocs_lock(&io_pool->lock);
	if ((io = ocs_pool_get(io_pool->pool)) != NULL) {
		ocs_unlock(&io_pool->lock);

		io->io_type = OCS_IO_TYPE_MAX;
		io->hio_type = OCS_HW_IO_MAX;
		io->hio = NULL;
		io->transferred = 0;
		io->ocs = ocs;
		io->timeout = 0;
		io->sgl_count = 0;
		io->tgt_task_tag = 0;
		io->init_task_tag = 0;
		io->hw_tag = 0;
		io->display_name = "pending";
		io->seq_init = 0;
		io->els_req_free = 0;
		io->mgmt_functions = &io_mgmt_functions;
		io->io_free = 0;
		ocs_atomic_add_return(&ocs->xport->io_active_count, 1);
		ocs_atomic_add_return(&ocs->xport->io_total_alloc, 1);
	} else {
		ocs_unlock(&io_pool->lock);
	}
	return io;
}

/**
 * @ingroup io_alloc
 * @brief Free an object used to track an IO.
 *
 * @param io_pool Pointer to IO pool object.
 * @param io Pointer to the IO object.
 */
void
ocs_io_pool_io_free(ocs_io_pool_t *io_pool, ocs_io_t *io)
{
	ocs_t *ocs;
	ocs_hw_io_t *hio = NULL;

	ocs_assert(io_pool);

	ocs = io_pool->ocs;

	ocs_lock(&io_pool->lock);
	hio = io->hio;
	io->hio = NULL;
	ocs_pool_put(io_pool->pool, io);
	ocs_unlock(&io_pool->lock);

	if (hio) {
		ocs_hw_io_free(&ocs->hw, hio);
	}
	io->io_free = 1;
	ocs_atomic_sub_return(&ocs->xport->io_active_count, 1);
	ocs_atomic_add_return(&ocs->xport->io_total_free, 1);
}

/**
 * @ingroup io_alloc
 * @brief Find an IO given its node and OX_ID.
 *
 * @param ocs Driver instance's software context.
 * @param node Pointer to node.
 * @param ox_id OX_ID to find.
 * @param rx_id RX_ID to find (0xffff for unassigned).
 *
 * @return Returns a pointer to the matching target-mode IO, or NULL if none is found.
 */
ocs_io_t *
ocs_io_find_tgt_io(ocs_t *ocs, ocs_node_t *node, uint16_t ox_id, uint16_t rx_id)
{
	ocs_io_t *io = NULL;

	ocs_lock(&node->active_ios_lock);
	ocs_list_foreach(&node->active_ios, io)
		if ((io->cmd_tgt && (io->init_task_tag == ox_id)) &&
		    ((rx_id == 0xffff) || (io->tgt_task_tag == rx_id))) {
			break;
		}
	ocs_unlock(&node->active_ios_lock);
	return io;
}
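
/*
 * Illustrative lookup sketch (hypothetical caller; real lookups are driven
 * from the unsolicited-frame and ABTS handling paths): resolve an incoming
 * exchange to the target-mode IO it refers to, matching on OX_ID and
 * accepting any RX_ID by passing 0xffff. The ox_id variable below is a
 * placeholder for a value parsed from the received frame header.
 *
 *	ocs_io_t *tgt_io = ocs_io_find_tgt_io(ocs, node, ox_id, 0xffff);
 *	if (tgt_io == NULL) {
 *		... no active IO matches; reject or ignore the exchange ...
 *	}
 */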

/**
 * @ingroup io_alloc
 * @brief Return IO context given the instance index.
 *
 * @par Description
 * Returns a pointer to the IO context given by the instance index.
 *
 * @param ocs Pointer to driver structure.
 * @param index IO instance index to return.
 *
 * @return Returns a pointer to the IO context, or NULL if not found.
 */
ocs_io_t *
ocs_io_get_instance(ocs_t *ocs, uint32_t index)
{
	ocs_xport_t *xport = ocs->xport;
	ocs_io_pool_t *io_pool = xport->io_pool;

	return ocs_pool_get_instance(io_pool->pool, index);
}

/**
 * @brief Generate IO context ddump data.
 *
 * The ddump data for an IO context is generated.
 *
 * @param textbuf Pointer to text buffer.
 * @param io Pointer to IO context.
 *
 * @return None.
 */

void
ocs_ddump_io(ocs_textbuf_t *textbuf, ocs_io_t *io)
{
	ocs_ddump_section(textbuf, "io", io->instance_index);
	ocs_ddump_value(textbuf, "display_name", "%s", io->display_name);
	ocs_ddump_value(textbuf, "node_name", "%s", io->node->display_name);

	ocs_ddump_value(textbuf, "ref_count", "%d", ocs_ref_read_count(&io->ref));
	ocs_ddump_value(textbuf, "io_type", "%d", io->io_type);
	ocs_ddump_value(textbuf, "hio_type", "%d", io->hio_type);
	ocs_ddump_value(textbuf, "cmd_tgt", "%d", io->cmd_tgt);
	ocs_ddump_value(textbuf, "cmd_ini", "%d", io->cmd_ini);
	ocs_ddump_value(textbuf, "send_abts", "%d", io->send_abts);
	ocs_ddump_value(textbuf, "init_task_tag", "0x%x", io->init_task_tag);
	ocs_ddump_value(textbuf, "tgt_task_tag", "0x%x", io->tgt_task_tag);
	ocs_ddump_value(textbuf, "hw_tag", "0x%x", io->hw_tag);
	ocs_ddump_value(textbuf, "tag", "0x%x", io->tag);
	ocs_ddump_value(textbuf, "timeout", "%d", io->timeout);
	ocs_ddump_value(textbuf, "tmf_cmd", "%d", io->tmf_cmd);
	ocs_ddump_value(textbuf, "abort_rx_id", "0x%x", io->abort_rx_id);

	ocs_ddump_value(textbuf, "busy", "%d", ocs_io_busy(io));
	ocs_ddump_value(textbuf, "transferred", "%zu", io->transferred);
	ocs_ddump_value(textbuf, "auto_resp", "%d", io->auto_resp);
	ocs_ddump_value(textbuf, "exp_xfer_len", "%d", io->exp_xfer_len);
	ocs_ddump_value(textbuf, "xfer_req", "%d", io->xfer_req);
	ocs_ddump_value(textbuf, "seq_init", "%d", io->seq_init);

	ocs_ddump_value(textbuf, "alloc_link", "%d", ocs_list_on_list(&io->io_alloc_link));
	ocs_ddump_value(textbuf, "pending_link", "%d", ocs_list_on_list(&io->io_pending_link));
	ocs_ddump_value(textbuf, "backend_link", "%d", ocs_list_on_list(&io->link));

	if (io->hio) {
		ocs_ddump_value(textbuf, "hw_tag", "%#x", io->hio->reqtag);
		ocs_ddump_value(textbuf, "hw_xri", "%#x", io->hio->indicator);
		ocs_ddump_value(textbuf, "hw_type", "%#x", io->hio->type);
	} else {
		ocs_ddump_value(textbuf, "hw_tag", "%s", "pending");
		ocs_ddump_value(textbuf, "hw_xri", "%s", "pending");
		ocs_ddump_value(textbuf, "hw_type", "%s", "pending");
	}

	ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_IO, io);
	ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_IO, io);

	ocs_ddump_endsection(textbuf, "io", io->instance_index);
}

void
ocs_mgmt_io_list(ocs_textbuf_t *textbuf, void *object)
{
	/* Readonly values */
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init_task_tag");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "tag");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "transferred");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "auto_resp");
"auto_resp"); 418 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "exp_xfer_len"); 419 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "xfer_req"); 420 } 421 422 int 423 ocs_mgmt_io_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object) 424 { 425 char qualifier[80]; 426 int retval = -1; 427 ocs_io_t *io = (ocs_io_t *) object; 428 429 snprintf(qualifier, sizeof(qualifier), "%s/io[%d]", parent, io->instance_index); 430 431 /* If it doesn't start with my qualifier I don't know what to do with it */ 432 if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { 433 char *unqualified_name = name + strlen(qualifier) +1; 434 435 /* See if it's a value I can supply */ 436 if (ocs_strcmp(unqualified_name, "display_name") == 0) { 437 ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", io->display_name); 438 retval = 0; 439 } else if (ocs_strcmp(unqualified_name, "init_task_tag") == 0) { 440 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "init_task_tag", "0x%x", io->init_task_tag); 441 retval = 0; 442 } else if (ocs_strcmp(unqualified_name, "tgt_task_tag") == 0) { 443 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tgt_task_tag", "0x%x", io->tgt_task_tag); 444 retval = 0; 445 } else if (ocs_strcmp(unqualified_name, "hw_tag") == 0) { 446 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_tag", "0x%x", io->hw_tag); 447 retval = 0; 448 } else if (ocs_strcmp(unqualified_name, "tag") == 0) { 449 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tag", "0x%x", io->tag); 450 retval = 0; 451 } else if (ocs_strcmp(unqualified_name, "transferred") == 0) { 452 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "transferred", "%zu", io->transferred); 453 retval = 0; 454 } else if (ocs_strcmp(unqualified_name, "auto_resp") == 0) { 455 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "auto_resp", io->auto_resp); 456 retval = 0; 457 } else if (ocs_strcmp(unqualified_name, "exp_xfer_len") == 0) { 458 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "exp_xfer_len", "%d", io->exp_xfer_len); 459 retval = 0; 460 } else if (ocs_strcmp(unqualified_name, "xfer_req") == 0) { 461 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "xfer_req", "%d", io->xfer_req); 462 retval = 0; 463 } 464 } 465 466 return retval; 467 } 468 469 void 470 ocs_mgmt_io_get_all(ocs_textbuf_t *textbuf, void *object) 471 { 472 ocs_io_t *io = (ocs_io_t *) object; 473 474 ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", io->display_name); 475 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "init_task_tag", "0x%x", io->init_task_tag); 476 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tgt_task_tag", "0x%x", io->tgt_task_tag); 477 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_tag", "0x%x", io->hw_tag); 478 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tag", "0x%x", io->tag); 479 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "transferred", "%zu", io->transferred); 480 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "auto_resp", io->auto_resp); 481 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "exp_xfer_len", "%d", io->exp_xfer_len); 482 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "xfer_req", "%d", io->xfer_req); 483 484 } 485