Lines Matching +full:a +full:- +full:hlm

1 /*-
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * @brief Allocate a transport object.
57 * A transport object is allocated, and associated with a device instance.
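A minimal caller-side sketch, assuming ocs_xport_alloc() takes the ocs_t device instance and returns the new transport; the ocs->xport linkage and error handling shown here are assumptions about the surrounding attach path:

        /* Hypothetical bring-up fragment: allocate the transport and tie it to the device. */
        ocs_xport_t *xport;

        xport = ocs_xport_alloc(ocs);
        if (xport == NULL) {
                ocs_log_err(ocs, "ocs_xport_alloc() failed\n");
                return -1;
        }
        ocs->xport = xport;     /* assumed linkage from device instance to transport */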
71 xport->ocs = ocs; in ocs_xport_alloc()
84 * @return Returns 0 on success, or a non-zero value on failure.
89 ocs_t *ocs = xport->ocs; in ocs_xport_rq_threads_teardown()
92 if (xport->num_rq_threads == 0 || in ocs_xport_rq_threads_teardown()
93 xport->rq_thread_info == NULL) { in ocs_xport_rq_threads_teardown()
98 for (i = 0; i < xport->num_rq_threads; i++) { in ocs_xport_rq_threads_teardown()
99 if (xport->rq_thread_info[i].thread_started) { in ocs_xport_rq_threads_teardown()
100 ocs_thread_terminate(&xport->rq_thread_info[i].thread); in ocs_xport_rq_threads_teardown()
103 while (xport->rq_thread_info[i].thread_started) { in ocs_xport_rq_threads_teardown()
108 if (xport->rq_thread_info[i].seq_cbuf != NULL) { in ocs_xport_rq_threads_teardown()
109 ocs_cbuf_free(xport->rq_thread_info[i].seq_cbuf); in ocs_xport_rq_threads_teardown()
110 xport->rq_thread_info[i].seq_cbuf = NULL; in ocs_xport_rq_threads_teardown()
125 * @return Returns 0 on success, or a non-zero value on failure.
130 ocs_t *ocs = xport->ocs; in ocs_xport_rq_threads_create()
134 xport->num_rq_threads = num_rq_threads; in ocs_xport_rq_threads_create()
141 …xport->rq_thread_info = ocs_malloc(ocs, sizeof(ocs_xport_rq_thread_info_t) * num_rq_threads, OCS_M… in ocs_xport_rq_threads_create()
142 if (xport->rq_thread_info == NULL) { in ocs_xport_rq_threads_create()
144 return -1; in ocs_xport_rq_threads_create()
149 xport->rq_thread_info[i].ocs = ocs; in ocs_xport_rq_threads_create()
150 xport->rq_thread_info[i].seq_cbuf = ocs_cbuf_alloc(ocs, OCS_HW_RQ_NUM_HDR); in ocs_xport_rq_threads_create()
151 if (xport->rq_thread_info[i].seq_cbuf == NULL) { in ocs_xport_rq_threads_create()
155 ocs_snprintf(xport->rq_thread_info[i].thread_name, in ocs_xport_rq_threads_create()
156 sizeof(xport->rq_thread_info[i].thread_name), in ocs_xport_rq_threads_create()
157 "ocs_unsol_rq:%d:%d", ocs->instance_index, i); in ocs_xport_rq_threads_create()
158 rc = ocs_thread_create(ocs, &xport->rq_thread_info[i].thread, ocs_unsol_rq_thread, in ocs_xport_rq_threads_create()
159 xport->rq_thread_info[i].thread_name, in ocs_xport_rq_threads_create()
160 &xport->rq_thread_info[i], OCS_THREAD_RUN); in ocs_xport_rq_threads_create()
165 xport->rq_thread_info[i].thread_started = TRUE; in ocs_xport_rq_threads_create()
171 return -1; in ocs_xport_rq_threads_create()
178 * Performs the functions required to get a device ready to run.
182 * @return Returns 0 on success, or a non-zero value on failure.
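A hedged sketch of the bring-up order implied by these routines: attach prepares the device (HW setup, node and IO pools, RQ threads), initialize makes it functional, and the port is then brought on-line through the control interface (see the sketch after the ocs_xport_control() description further down). The surrounding driver glue is assumed:

        /* Assumed driver bring-up ordering; error paths simplified. */
        if (ocs_xport_attach(xport) != 0) {
                ocs_log_err(ocs, "ocs_xport_attach() failed\n");
                return -1;
        }
        if (ocs_xport_initialize(xport) != 0) {
                ocs_log_err(ocs, "ocs_xport_initialize() failed\n");
                return -1;
        }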
187 ocs_t *ocs = xport->ocs; in ocs_xport_attach()
199 ocs_list_init(&ocs->domain_list, ocs_domain_t, link); in ocs_xport_attach()
202 xport->fcfi[i].hold_frames = 1; in ocs_xport_attach()
203 ocs_lock_init(ocs, &xport->fcfi[i].pend_frames_lock, "xport pend_frames[%d]", i); in ocs_xport_attach()
204 ocs_list_init(&xport->fcfi[i].pend_frames, ocs_hw_sequence_t, link); in ocs_xport_attach()
207 rc = ocs_hw_set_ptr(&ocs->hw, OCS_HW_WAR_VERSION, ocs->hw_war_version); in ocs_xport_attach()
210 return -1; in ocs_xport_attach()
213 rc = ocs_hw_setup(&ocs->hw, ocs, SLI4_PORT_TYPE_FC); in ocs_xport_attach()
215 ocs_log_err(ocs, "%s: Can't setup hardware\n", ocs->desc); in ocs_xport_attach()
216 return -1; in ocs_xport_attach()
217 } else if (ocs->ctrlmask & OCS_CTRLMASK_CRASH_RESET) { in ocs_xport_attach()
219 return -1; in ocs_xport_attach()
222 ocs_hw_set(&ocs->hw, OCS_HW_BOUNCE, ocs->hw_bounce); in ocs_xport_attach()
223 ocs_log_debug(ocs, "HW bounce: %d\n", ocs->hw_bounce); in ocs_xport_attach()
225 ocs_hw_set(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, ocs->rq_selection_policy); in ocs_xport_attach()
226 ocs_hw_set(&ocs->hw, OCS_HW_RR_QUANTA, ocs->rr_quanta); in ocs_xport_attach()
227 ocs_hw_get(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, &value); in ocs_xport_attach()
230 ocs_hw_set_ptr(&ocs->hw, OCS_HW_FILTER_DEF, (void*) ocs->filter_def); in ocs_xport_attach()
232 ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl); in ocs_xport_attach()
233 max_sgl -= SLI4_SGE_MAX_RESERVED; in ocs_xport_attach()
237 if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) { in ocs_xport_attach()
242 if (ocs_hw_set(&ocs->hw, OCS_HW_N_SGL, n_sgl) != OCS_HW_RTN_SUCCESS) { in ocs_xport_attach()
243 ocs_log_err(ocs, "%s: Can't set number of SGLs\n", ocs->desc); in ocs_xport_attach()
244 return -1; in ocs_xport_attach()
246 ocs_log_debug(ocs, "%s: Configured for %d SGLs\n", ocs->desc, n_sgl); in ocs_xport_attach()
249 ocs_hw_get(&ocs->hw, OCS_HW_MAX_NODES, &max_remote_nodes); in ocs_xport_attach()
251 if (!ocs->max_remote_nodes) in ocs_xport_attach()
252 ocs->max_remote_nodes = max_remote_nodes; in ocs_xport_attach()
254 rc = ocs_node_create_pool(ocs, ocs->max_remote_nodes); in ocs_xport_attach()
263 xport->io_pool = ocs_io_pool_create(ocs, ocs->num_scsi_ios, in ocs_xport_attach()
264 (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) ? OCS_FC_MAX_SGL : n_sgl); in ocs_xport_attach()
265 if (xport->io_pool == NULL) { in ocs_xport_attach()
275 if (ocs_xport_rq_threads_create(xport, ocs->rq_threads) != 0) { in ocs_xport_attach()
284 ocs_io_pool_free(xport->io_pool); in ocs_xport_attach()
291 return -1; in ocs_xport_attach()
300 * @return Returns 0 on success or a non-zero value on failure.
305 ocs_t *ocs = xport->ocs; in ocs_xport_initialize_auto_xfer_ready()
311 ocs_hw_get(&ocs->hw, OCS_HW_AUTO_XFER_RDY_CAPABLE, &auto_xfer_rdy); in ocs_xport_initialize_auto_xfer_ready()
313 ocs->auto_xfer_rdy_size = 0; in ocs_xport_initialize_auto_xfer_ready()
318 if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_SIZE, ocs->auto_xfer_rdy_size)) { in ocs_xport_initialize_auto_xfer_ready()
319 ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); in ocs_xport_initialize_auto_xfer_ready()
320 return -1; in ocs_xport_initialize_auto_xfer_ready()
325 * at the module's parameters here. The backend cannot allow a format in ocs_xport_initialize_auto_xfer_ready()
344 if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_T10_ENABLE, TRUE)) { in ocs_xport_initialize_auto_xfer_ready()
345 ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); in ocs_xport_initialize_auto_xfer_ready()
346 return -1; in ocs_xport_initialize_auto_xfer_ready()
348 if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_BLK_SIZE, ramdisc_blocksize)) { in ocs_xport_initialize_auto_xfer_ready()
349 ocs_log_test(ocs, "%s: Can't set auto xfer rdy blk size\n", ocs->desc); in ocs_xport_initialize_auto_xfer_ready()
350 return -1; in ocs_xport_initialize_auto_xfer_ready()
352 if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_P_TYPE, p_type)) { in ocs_xport_initialize_auto_xfer_ready()
353 ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); in ocs_xport_initialize_auto_xfer_ready()
354 return -1; in ocs_xport_initialize_auto_xfer_ready()
356 if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA, TRUE)) { in ocs_xport_initialize_auto_xfer_ready()
357 ocs_log_test(ocs, "%s: Can't set auto xfer rdy ref tag\n", ocs->desc); in ocs_xport_initialize_auto_xfer_ready()
358 return -1; in ocs_xport_initialize_auto_xfer_ready()
360 if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID, FALSE)) { in ocs_xport_initialize_auto_xfer_ready()
361 ocs_log_test(ocs, "%s: Can't set auto xfer rdy app tag valid\n", ocs->desc); in ocs_xport_initialize_auto_xfer_ready()
362 return -1; in ocs_xport_initialize_auto_xfer_ready()
374 * Topology can be fetched from the mod-param or from Persistent Topology (PT).
375 * a. The mod-param value is used when it is 1 (P2P) or 2 (LOOP).
376 * b. PT is used if the mod-param is not provided (i.e., the default value of AUTO).
377 * Also, if mod-param is used, update PT.
381 * @return Returns 0 on success, or a non-zero value on failure.
388 if (ocs->topology == OCS_HW_TOPOLOGY_AUTO) { in ocs_topology_setup()
389 topology = ocs_hw_get_config_persistent_topology(&ocs->hw); in ocs_topology_setup()
391 topology = ocs->topology; in ocs_topology_setup()
392 /* ignore failure here. link will come up either in auto mode in ocs_topology_setup()
394 ocs_hw_set_persistent_topology(&ocs->hw, topology, OCS_CMD_POLL); in ocs_topology_setup()
397 return ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, topology); in ocs_topology_setup()
404 * Performs the functions required to make a device functional.
408 * @return Returns 0 on success, or a non-zero value on failure.
413 ocs_t *ocs = xport->ocs; in ocs_xport_initialize()
418 uint32_t hlm; in ocs_xport_initialize() local
429 ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io); in ocs_xport_initialize()
430 if (ocs_hw_set(&ocs->hw, OCS_HW_N_IO, max_hw_io) != OCS_HW_RTN_SUCCESS) { in ocs_xport_initialize()
431 ocs_log_err(ocs, "%s: Can't set number of IOs\n", ocs->desc); in ocs_xport_initialize()
432 return -1; in ocs_xport_initialize()
435 ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl); in ocs_xport_initialize()
436 max_sgl -= SLI4_SGE_MAX_RESERVED; in ocs_xport_initialize()
438 if (ocs->enable_hlm) { in ocs_xport_initialize()
439 ocs_hw_get(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, &hlm); in ocs_xport_initialize()
440 if (!hlm) { in ocs_xport_initialize()
441 ocs->enable_hlm = FALSE; in ocs_xport_initialize()
445 if (ocs_hw_set(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, TRUE)) { in ocs_xport_initialize()
446 ocs_log_err(ocs, "%s: Can't set high login mode\n", ocs->desc); in ocs_xport_initialize()
447 return -1; in ocs_xport_initialize()
453 if (ocs->auto_xfer_rdy_size > 0 && in ocs_xport_initialize()
454 (ocs->auto_xfer_rdy_size < 2048 || in ocs_xport_initialize()
455 ocs->auto_xfer_rdy_size > 65536)) { in ocs_xport_initialize()
456 ocs_log_err(ocs, "Auto XFER_RDY size is out of range (2K-64K)\n"); in ocs_xport_initialize()
457 return -1; in ocs_xport_initialize()
460 ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io); in ocs_xport_initialize()
462 if (ocs->auto_xfer_rdy_size > 0) { in ocs_xport_initialize()
464 ocs_log_err(ocs, "%s: Failed auto xfer ready setup\n", ocs->desc); in ocs_xport_initialize()
465 return -1; in ocs_xport_initialize()
467 if (ocs->esoc) { in ocs_xport_initialize()
468 ocs_hw_set(&ocs->hw, OCS_ESOC, TRUE); in ocs_xport_initialize()
472 if (ocs->explicit_buffer_list) { in ocs_xport_initialize()
473 /* Are pre-registered SGLs required? */ in ocs_xport_initialize()
474 ocs_hw_get(&ocs->hw, OCS_HW_PREREGISTER_SGL, &i); in ocs_xport_initialize()
478 ocs_hw_set(&ocs->hw, OCS_HW_PREREGISTER_SGL, FALSE); in ocs_xport_initialize()
482 /* Setup persistent topology based on topology mod-param value */ in ocs_xport_initialize()
485 ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc); in ocs_xport_initialize()
486 return -1; in ocs_xport_initialize()
489 if (ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, ocs->topology) != OCS_HW_RTN_SUCCESS) { in ocs_xport_initialize()
490 ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc); in ocs_xport_initialize()
491 return -1; in ocs_xport_initialize()
493 ocs_hw_set(&ocs->hw, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, OCS_FC_RQ_SIZE_DEFAULT); in ocs_xport_initialize()
495 if (ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, ocs->speed) != OCS_HW_RTN_SUCCESS) { in ocs_xport_initialize()
496 ocs_log_err(ocs, "%s: Can't set the link speed\n", ocs->desc); in ocs_xport_initialize()
497 return -1; in ocs_xport_initialize()
500 if (ocs_hw_set(&ocs->hw, OCS_HW_ETH_LICENSE, ocs->ethernet_license) != OCS_HW_RTN_SUCCESS) { in ocs_xport_initialize()
501 ocs_log_err(ocs, "%s: Can't set the ethernet license\n", ocs->desc); in ocs_xport_initialize()
502 return -1; in ocs_xport_initialize()
506 if (ocs->hw.sli.asic_type == SLI4_ASIC_TYPE_LANCER) { in ocs_xport_initialize()
507 if (ocs_hw_set(&ocs->hw, OCS_HW_DIF_SEED, OCS_FC_DIF_SEED) != OCS_HW_RTN_SUCCESS) { in ocs_xport_initialize()
508 ocs_log_err(ocs, "%s: Can't set the DIF seed\n", ocs->desc); in ocs_xport_initialize()
509 return -1; in ocs_xport_initialize()
514 if (0 == ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &dif_capable)) { in ocs_xport_initialize()
520 if ((rc = ocs_hw_set(&ocs->hw, OCS_HW_DIF_MODE, in ocs_xport_initialize()
527 if (ocs->target_io_timer_sec || ocs->enable_ini) { in ocs_xport_initialize()
528 if (ocs->target_io_timer_sec) in ocs_xport_initialize()
529 ocs_log_debug(ocs, "setting target io timer=%d\n", ocs->target_io_timer_sec); in ocs_xport_initialize()
531 ocs_hw_set(&ocs->hw, OCS_HW_EMULATE_WQE_TIMEOUT, TRUE); in ocs_xport_initialize()
534 ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, ocs_domain_cb, ocs); in ocs_xport_initialize()
535 ocs_hw_callback(&ocs->hw, OCS_HW_CB_REMOTE_NODE, ocs_remote_node_cb, ocs); in ocs_xport_initialize()
536 ocs_hw_callback(&ocs->hw, OCS_HW_CB_UNSOLICITED, ocs_unsolicited_cb, ocs); in ocs_xport_initialize()
537 ocs_hw_callback(&ocs->hw, OCS_HW_CB_PORT, ocs_port_cb, ocs); in ocs_xport_initialize()
539 ocs->fw_version = (const char*) ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV); in ocs_xport_initialize()
542 ocs_list_init(&xport->vport_list, ocs_vport_spec_t, link); in ocs_xport_initialize()
543 ocs_lock_init(ocs, &xport->io_pending_lock, "io_pending_lock[%d]", ocs->instance_index); in ocs_xport_initialize()
544 ocs_list_init(&xport->io_pending_list, ocs_io_t, io_pending_link); in ocs_xport_initialize()
545 ocs_atomic_init(&xport->io_active_count, 0); in ocs_xport_initialize()
546 ocs_atomic_init(&xport->io_pending_count, 0); in ocs_xport_initialize()
547 ocs_atomic_init(&xport->io_total_free, 0); in ocs_xport_initialize()
548 ocs_atomic_init(&xport->io_total_pending, 0); in ocs_xport_initialize()
549 ocs_atomic_init(&xport->io_alloc_failed_count, 0); in ocs_xport_initialize()
550 ocs_atomic_init(&xport->io_pending_recursing, 0); in ocs_xport_initialize()
551 ocs_lock_init(ocs, &ocs->hw.watchdog_lock, " Watchdog Lock[%d]", ocs_instance(ocs)); in ocs_xport_initialize()
552 rc = ocs_hw_init(&ocs->hw); in ocs_xport_initialize()
561 if (ocs_hw_set(&ocs->hw, OCS_HW_RQ_PROCESS_LIMIT, rq_limit) != OCS_HW_RTN_SUCCESS) { in ocs_xport_initialize()
562 ocs_log_err(ocs, "%s: Can't set the RQ process limit\n", ocs->desc); in ocs_xport_initialize()
565 if (ocs->config_tgt) { in ocs_xport_initialize()
575 if (ocs->enable_ini) { in ocs_xport_initialize()
586 if (ocs->num_vports != 0) { in ocs_xport_initialize()
588 ocs_hw_get(&ocs->hw, OCS_HW_MAX_VPORTS, &max_vports); in ocs_xport_initialize()
590 if (ocs->num_vports < max_vports) { in ocs_xport_initialize()
591 ocs_log_debug(ocs, "Provisioning %d vports\n", ocs->num_vports); in ocs_xport_initialize()
592 for (i = 0; i < ocs->num_vports; i++) { in ocs_xport_initialize()
593 ocs_vport_create_spec(ocs, 0, 0, UINT32_MAX, ocs->enable_ini, ocs->enable_tgt, NULL, NULL); in ocs_xport_initialize()
596 ocs_log_err(ocs, "failed to create vports. num_vports range should be (1-%d) \n", max_vports-1); in ocs_xport_initialize()
614 ocs_hw_teardown(&ocs->hw); in ocs_xport_initialize()
617 return -1; in ocs_xport_initialize()
624 * Performs the functions required to shut down a device.
628 * @return Returns 0 on success or a non-zero value on failure.
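A hedged teardown sketch mirroring the bring-up order; taking the port off-line first via the control interface is an assumption about the caller, while the detach/free split follows the routines shown in this file:

        /* Assumed shutdown ordering; error handling omitted. */
        ocs_xport_control(xport, OCS_XPORT_PORT_OFFLINE);
        ocs_xport_detach(xport);        /* backend teardown, timers, ocs_hw_teardown() */
        ocs_xport_free(xport);          /* IO pool, locks, pending-frame lists */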
633 ocs_t *ocs = xport->ocs; in ocs_xport_detach()
635 /* free resources associated with target-server and initiator-client */ in ocs_xport_detach()
636 if (ocs->config_tgt) in ocs_xport_detach()
639 if (ocs->enable_ini) { in ocs_xport_detach()
643 if (ocs_timer_pending(&ocs->xport->stats_timer)) in ocs_xport_detach()
644 ocs_del_timer(&ocs->xport->stats_timer); in ocs_xport_detach()
647 ocs_hw_teardown(&ocs->hw); in ocs_xport_detach()
681 * event to a node object. By doing this in the interrupt context, it has
683 * create a per event node lock.
690 * @return Returns 0 on success, a negative error code value on failure.
699 ocs_node_post_event(payload->node, payload->evt, payload->context); in ocs_xport_post_node_event_cb()
700 ocs_sem_v(&payload->sem); in ocs_xport_post_node_event_cb()
720 ocs_t *ocs = xport->ocs; in ocs_xport_force_free()
726 ocs_list_foreach_safe(&ocs->domain_list, domain, next) { in ocs_xport_force_free()
736 * Perform the attach function, which for the FC transport makes a HW call
747 * @return Returns 0 on success, or a negative error code value on failure.
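A hedged usage sketch of the link-state control path handled below. OCS_XPORT_PORT_OFFLINE appears later in this file; OCS_XPORT_PORT_ONLINE is assumed to be its counterpart:

        /* Bring the port on-line, then take it off-line again. */
        if (ocs_xport_control(xport, OCS_XPORT_PORT_ONLINE) != 0)
                ocs_log_err(ocs, "port online request failed\n");
        if (ocs_xport_control(xport, OCS_XPORT_PORT_OFFLINE) != 0)
                ocs_log_err(ocs, "port offline request failed\n");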
757 ocs_assert(xport, -1); in ocs_xport_control()
758 ocs_assert(xport->ocs, -1); in ocs_xport_control()
759 ocs = xport->ocs; in ocs_xport_control()
763 /* Bring the port on-line */ in ocs_xport_control()
764 rc = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_INIT, 0, NULL, NULL); in ocs_xport_control()
766 ocs_log_err(ocs, "%s: Can't init port\n", ocs->desc); in ocs_xport_control()
768 xport->configured_link_state = cmd; in ocs_xport_control()
773 if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) { in ocs_xport_control()
776 xport->configured_link_state = cmd; in ocs_xport_control()
785 /* if a PHYSDEV reset was performed (e.g. hw dump), will affect in ocs_xport_control()
789 if (ocs_hw_get(&ocs->hw, OCS_HW_RESET_REQUIRED, &reset_required) != OCS_HW_RTN_SUCCESS) { in ocs_xport_control()
801 if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) { in ocs_xport_control()
822 * POST_NODE_EVENT: post an event to a node object in ocs_xport_control()
824 * This transport function is used to post an event to a node object. It does in ocs_xport_control()
825 * this by submitting a NOP mailbox command to defer execution to the in ocs_xport_control()
829 * A counting semaphore is used to make the call synchronous (we wait until in ocs_xport_control()
847 ocs_assert(node, -1); in ocs_xport_control()
848 ocs_assert(node->ocs, -1); in ocs_xport_control()
850 ocs = node->ocs; in ocs_xport_control()
851 hw = &ocs->hw; in ocs_xport_control()
854 if (!node->sm.current_state) { in ocs_xport_control()
856 return -1; in ocs_xport_control()
868 rc = -1; in ocs_xport_control()
875 rc = -1; in ocs_xport_control()
892 xport->req_wwnn = wwnn; in ocs_xport_control()
908 xport->req_wwpn = wwpn; in ocs_xport_control()
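The fragments above also cover posting a node event and setting new world-wide names. A hedged caller-side sketch; the exact enum spellings and the variadic argument order are assumptions:

        /* Post a state-machine event to a node via the deferred (mailbox NOP) path. */
        ocs_xport_control(xport, OCS_XPORT_POST_NODE_EVENT, node, evt, context);

        /* Request new world-wide names; each is assumed to take a 64-bit value argument. */
        ocs_xport_control(xport, OCS_XPORT_WWNN_SET, wwnn);
        ocs_xport_control(xport, OCS_XPORT_WWPN_SET, wwpn);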
920 * @brief Return status on a link.
923 * Returns status information about a link.
941 * @return Returns 0 on success, or a negative error code value on failure.
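A hedged query sketch; the command names here are inferred from the cases handled below and should be checked against the transport header:

        ocs_xport_stats_t result;

        /* Link status: result.value is assumed to be 1 when the link is up, 0 when down. */
        if (ocs_xport_status(xport, OCS_XPORT_PORT_STATUS, &result) == 0)
                ocs_log_debug(ocs, "link %s\n", result.value ? "up" : "down");

        /* Current link speed as reported by the HW layer. */
        if (ocs_xport_status(xport, OCS_XPORT_LINK_SPEED, &result) == 0)
                ocs_log_debug(ocs, "link speed: %d\n", result.value);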
952 ocs_assert(xport, -1); in ocs_xport_status()
953 ocs_assert(xport->ocs, -1); in ocs_xport_status()
955 ocs = xport->ocs; in ocs_xport_status()
959 ocs_assert(result, -1); in ocs_xport_status()
960 if (xport->configured_link_state == 0) { in ocs_xport_status()
963 xport->configured_link_state = OCS_XPORT_PORT_OFFLINE; in ocs_xport_status()
965 result->value = xport->configured_link_state; in ocs_xport_status()
969 ocs_assert(result, -1); in ocs_xport_status()
971 hw_rc = ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value.value); in ocs_xport_status()
974 result->value = 0; in ocs_xport_status()
976 result->value = 1; in ocs_xport_status()
980 rc = -1; in ocs_xport_status()
987 ocs_assert(result, -1); in ocs_xport_status()
988 result->value = 0; in ocs_xport_status()
990 rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &speed); in ocs_xport_status()
992 result->value = speed; in ocs_xport_status()
1001 ocs_assert(result, -1); in ocs_xport_status()
1002 speed = result->value; in ocs_xport_status()
1004 rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_MODULE_TYPE, &link_module_type); in ocs_xport_status()
1023 ocs_memcpy((void *)result, &ocs->xport->fc_xport_stats, sizeof(ocs_xport_stats_t)); in ocs_xport_status()
1027 /* Create a semaphore to synchronize the stat reset process. */ in ocs_xport_status()
1028 ocs_sem_init(&(result->stats.semaphore), 0, "fc_stats_reset"); in ocs_xport_status()
1031 if ((rc = ocs_hw_get_link_stats(&ocs->hw, 0, 1, 1, ocs_xport_link_stats_cb, result)) != 0) { in ocs_xport_status()
1037 /* TODO: Should there be a timeout on this? If so, how long? */ in ocs_xport_status()
1038 if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) { in ocs_xport_status()
1041 rc = -ENXIO; in ocs_xport_status()
1046 if ((rc = ocs_hw_get_host_stats(&ocs->hw, 1, ocs_xport_host_stats_cb, result)) != 0) { in ocs_xport_status()
1052 if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) { in ocs_xport_status()
1055 rc = -ENXIO; in ocs_xport_status()
1062 result->value = ocs_list_empty(&ocs->domain_list); in ocs_xport_status()
1066 rc = -1; in ocs_xport_status()
1079 …result->stats.link_stats.link_failure_error_count = counters[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].… in ocs_xport_link_stats_cb()
1080 …result->stats.link_stats.loss_of_sync_error_count = counters[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].… in ocs_xport_link_stats_cb()
1081 …result->stats.link_stats.primitive_sequence_error_count = counters[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_… in ocs_xport_link_stats_cb()
1082 …result->stats.link_stats.invalid_transmission_word_error_count = counters[OCS_HW_LINK_STAT_INVALID… in ocs_xport_link_stats_cb()
1083 result->stats.link_stats.crc_error_count = counters[OCS_HW_LINK_STAT_CRC_COUNT].counter; in ocs_xport_link_stats_cb()
1085 ocs_sem_v(&(result->stats.semaphore)); in ocs_xport_link_stats_cb()
1093 … result->stats.host_stats.transmit_kbyte_count = counters[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter; in ocs_xport_host_stats_cb()
1094 … result->stats.host_stats.receive_kbyte_count = counters[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter; in ocs_xport_host_stats_cb()
1095 … result->stats.host_stats.transmit_frame_count = counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter; in ocs_xport_host_stats_cb()
1096 … result->stats.host_stats.receive_frame_count = counters[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter; in ocs_xport_host_stats_cb()
1098 ocs_sem_v(&(result->stats.semaphore)); in ocs_xport_host_stats_cb()
1102 * @brief Free a transport object.
1119 ocs = xport->ocs; in ocs_xport_free()
1120 ocs_io_pool_free(xport->io_pool); in ocs_xport_free()
1122 if (mtx_initialized(&xport->io_pending_lock.lock)) in ocs_xport_free()
1123 ocs_lock_free(&xport->io_pending_lock); in ocs_xport_free()
1126 ocs_lock_free(&xport->fcfi[i].pend_frames_lock); in ocs_xport_free()