/linux/drivers/mailbox/

ti-msgmgr.c
     74  u8 queue_count;  (member)
    650  if (qinst->queue_id > d->queue_count) {  in ti_msgmgr_queue_setup()
    652  idx, qinst->queue_id, d->queue_count);  in ti_msgmgr_queue_setup()
    774  .queue_count = 64,
    789  .queue_count = 190,
    819  int queue_count;  in ti_msgmgr_probe() (local)
    857  queue_count = desc->num_valid_queues;  in ti_msgmgr_probe()
    858  if (!queue_count || queue_count > desc->queue_count) {  in ti_msgmgr_probe()
    860  queue_count, desc->queue_count);  in ti_msgmgr_probe()
    863  inst->num_valid_queues = queue_count;  in ti_msgmgr_probe()
    [all …]

/linux/drivers/s390/crypto/

ap_queue.c
    146  aq->queue_count = max_t(int, 0, aq->queue_count - 1);  in ap_sm_recv()
    147  if (!status.queue_empty && !aq->queue_count)  in ap_sm_recv()
    148  aq->queue_count++;  in ap_sm_recv()
    149  if (aq->queue_count > 0)  in ap_sm_recv()
    173  if (!status.queue_empty || aq->queue_count <= 0)  in ap_sm_recv()
    176  aq->queue_count = 0;  in ap_sm_recv()
    207  if (aq->queue_count > 0) {  in ap_sm_read()
    214  if (aq->queue_count > 0)  in ap_sm_read()
    265  aq->queue_count = max_t(int, 1, aq->queue_count + 1);  in ap_sm_write()
    266  if (aq->queue_count == 1)  in ap_sm_write()
    [all …]
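Note on the pattern above: ap_sm_recv() keeps queue_count (messages pending on the AP queue) clamped so it never goes negative, and resynchronizes it against the hardware's queue_empty status when the two disagree. A minimal standalone sketch of that clamp-and-correct idiom, with hypothetical names rather than the kernel's types:

#include <stdio.h>

/* Hypothetical stand-in for the AP queue state; not the kernel struct. */
struct queue_state {
    int queue_count;   /* # messages believed to be on the queue */
};

/* Decrement like max_t(int, 0, count - 1): never go below zero. */
static void on_reply_received(struct queue_state *q, int hw_queue_empty)
{
    q->queue_count = q->queue_count > 1 ? q->queue_count - 1 : 0;
    /* If hardware says the queue is not empty but we counted zero,
     * resynchronize by assuming at least one message remains. */
    if (!hw_queue_empty && !q->queue_count)
        q->queue_count++;
}

int main(void)
{
    struct queue_state q = { .queue_count = 1 };

    on_reply_received(&q, 1);   /* drains to 0 */
    on_reply_received(&q, 1);   /* stays clamped at 0 */
    on_reply_received(&q, 0);   /* hw disagrees: bumped back to 1 */
    printf("queue_count = %d\n", q.queue_count);  /* prints 1 */
    return 0;
}

Trusting the hardware's emptiness bit over the local counter keeps the two from drifting apart when replies race with the state machine's bookkeeping.
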
ap_bus.h
    199  int queue_count;  /* # messages currently on AP queue. */  (member)

/linux/drivers/nvme/target/

loop.c
    224  BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);  in nvme_loop_init_hctx()
    302  for (i = 1; i < ctrl->ctrl.queue_count; i++) {  in nvme_loop_destroy_io_queues()
    306  ctrl->ctrl.queue_count = 1;  in nvme_loop_destroy_io_queues()
    334  ctrl->ctrl.queue_count++;  in nvme_loop_init_io_queues()
    348  for (i = 1; i < ctrl->ctrl.queue_count; i++) {  in nvme_loop_connect_io_queues()
    366  ctrl->ctrl.queue_count = 1;  in nvme_loop_configure_admin_queue()
    409  if (ctrl->ctrl.queue_count > 1) {  in nvme_loop_shutdown_ctrl()
    472  ctrl->ctrl.queue_count - 1);  in nvme_loop_reset_ctrl_work()
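The NVMe fabrics drivers in this listing (loop here, fc.c/rdma.c/tcp.c under /linux/drivers/nvme/host/ below) share a convention: ctrl->queue_count includes the admin queue at index 0, so I/O-queue loops run from 1 to queue_count - 1, setup writes queue_count = nr_io_queues + 1, and teardown resets it to 1 rather than 0. A sketch of that indexing with invented types:

#include <stdio.h>

#define MAX_QUEUES 8

/* Hypothetical controller; in the kernel this is struct nvme_ctrl. */
struct ctrl {
    int queue_count;           /* admin queue + I/O queues */
    int queue_live[MAX_QUEUES];
};

static void init_io_queues(struct ctrl *c, int nr_io_queues)
{
    /* Index 0 is reserved for the admin queue. */
    c->queue_count = nr_io_queues + 1;
    for (int i = 1; i < c->queue_count; i++)
        c->queue_live[i] = 1;
}

static void destroy_io_queues(struct ctrl *c)
{
    for (int i = 1; i < c->queue_count; i++)
        c->queue_live[i] = 0;
    c->queue_count = 1;        /* only the admin queue remains */
}

int main(void)
{
    struct ctrl c = { .queue_count = 1 };

    init_io_queues(&c, 4);
    printf("I/O queues: %d\n", c.queue_count - 1);  /* prints 4 */
    destroy_io_queues(&c);
    return 0;
}
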
/linux/tools/testing/selftests/net/lib/py/

nsim.py
     53  def __init__(self, port_count=1, queue_count=1, ns=None):  (argument)
     63  self.ctrl_write("new_device", "%u %u %u" % (addr, port_count, queue_count))

/linux/drivers/net/ethernet/freescale/dpaa2/

dpaa2-ethtool.c
    913  int queue_count = dpaa2_eth_queue_count(priv);  in dpaa2_eth_get_channels() (local)
    915  channels->max_rx = queue_count;  in dpaa2_eth_get_channels()
    916  channels->max_tx = queue_count;  in dpaa2_eth_get_channels()
    917  channels->rx_count = queue_count;  in dpaa2_eth_get_channels()
    918  channels->tx_count = queue_count;  in dpaa2_eth_get_channels()
    921  channels->max_other = queue_count + 1;  in dpaa2_eth_get_channels()
    926  channels->other_count = queue_count + 1;  in dpaa2_eth_get_channels()
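dpaa2_eth_get_channels() reports one shared queue count for every ethtool channel field, plus one extra "other" channel. A standalone sketch of filling a trimmed-down channels struct this way; the struct here is a stand-in for struct ethtool_channels, and what the extra slot accounts for is driver-specific:

#include <stdio.h>

/* Trimmed-down stand-in for struct ethtool_channels. */
struct channels {
    unsigned int max_rx, max_tx, max_other;
    unsigned int rx_count, tx_count, other_count;
};

/* RX, TX, and "other" all derive from one queue count, as the
 * dpaa2 hits above do; the + 1 mirrors the driver's extra queue. */
static void get_channels(struct channels *ch, unsigned int queue_count)
{
    ch->max_rx = ch->rx_count = queue_count;
    ch->max_tx = ch->tx_count = queue_count;
    ch->max_other = ch->other_count = queue_count + 1;
}

int main(void)
{
    struct channels ch;

    get_channels(&ch, 8);
    printf("rx=%u tx=%u other=%u\n",
           ch.rx_count, ch.tx_count, ch.other_count);
    return 0;
}
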
/linux/drivers/gpu/drm/panthor/

panthor_sched.c
    567  u32 queue_count;  (member)
    856  for (i = 0; i < group->queue_count; i++)  in group_release_work()
    931  for (u32 i = 0; i < group->queue_count; i++)  in group_bind_locked()
    967  for (u32 i = 0; i < group->queue_count; i++)  in group_unbind_locked()
   1148  for (i = 0; i < group->queue_count; i++) {  in csg_slot_sync_queues_state_locked()
   1218  for (i = 0; i < group->queue_count; i++) {  in csg_slot_sync_state_locked()
   1250  for (i = 0; i < group->queue_count; i++) {  in csg_slot_prog_locked()
   1332  struct panthor_queue *queue = group && cs_id < group->queue_count ?  in cs_slot_process_fault_event_locked()
   1843  return hweight32(inactive_queues) == group->queue_count;  in group_is_idle()
   1976  group->fatal_queues |= GENMASK(group->queue_count - 1, 0);  in tick_ctx_init()
   [all …]
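tick_ctx_init() marks every queue in a group fatal at once with GENMASK(group->queue_count - 1, 0), a mask with one bit per queue. A sketch with GENMASK reimplemented locally for 32-bit use; it assumes queue_count >= 1, since GENMASK(-1, 0) is invalid:

#include <stdio.h>

/* Local reimplementation of the kernel's GENMASK(h, l), 32-bit only. */
#define GENMASK(h, l) \
    ((~0u >> (31 - (h))) & (~0u << (l)))

int main(void)
{
    unsigned int queue_count = 5;   /* assumed >= 1 */

    /* One bit per queue: queues 0..4 -> 0x1f. */
    unsigned int fatal_queues = GENMASK(queue_count - 1, 0);

    printf("mask = 0x%x\n", fatal_queues);  /* prints 0x1f */
    return 0;
}

The same idiom pairs with the hweight32() hit at line 1843: a group is idle exactly when the population count of the inactive-queue mask equals queue_count.
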
/linux/drivers/gpu/drm/amd/amdkfd/

kfd_packet_manager.c
     45  unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;  in pm_calc_rlib_size() (local)
     52  queue_count = pm->dqm->active_queue_count;  in pm_calc_rlib_size()
     76  queue_count * map_queue_size;  in pm_calc_rlib_size()

kfd_packet_manager_v9.c
     56  packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;  in pm_map_process_v9()
    115  packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;  in pm_map_process_aldebaran()

/linux/drivers/nvme/host/

fc.c
   2308  for (i = 1; i < ctrl->ctrl.queue_count; i++)  in nvme_fc_free_io_queues()
   2329  struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];  in nvme_fc_delete_hw_io_queues()
   2332  for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)  in nvme_fc_delete_hw_io_queues()
   2342  for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {  in nvme_fc_create_hw_io_queues()
   2361  for (i = 1; i < ctrl->ctrl.queue_count; i++) {  in nvme_fc_connect_io_queues()
   2381  for (i = 1; i < ctrl->ctrl.queue_count; i++)  in nvme_fc_init_io_queues()
   2482  if (ctrl->ctrl.queue_count > 1) {  in __nvme_fc_abort_outstanding_ios()
   2483  for (q = 1; q < ctrl->ctrl.queue_count; q++)  in __nvme_fc_abort_outstanding_ios()
   2500  if (ctrl->ctrl.queue_count > 1) {  in __nvme_fc_abort_outstanding_ios()
   2910  ctrl->ctrl.queue_count = nr_io_queues + 1;  in nvme_fc_create_io_queues()
   [all …]

rdma.c
    325  BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);  in nvme_rdma_init_hctx()
    664  for (i = 1; i < ctrl->ctrl.queue_count; i++)  in nvme_rdma_free_io_queues()
    672  for (i = 1; i < ctrl->ctrl.queue_count; i++)  in nvme_rdma_stop_io_queues()
    733  ctrl->ctrl.queue_count = nr_io_queues + 1;  in nvme_rdma_alloc_io_queues()
    738  for (i = 1; i < ctrl->ctrl.queue_count; i++) {  in nvme_rdma_alloc_io_queues()
    883  nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);  in nvme_rdma_configure_io_queues()
    902  ctrl->ctrl.queue_count - 1);  in nvme_rdma_configure_io_queues()
    947  if (ctrl->ctrl.queue_count > 1) {  in nvme_rdma_teardown_io_queues()
   1057  if (ctrl->ctrl.queue_count > 1) {  in nvme_rdma_setup_ctrl()
   1083  if (ctrl->ctrl.queue_count > 1) {  in nvme_rdma_setup_ctrl()
   [all …]

tcp.c
   1912  for (i = 1; i < ctrl->queue_count; i++)  in nvme_tcp_free_io_queues()
   1920  for (i = 1; i < ctrl->queue_count; i++)  in nvme_tcp_stop_io_queues()
   1986  for (i = 1; i < ctrl->queue_count; i++) {  in __nvme_tcp_alloc_io_queues()
   2018  ctrl->queue_count = nr_io_queues + 1;  in nvme_tcp_alloc_io_queues()
   2057  nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);  in nvme_tcp_configure_io_queues()
   2076  ctrl->queue_count - 1);  in nvme_tcp_configure_io_queues()
   2179  if (ctrl->queue_count <= 1)  in nvme_tcp_teardown_io_queues()
   2247  if (ctrl->queue_count > 1) {  in nvme_tcp_setup_ctrl()
   2272  if (ctrl->queue_count > 1) {  in nvme_tcp_setup_ctrl()
   2730  ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +  in nvme_tcp_alloc_ctrl()
   [all …]

/linux/tools/testing/selftests/drivers/net/

queues.py
     60  with NetDrvEnv(__file__, queue_count=3) as cfg:

/linux/drivers/net/ethernet/pensando/ionic/

ionic_debugfs.c
     70  (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_TXQ]);  in ionic_debugfs_add_sizes()
     72  (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_RXQ]);  in ionic_debugfs_add_sizes()

/linux/drivers/infiniband/sw/rxe/

rxe_cq.c
     28  count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);  in rxe_cq_chk_attr()

rxe_queue.c
    118  if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))  in resize_finish()

rxe_queue.h
    170  static inline u32 queue_count(const struct rxe_queue *q,  in queue_count() (function)
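Unlike the other subsystems in this listing, rxe implements queue_count() as a function rather than storing a counter: for a power-of-two ring, the depth is derived from the producer and consumer indices. A minimal sketch of that derivation with hypothetical field names; the real struct rxe_queue differs:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical power-of-two ring; not rxe's actual layout. */
struct ring {
    uint32_t prod;        /* producer index, free-running */
    uint32_t cons;        /* consumer index, free-running */
    uint32_t index_mask;  /* num_elem - 1, num_elem a power of two */
};

/* Depth is derived, not stored: the masked index difference stays
 * correct even after the free-running 32-bit indices wrap. */
static inline uint32_t queue_count(const struct ring *q)
{
    return (q->prod - q->cons) & q->index_mask;
}

int main(void)
{
    struct ring q = { .prod = 10, .cons = 7, .index_mask = 7 };

    printf("count = %u\n", (unsigned)queue_count(&q));  /* prints 3 */
    return 0;
}

Deriving the count avoids keeping a separate counter coherent between producer and consumer, which is why the resize_finish() hit above can compare num_elem against queue_count() directly.
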
/linux/drivers/net/ethernet/intel/fm10k/

fm10k_pf.c
    502  u16 glort, queue_count, vsi_count, pc_count;  in fm10k_configure_dglort_map_pf() (local)
    517  queue_count = BIT(dglort->rss_l + dglort->pc_l);  in fm10k_configure_dglort_map_pf()
    524  for (queue = 0; queue < queue_count; queue++, q_idx++) {  in fm10k_configure_dglort_map_pf()
    534  queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);  in fm10k_configure_dglort_map_pf()
    540  for (queue = 0; queue < queue_count; queue++) {  in fm10k_configure_dglort_map_pf()
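fm10k sizes its queue loops from log2-encoded fields: BIT(rss_l + pc_l) turns summed log widths into an actual queue count, since adding logarithms multiplies the sizes. A sketch with a local BIT() stand-in and made-up field values:

#include <stdio.h>

/* Local stand-in for the kernel's BIT() macro. */
#define BIT(n) (1u << (n))

int main(void)
{
    /* Hypothetical log2-encoded widths, as in the dglort map. */
    unsigned int rss_l = 2;   /* 2^2 = 4 RSS queues */
    unsigned int pc_l = 1;    /* 2^1 = 2 priority classes */

    /* Summing logs multiplies the sizes: 2^(2+1) = 8 queues. */
    unsigned int queue_count = BIT(rss_l + pc_l);

    for (unsigned int queue = 0; queue < queue_count; queue++)
        printf("programming queue %u\n", queue);
    return 0;
}
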
/linux/drivers/scsi/hisi_sas/

hisi_sas_v3_hw.c
    611  for (i = 0; i < hisi_hba->queue_count; i++)  in interrupt_enable_v3_hw()
    635  (u32)((1ULL << hisi_hba->queue_count) - 1));  in init_reg_v3_hw()
    707  for (i = 0; i < hisi_hba->queue_count; i++) {  in init_reg_v3_hw()
   2663  for (i = 0; i < hisi_hba->queue_count; i++)  in interrupt_disable_v3_hw()
   3137  (u32)((1ULL << hisi_hba->queue_count) - 1));  in debugfs_snapshot_restore_v3_hw()
   3405  experimental_iopoll_q_cnt >= hisi_hba->queue_count)  in hisi_sas_shost_alloc_pci()
   3429  for (i = 0; i < hisi_hba->queue_count; i++)  in debugfs_snapshot_cq_reg_v3_hw()
   3441  for (i = 0; i < hisi_hba->queue_count; i++) {  in debugfs_snapshot_dq_reg_v3_hw()
   3817  for (c = 0; c < hisi_hba->queue_count; c++) {  in debugfs_create_files_v3_hw()
   3827  for (d = 0; d < hisi_hba->queue_count; d++) {  in debugfs_create_files_v3_hw()
   [all …]

hisi_sas_v1_hw.c
    657  (u32)((1ULL << hisi_hba->queue_count) - 1));  in init_reg_v1_hw()
    699  for (i = 0; i < hisi_hba->queue_count; i++) {  in init_reg_v1_hw()
   1650  for (i = 0; i < hisi_hba->queue_count; i++, idx++) {  in interrupt_init_v1_hw()
   1664  idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;  in interrupt_init_v1_hw()
   1679  hisi_hba->cq_nvecs = hisi_hba->queue_count;  in interrupt_init_v1_hw()

hisi_sas_main.c
    668  int queue = i % hisi_hba->queue_count;  in hisi_sas_alloc_dev()
    701  if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt)  in hisi_sas_queue_is_poll()
    718  for (i = 0; i < hisi_hba->queue_count; i++) {  in hisi_sas_sync_poll_cqs()
    731  for (i = 0; i < hisi_hba->queue_count; i++) {  in hisi_sas_sync_cqs()
   2141  for (i = 0; i < hisi_hba->queue_count; i++) {  in hisi_sas_init_mem()
   2193  for (i = 0; i < hisi_hba->queue_count; i++) {  in hisi_sas_alloc()
   2416  &hisi_hba->queue_count)) {  in hisi_sas_get_fw_info()
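hisi_sas_alloc_dev() spreads devices over delivery queues with a plain modulo (queue = i % queue_count), and the v1/v3 register setup builds an all-queues mask with (1ULL << queue_count) - 1, the same shape as panthor's GENMASK use above. A trivial sketch of the round-robin assignment, with an assumed queue count:

#include <stdio.h>

int main(void)
{
    int queue_count = 4;   /* hypothetical; the driver reads it from firmware */

    /* Round-robin: device i lands on queue i % queue_count, so
     * consecutive devices spread evenly across all queues. */
    for (int i = 0; i < 10; i++)
        printf("device %d -> queue %d\n", i, i % queue_count);
    return 0;
}
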
/linux/drivers/net/wireless/microchip/wilc1000/

netdev.c
    737  int queue_count;  in wilc_mac_xmit() (local)
    758  queue_count = wilc_wlan_txq_add_net_pkt(ndev, tx_data,  in wilc_mac_xmit()
    762  if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {  in wilc_mac_xmit()
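wilc_mac_xmit() applies software flow control: it enqueues the packet, then compares the returned queue depth against FLOW_CONTROL_UPPER_THRESHOLD and throttles transmission when the depth exceeds it. A sketch of the stop/wake hysteresis such a scheme implies; the numeric values and the wake side here are assumptions, not the driver's constants:

#include <stdio.h>
#include <stdbool.h>

/* Assumed values; the real driver defines its own threshold pair. */
#define FLOW_CONTROL_UPPER_THRESHOLD 256
#define FLOW_CONTROL_LOWER_THRESHOLD 128

static bool tx_stopped;

/* Called after enqueuing a packet; queue_count is the new depth. */
static void tx_flow_control(int queue_count)
{
    if (!tx_stopped && queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
        tx_stopped = true;    /* netif_stop_queue() in a real driver */
        printf("stop tx at depth %d\n", queue_count);
    } else if (tx_stopped && queue_count < FLOW_CONTROL_LOWER_THRESHOLD) {
        tx_stopped = false;   /* netif_wake_queue() */
        printf("wake tx at depth %d\n", queue_count);
    }
}

int main(void)
{
    tx_flow_control(300);   /* crosses upper threshold: stops */
    tx_flow_control(100);   /* falls below lower threshold: wakes */
    return 0;
}

The gap between the two thresholds prevents the queue from flapping between stopped and running when the depth hovers near a single cutoff.
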
/linux/drivers/md/dm-vdo/

vdo.h
    292  unsigned int queue_count, void *contexts[]);

/linux/drivers/net/wireless/intel/iwlwifi/dvm/

rs.h
    309  u8 queue_count;  /* number of queues that has  (member)

/linux/drivers/net/wireless/intel/iwlegacy/

4965-rs.c
    240  while (tl->queue_count && tl->time_stamp < oldest_time) {  in il4965_rs_tl_rm_old_stats()
    244  tl->queue_count--;  in il4965_rs_tl_rm_old_stats()
    278  if (!(tl->queue_count)) {  in il4965_rs_tl_add_packet()
    281  tl->queue_count = 1;  in il4965_rs_tl_add_packet()
    299  if ((idx + 1) > tl->queue_count)  in il4965_rs_tl_add_packet()
    300  tl->queue_count = idx + 1;  in il4965_rs_tl_add_packet()
    323  if (!(tl->queue_count))  in il4965_rs_tl_get_load()
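il4965_rs_tl_rm_old_stats() ages a sliding window of traffic-load time slices, decrementing queue_count as each slice older than the cutoff expires. A simplified standalone model of that aging loop; the field names mirror the hits above, but the array layout and sizes are invented:

#include <stdio.h>

#define TID_QUEUE_MAX_SIZE 8

/* Simplified traffic-load window; not the driver's real struct. */
struct traffic_load {
    unsigned long time_stamp;                      /* age of oldest slice */
    unsigned long packet_count[TID_QUEUE_MAX_SIZE];
    int head;                                      /* index of oldest slice */
    int queue_count;                               /* slices currently in use */
};

/* Walk forward from the oldest slice, dropping every slice that
 * predates oldest_time; each drop shrinks queue_count by one. */
static void rm_old_stats(struct traffic_load *tl, unsigned long slice_len,
                         unsigned long oldest_time)
{
    while (tl->queue_count && tl->time_stamp < oldest_time) {
        tl->packet_count[tl->head] = 0;
        tl->time_stamp += slice_len;
        tl->queue_count--;
        tl->head = (tl->head + 1) % TID_QUEUE_MAX_SIZE;
    }
}

int main(void)
{
    struct traffic_load tl = { .time_stamp = 0, .queue_count = 3 };

    tl.packet_count[0] = tl.packet_count[1] = tl.packet_count[2] = 5;
    rm_old_stats(&tl, 100, 150);   /* drops the slices at t=0 and t=100 */
    printf("slices left = %d\n", tl.queue_count);  /* prints 1 */
    return 0;
}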