/linux/drivers/accel/habanalabs/common/
security.c
    308: u32 dcore_offset, u32 num_instances, u32 instance_offset,  [in hl_init_pb_with_mask(), argument]
    327: for (j = 0 ; j < num_instances ; j++) {  [in hl_init_pb_with_mask()]
    328: int seq = i * num_instances + j;  [in hl_init_pb_with_mask()]
    360: u32 num_instances, u32 instance_offset,  [in hl_init_pb(), argument]
    365: num_instances, instance_offset, pb_blocks,  [in hl_init_pb()]
    388: u32 dcore_offset, u32 num_instances, u32 instance_offset,  [in hl_init_pb_ranges_with_mask(), argument]
    411: for (j = 0 ; j < num_instances ; j++) {  [in hl_init_pb_ranges_with_mask()]
    412: int seq = i * num_instances + j;  [in hl_init_pb_ranges_with_mask()]
    446: u32 dcore_offset, u32 num_instances, u32 instance_offset,  [in hl_init_pb_ranges(), argument]
    452: num_instances, instance_offset, pb_blocks,  [in hl_init_pb_ranges()]
    [all …]
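The habanalabs hits above share one addressing pattern: a dcore index and an instance index are folded into a flat sequence number, and per-level strides build each block's base address. A minimal standalone sketch of that pattern (all names are illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the loop shape in hl_init_pb_with_mask(). */
static void init_pb_sketch(uint32_t dcore_base, uint32_t num_dcores,
			   uint32_t dcore_offset, uint32_t num_instances,
			   uint32_t instance_offset)
{
	uint32_t i, j;

	for (i = 0; i < num_dcores; i++) {
		for (j = 0; j < num_instances; j++) {
			/* flat index, e.g. for a "skip this engine" mask */
			int seq = i * num_instances + j;
			/* base of this instance's protection-bit blocks */
			uint32_t base = dcore_base + i * dcore_offset +
					j * instance_offset;

			printf("seq %d -> base 0x%x\n", seq,
			       (unsigned int)base);
		}
	}
}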
habanalabs.h
    4203: u32 dcore_offset, u32 num_instances, u32 instance_offset,
    4207: u32 num_instances, u32 instance_offset,
    4211: u32 dcore_offset, u32 num_instances, u32 instance_offset,
    4216: u32 dcore_offset, u32 num_instances, u32 instance_offset,
    4221: u32 num_instances, u32 instance_offset,
    4225: u32 num_instances, u32 instance_offset,
    4230: u32 num_instances, u32 instance_offset,
    4233: u32 dcore_offset, u32 num_instances, u32 instance_offset,
    4236: u32 num_instances, u32 instance_offset,
/linux/drivers/iommu/arm/arm-smmu/
arm-smmu-nvidia.c
    37: unsigned int num_instances;  [member]
    69: for (i = 0; i < nvidia->num_instances; i++) {  [in nvidia_smmu_write_reg()]
    90: for (i = 0; i < nvidia->num_instances; i++) {  [in nvidia_smmu_write_reg64()]
    112: for (i = 0; i < nvidia->num_instances; i++) {  [in nvidia_smmu_tlb_sync()]
    137: for (i = 0; i < nvidia->num_instances; i++) {  [in nvidia_smmu_reset()]
    182: for (inst = 0; inst < nvidia->num_instances; inst++) {  [in nvidia_smmu_global_fault()]
    230: for (inst = 0; inst < nvidia->num_instances; inst++) {  [in nvidia_smmu_context_fault()]
    323: nvidia_smmu->num_instances++;  [in nvidia_smmu_impl_init()]
    334: nvidia_smmu->num_instances++;  [in nvidia_smmu_impl_init()]
    337: if (nvidia_smmu->num_instances == 1)  [in nvidia_smmu_impl_init()]
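The NVIDIA SMMU wrapper fronts several identical SMMU instances with one logical device, so programming writes are replayed to every instance. A minimal sketch of that mirroring idiom, with an assumed struct layout:

#include <stddef.h>
#include <stdint.h>

#define MAX_SMMU_INSTANCES 2	/* hypothetical cap for the sketch */

struct nvidia_smmu_sketch {
	unsigned int num_instances;
	volatile uint32_t *bases[MAX_SMMU_INSTANCES];	/* per-instance MMIO */
};

/*
 * Replay one programming write to every instance so they stay in
 * lockstep; reads would come from a single designated instance.
 */
static void smmu_write_reg_sketch(struct nvidia_smmu_sketch *s,
				  size_t reg, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < s->num_instances; i++)
		s->bases[i][reg / sizeof(uint32_t)] = val;
}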
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_sdma.c
    45: for (i = 0; i < adev->sdma.num_instances; i++)  [in amdgpu_sdma_get_instance_from_ring()]
    58: for (i = 0; i < adev->sdma.num_instances; i++) {  [in amdgpu_sdma_get_index_from_ring()]
    103: for (i = 0; i < adev->sdma.num_instances; i++) {  [in amdgpu_sdma_ras_late_init()]
    193: for (i = 0; i < adev->sdma.num_instances; i++) {  [in amdgpu_sdma_destroy_inst_ctx()]
    240: for (i = 1; i < adev->sdma.num_instances; i++)  [in amdgpu_sdma_init_microcode()]
    252: for (i = 0; i < adev->sdma.num_instances; i++) {  [in amdgpu_sdma_init_microcode()]
    371: mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1;  [in amdgpu_debugfs_sdma_sched_mask_set()]
    376: for (i = 0; i < adev->sdma.num_instances; ++i) {  [in amdgpu_debugfs_sdma_sched_mask_set()]
    415: for (i = 0; i < adev->sdma.num_instances; ++i) {  [in amdgpu_debugfs_sdma_sched_mask_get()]
    450: if (!(adev->sdma.num_instances > 1))  [in amdgpu_debugfs_sdma_sched_mask_init()]
    [all …]
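The debugfs hit at 371 builds an all-rings bitmask as BIT_ULL(num_instances * num_ring) - 1, i.e. one bit per ring across every SDMA instance. A standalone sketch of the validity check such a mask enables (assuming the product stays below 64):

#include <stdbool.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* Valid only while num_instances * num_ring < 64. */
static bool sdma_sched_mask_valid(uint64_t val, uint32_t num_instances,
				  uint32_t num_ring)
{
	/* low (num_instances * num_ring) bits set, everything else clear */
	uint64_t mask = BIT_ULL(num_instances * num_ring) - 1;

	return (val & ~mask) == 0;
}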
sdma_v4_4_2.c
    168: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v4_4_2_inst_init_golden_registers()]
    197: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v4_4_2_init_microcode()]
    1457: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v4_4_2_sw_init()]
    1516: ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);  [in sdma_v4_4_2_sw_init()]
    1534: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v4_4_2_sw_fini()]
    1559: inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);  [in sdma_v4_4_2_hw_init()]
    1577: inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);  [in sdma_v4_4_2_hw_fini()]
    1579: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v4_4_2_hw_fini()]
    1614: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v4_4_2_is_idle()]
    1631: for (j = 0; j < adev->sdma.num_instances; j++) {  [in sdma_v4_4_2_wait_for_idle()]
    [all …]
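The hw_init/hw_fini hits build an instance bitmask with GENMASK(num_instances - 1, 0); the same idiom recurs in the sdma_v5_x files below. Outside the kernel that reduces to (1 << n) - 1; a sketch assuming 0 < num_instances <= 32:

#include <stdint.h>

/*
 * GENMASK(num_instances - 1, 0) sets one bit per present instance,
 * e.g. 4 instances -> 0xf.
 */
static uint32_t sdma_inst_mask(uint32_t num_instances)
{
	return (num_instances >= 32) ? 0xffffffffu
				     : (1u << num_instances) - 1;
}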
sdma_v7_0.c
    403: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v7_0_gfx_stop()]
    458: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v7_0_enable()]
    563: adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);  [in sdma_v7_0_gfx_resume_instance()]
    637: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v7_0_gfx_resume()]
    664: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v12_0_free_ucode_buffer()]
    701: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v7_0_load_microcode()]
    760: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v7_0_soft_reset()]
    795: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v7_0_check_soft_reset()]
    812: if (ring->me >= adev->sdma.num_instances) {  [in sdma_v7_0_reset_queue()]
    1315: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v7_0_sw_init()]
    [all …]
sdma_v6_0.c
    399: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v6_0_gfx_stop()]
    435: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v6_0_ctxempty_int_enable()]
    465: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v6_0_enable()]
    567: adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);  [in sdma_v6_0_gfx_resume_instance()]
    635: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v6_0_gfx_resume()]
    717: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v6_0_load_microcode()]
    767: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v6_0_soft_reset()]
    802: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v6_0_check_soft_reset()]
    1329: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v6_0_sw_init()]
    1372: ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);  [in sdma_v6_0_sw_init()]
    [all …]
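The sw_init hit at 1372 (and its twins in sdma_v4_4_2.c, sdma_v5_2.c, and sdma_v5_0.c) sizes one flat, zeroed register-dump buffer as num_instances * reg_count dwords. A userspace sketch of the same layout, with instance-major indexing assumed:

#include <stdint.h>
#include <stdlib.h>

/* calloc stands in for kcalloc: zeroed and overflow-checked. */
static uint32_t *alloc_reg_dump(uint32_t num_instances, uint32_t reg_count)
{
	return calloc((size_t)num_instances * reg_count, sizeof(uint32_t));
}

/* Instance-major slot lookup into the flat buffer. */
static uint32_t *reg_dump_slot(uint32_t *buf, uint32_t inst,
			       uint32_t reg_count, uint32_t reg)
{
	return &buf[(size_t)inst * reg_count + reg];
}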
sdma_v5_2.c
    477: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_2_ctx_switch_enable()]
    511: inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);  [in sdma_v5_2_enable()]
    518: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_2_enable()]
    700: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_2_gfx_resume()]
    740: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_2_load_microcode()]
    795: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_2_soft_reset()]
    1312: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_2_sw_init()]
    1320: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_2_sw_init()]
    1364: ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);  [in sdma_v5_2_sw_init()]
    1382: for (i = 0; i < adev->sdma.num_instances; i++)  [in sdma_v5_2_sw_fini()]
    [all …]
vpe_v6_1.c
    78: for (i = 0; i < vpe->num_instances; i++) {  [in vpe_v6_1_halt()]
    108: for (i = 0; i < vpe->num_instances; i++) {  [in vpe_v6_1_set_collaborate_mode()]
    133: for (j = 0; j < vpe->num_instances; j++) {  [in vpe_v6_1_load_microcode()]
    183: for (j = 0; j < vpe->num_instances; j++) {  [in vpe_v6_1_load_microcode()]
    215: for (i = 0; i < vpe->num_instances; i++) {  [in vpe_v6_1_ring_start()]
    282: for (i = 0; i < vpe->num_instances; i++) {  [in vpe_v_6_1_ring_stop()]
sdma_v5_0.c
    294: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_0_init_microcode()]
    627: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_0_ctx_switch_enable()]
    662: inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);  [in sdma_v5_0_enable()]
    671: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_0_enable()]
    853: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_0_gfx_resume()]
    893: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_0_load_microcode()]
    1401: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_0_sw_init()]
    1440: ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);  [in sdma_v5_0_sw_init()]
    1458: for (i = 0; i < adev->sdma.num_instances; i++)  [in sdma_v5_0_sw_fini()]
    1509: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v5_0_is_idle()]
    [all …]
aqua_vanjaram.c
    50: for (i = 0; i < adev->sdma.num_instances; i++)  [in aqua_vanjaram_doorbell_index_init()]
    175: num_sdma = adev->sdma.num_instances;  [in __aqua_vanjaram_get_xcp_ip_info()]
    285: max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;  [in aqua_vanjaram_get_xcp_res_info()]
    542: adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);  [in aqua_vanjaram_init_soc_config()]
    675: pcie_reg_state->common_header.num_instances = 1;  [in aqua_vanjaram_read_pcie_state()]
    759: xgmi_reg_state->common_header.num_instances = max_xgmi_instances;  [in aqua_vanjaram_read_xgmi_state()]
    832: wafl_reg_state->common_header.num_instances = max_wafl_instances;  [in aqua_vanjaram_read_wafl_state()]
    951: usr_reg_state->common_header.num_instances = max_usr_instances;  [in aqua_vanjaram_read_usr_state()]
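The hit at 542 derives num_instances from a presence bitmask via NUM_SDMA(adev->sdma.sdma_mask). Assuming NUM_SDMA is a population count of the mask (its definition is not in this listing), a standalone equivalent:

#include <stdint.h>

/* Count set bits: each bit in sdma_mask marks one present SDMA engine. */
static unsigned int num_sdma_from_mask(uint32_t sdma_mask)
{
	unsigned int n = 0;

	while (sdma_mask) {
		sdma_mask &= sdma_mask - 1;	/* clear lowest set bit */
		n++;
	}
	return n;
}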
sdma_v4_4.c
    243: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v4_4_reset_ras_error_count()]
    256: for (i = 0; i < adev->sdma.num_instances; i++) {  [in sdma_v4_4_query_ras_error_count()]
amdgpu_discovery.c
    1413: if (adev->sdma.num_instances <  [in amdgpu_discovery_reg_base_init()]
    1415: adev->sdma.num_instances++;  [in amdgpu_discovery_reg_base_init()]
    1420: adev->sdma.num_instances + 1,  [in amdgpu_discovery_reg_base_init()]
    1426: if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)  [in amdgpu_discovery_reg_base_init()]
    1427: adev->vpe.num_instances++;  [in amdgpu_discovery_reg_base_init()]
    1430: adev->vpe.num_instances + 1,  [in amdgpu_discovery_reg_base_init()]
    2564: adev->sdma.num_instances = 2;  [in amdgpu_discovery_set_ip_blocks()]
    2591: adev->sdma.num_instances = 2;  [in amdgpu_discovery_set_ip_blocks()]
    2618: adev->sdma.num_instances = 1;  [in amdgpu_discovery_set_ip_blocks()]
    2664: adev->sdma.num_instances = 2;  [in amdgpu_discovery_set_ip_blocks()]
    [all …]
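During IP discovery the counters are bumped once per enumerated block and clamped at a compile-time cap (the vpe path checks AMDGPU_MAX_VPE_INSTANCES before incrementing). A sketch of that guard, with a hypothetical cap:

#include <stdio.h>

#define MAX_INSTANCES_SKETCH 4	/* stand-in for the driver's per-IP cap */

static void count_instance(unsigned int *num_instances, const char *ip_name)
{
	if (*num_instances < MAX_INSTANCES_SKETCH)
		(*num_instances)++;
	else
		fprintf(stderr, "too many %s instances, ignoring one\n",
			ip_name);
}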
amdgpu_sdma.h
    124: int num_instances;  [member]
amdgpu_vpe.c
    735: for (i = 0; i < vpe->num_instances; i++) {  [in vpe_ring_set_wptr()]
    897: if (adev->vpe.num_instances) {  [in amdgpu_vpe_sysfs_reset_mask_init()]
    909: if (adev->vpe.num_instances)  [in amdgpu_vpe_sysfs_reset_mask_fini()]
amdgpu_kms.c
    321: if (query_fw->index >= adev->sdma.num_instances)  [in amdgpu_firmware_info()]
    441: for (i = 0; i < adev->sdma.num_instances; i++)  [in amdgpu_hw_ip_info()]
    713: count = adev->sdma.num_instances;  [in amdgpu_info_ioctl()]
    1853: for (i = 0; i < adev->sdma.num_instances; i++) {  [in amdgpu_debugfs_firmware_info_show()]
amdgpu_mes.c
    148: num_pipes = adev->sdma.num_instances;  [in amdgpu_mes_init()]
soc15.c
    1282: for (i = 0; i < adev->sdma.num_instances; i++) {  [in soc15_sdma_doorbell_range_init()]
amdgpu_ras.c
    379: mask = GENMASK(adev->sdma.num_instances - 1, 0);  [in amdgpu_ras_instance_mask_check()]
    5260: void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)  [in amdgpu_ras_query_boot_status(), argument]
    5264: for (i = 0; i < num_instances; i++) {  [in amdgpu_ras_query_boot_status()]
/linux/drivers/gpu/drm/amd/include/
amdgpu_reg_state.h
    51: uint8_t num_instances;  [member]
/linux/drivers/media/platform/samsung/exynos4-is/
fimc-lite.h
    69: unsigned short num_instances;  [member]
/linux/include/sound/
timer.h
    79: int num_instances; /* current number of timer instances */  [member]
/linux/sound/core/
timer.c
    235: if (master->timer->num_instances >= master->timer->max_instances)  [in check_matching_master_slave()]
    238: master->timer->num_instances++;  [in check_matching_master_slave()]
    353: if (timer->num_instances >= timer->max_instances) {  [in snd_timer_open()]
    380: timer->num_instances++;  [in snd_timer_open()]
    406: timer->num_instances--;  [in remove_slave_links()]
    438: timer->num_instances--;  [in snd_timer_close_locked()]
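Here num_instances is a plain occupancy counter: the open paths refuse new handles once max_instances is reached, and the close/unlink paths decrement. A stripped-down sketch (locking omitted; the real paths run under the timer lock, and the refusal error code is an assumption):

#include <errno.h>

struct timer_sketch {
	int num_instances;	/* currently open handles */
	int max_instances;	/* configured ceiling */
};

static int timer_open_sketch(struct timer_sketch *t)
{
	if (t->num_instances >= t->max_instances)
		return -EBUSY;	/* assumed error code */
	t->num_instances++;
	return 0;
}

static void timer_close_sketch(struct timer_sketch *t)
{
	t->num_instances--;
}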
/linux/include/net/bluetooth/
mgmt.h
    551: __u8 num_instances;  [member]
/linux/drivers/net/ethernet/mellanox/mlx4/
resource_tracker.c
    426: int vf, int num_instances)  [in initialize_res_quotas(), argument]
    428: res_alloc->guaranteed[vf] = num_instances /  [in initialize_res_quotas()]
    430: res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];  [in initialize_res_quotas()]
    432: res_alloc->res_free = num_instances;  [in initialize_res_quotas()]
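The quota scheme gives each VF a small guaranteed slice plus access to a floating half of the pool: quota[vf] = num_instances / 2 + guaranteed[vf]. The guaranteed-share divisor is cut off in the hit at 428, so the divisor below (2 * num_vfs) is only an assumption:

struct res_quota_sketch {
	int guaranteed;
	int quota;
};

/* Assumes num_vfs > 0; the real divisor is truncated in the listing. */
static void init_quota_sketch(struct res_quota_sketch *q, int num_instances,
			      int num_vfs)
{
	q->guaranteed = num_instances / (2 * num_vfs);	/* assumed divisor */
	q->quota = num_instances / 2 + q->guaranteed;
	/* res_free starts at the full pool: num_instances */
}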