Lines Matching +full:0 +full:xd4000000

54 #define GAUDI2_TPC_FULL_MASK			0x1FFFFFF
55 #define GAUDI2_HIF_HMMU_FULL_MASK 0xFFFF
56 #define GAUDI2_DECODER_FULL_MASK 0x3FF
58 #define GAUDI2_NA_EVENT_CAUSE 0xFF
82 #define GAUDI2_ARB_WDT_TIMEOUT (0x1000000)
104 #define PCIE_DEC_EN_MASK 0x300
105 #define DEC_WORK_STATE_IDLE 0
114 #define GAUDI2_HBM_MMU_SCRM_MOD_SHIFT 0
119 #define MMU_RANGE_INV_EN_SHIFT 0
126 #define GAUDI2_PMMU_SPI_SEI_ENABLE_MASK GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 2, 0)
127 #define GAUDI2_HMMU_SPI_SEI_ENABLE_MASK GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 1, 0)
138 ((x & 0xF8001FF0) >> 4)
141 ((x & 0x00001FF0) >> 4)
143 #define RAZWI_INITIATOR_AXUER_L_X_SHIFT 0
144 #define RAZWI_INITIATOR_AXUER_L_X_MASK 0x1F
146 #define RAZWI_INITIATOR_AXUER_L_Y_MASK 0xF
149 #define RAZWI_INITIATOR_AXUER_H_X_MASK 0x1F
164 /* HW scrambles only bits 0-25 */
177 {RAZWI_INITIATOR_ID_X_Y(2, 4, 0), mmDCORE0_RTR0_CTRL_BASE,
185 {RAZWI_INITIATOR_ID_X_Y(2, 11, 0), mmDCORE2_RTR0_CTRL_BASE,
299 {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
303 {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
307 {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
311 {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
315 {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
319 {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
323 {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
327 {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
428 [HBM_ID0] = 0xFFFC,
429 [HBM_ID1] = 0xFFCF,
430 [HBM_ID2] = 0xF7F7,
431 [HBM_ID3] = 0x7F7F,
432 [HBM_ID4] = 0xFCFF,
433 [HBM_ID5] = 0xCFFF,
437 [0] = HBM_ID0,
796 "FENCE 0 inc over max value and clipped",
800 "FENCE 0 dec under min value and clipped",
817 "FENCE 0 inc over max value and clipped",
821 "FENCE 0 dec under min value and clipped",
1948 "gaudi2 vdec 0_0", "gaudi2 vdec 0_0 abnormal",
1949 "gaudi2 vdec 0_1", "gaudi2 vdec 0_1 abnormal",
2071 MME_WAP0 = 0,
2201 static s64 gaudi2_state_dump_specs_props[SP_MAX] = {0};
2242 ctx->rc = 0; in gaudi2_iterate_tpcs()
2244 for (dcore = 0; dcore < NUM_OF_DCORES; dcore++) { in gaudi2_iterate_tpcs()
2245 for (inst = 0; inst < NUM_OF_TPC_PER_DCORE; inst++) { in gaudi2_iterate_tpcs()
2267 ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx); in gaudi2_iterate_tpcs()
2289 return 0; in set_number_of_functional_hbms()
2300 "HBM binning supports max of %d faulty HBMs, supplied mask 0x%llx.\n", in set_number_of_functional_hbms()
2310 return 0; in set_number_of_functional_hbms()
2330 u64 hbm_drv_base_offset = 0, edma_pq_base_addr; in gaudi2_set_dram_properties()
2331 u32 basic_hbm_page_size, edma_idx = 0; in gaudi2_set_dram_properties()
2398 for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) { in gaudi2_set_dram_properties()
2406 return 0; in gaudi2_set_dram_properties()
2413 u32 num_sync_stream_queues = 0; in gaudi2_set_fixed_properties()
2425 for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) { in gaudi2_set_fixed_properties()
2427 q_props[i].driver_only = 0; in gaudi2_set_fixed_properties()
2430 q_props[i].supports_sync_stream = 0; in gaudi2_set_fixed_properties()
2484 prop->dmmu.host_resident = 0; in gaudi2_set_fixed_properties()
2577 prop->first_available_user_sob[0] = GAUDI2_RESERVED_SOB_NUMBER + in gaudi2_set_fixed_properties()
2580 prop->first_available_user_mon[0] = GAUDI2_RESERVED_MON_NUMBER + in gaudi2_set_fixed_properties()
2587 prop->first_available_cq[0] = GAUDI2_RESERVED_CQ_NUMBER; in gaudi2_set_fixed_properties()
2606 return 0; in gaudi2_set_fixed_properties()
2625 return 0; in gaudi2_pci_bars_map()
2665 return 0; in gaudi2_init_iatu()
2667 /* Temporary inbound Region 0 - Bar 0 - Point to CFG in gaudi2_init_iatu()
2675 rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region); in gaudi2_init_iatu()
2681 bar_addr_low = RREG32(mmPCIE_DBI_BAR0_REG + STM_FLASH_ALIGNED_OFF) & ~0xF; in gaudi2_init_iatu()
2685 /* Inbound Region 0 - Bar 0 - Point to CFG */ in gaudi2_init_iatu()
2688 inbound_region.offset_in_bar = 0; in gaudi2_init_iatu()
2691 rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region); in gaudi2_init_iatu()
2695 /* Inbound Region 1 - Bar 0 - Point to BAR0_RESERVED + SRAM */ in gaudi2_init_iatu()
2713 /* Outbound Region 0 - Point to Host */ in gaudi2_init_iatu()
2735 dev_err(hdev->dev, "TPC binning is supported for max of %d faulty TPCs, provided mask 0x%llx\n", in gaudi2_tpc_binning_init_prop()
2744 return 0; in gaudi2_tpc_binning_init_prop()
2752 u8 subst_idx = 0; in gaudi2_set_tpc_binning_masks()
2761 for (i = 0 ; i < MAX_FAULTY_TPCS ; i++) { in gaudi2_set_tpc_binning_masks()
2764 if (tpc_binning_mask == 0) in gaudi2_set_tpc_binning_masks()
2767 if (subst_idx == 0) { in gaudi2_set_tpc_binning_masks()
2802 return 0; in gaudi2_set_tpc_binning_masks()
2817 …r(hdev->dev, "decoder binning is supported for max of single faulty decoder, provided mask 0x%x\n", in gaudi2_set_dec_binning_masks()
2829 return 0; in gaudi2_set_dec_binning_masks()
2838 prop->dram_binning_mask = 0; in gaudi2_set_dram_binning_masks()
2863 "EDMA binning is supported for max of single faulty EDMA, provided mask 0x%x\n", in gaudi2_set_edma_binning_masks()
2869 prop->edma_binning_mask = 0; in gaudi2_set_edma_binning_masks()
2871 return 0; in gaudi2_set_edma_binning_masks()
2888 return 0; in gaudi2_set_edma_binning_masks()
2899 return 0; in gaudi2_set_xbar_edge_enable_mask()
2903 * note that it can be set to value other than 0 only after cpucp packet (i.e. in gaudi2_set_xbar_edge_enable_mask()
2904 * only the FW can set a redundancy value). for user it'll always be 0. in gaudi2_set_xbar_edge_enable_mask()
2924 return 0; in gaudi2_set_xbar_edge_enable_mask()
2936 hdev->asic_prop.faulty_dram_cluster_map = 0; in gaudi2_set_cluster_binning_masks_common()
2952 return 0; in gaudi2_set_cluster_binning_masks_common()
2971 return 0; in gaudi2_set_cluster_binning_masks()
2990 return 0; in gaudi2_set_binning_masks()
3002 return 0; in gaudi2_cpucp_info_get()
3008 return 0; in gaudi2_cpucp_info_get()
3041 dev_dbg(hdev->dev, "Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x\n", in gaudi2_cpucp_info_get()
3058 if (max_power < 0) in gaudi2_cpucp_info_get()
3063 return 0; in gaudi2_cpucp_info_get()
3073 return 0; in gaudi2_fetch_psoc_frequency()
3081 return 0; in gaudi2_fetch_psoc_frequency()
3091 return 0; in gaudi2_mmu_clear_pgt_range()
3094 return 0; in gaudi2_mmu_clear_pgt_range()
3096 rc = gaudi2_memset_device_memory(hdev, prop->mmu_pgt_addr, prop->dmmu.pgt_size, 0); in gaudi2_mmu_clear_pgt_range()
3167 return 0; in gaudi2_early_init()
3181 return 0; in gaudi2_early_fini()
3250 rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id], in gaudi2_scrub_arc_dccm()
3262 rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id], in gaudi2_scrub_arc_dccm()
3272 rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id], in gaudi2_scrub_arc_dccm()
3278 reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK, 0); in gaudi2_scrub_arc_dccm()
3282 rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id], in gaudi2_scrub_arc_dccm()
3288 return 0; in gaudi2_scrub_arc_dccm()
3305 return 0; in gaudi2_scrub_arcs_dccm()
3340 return 0; in gaudi2_late_init()
3343 hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0); in gaudi2_late_init()
3376 for (i = 0 ; i < NUM_ARC_CPUS ; i++) { in gaudi2_user_mapped_blocks_init()
3412 for (i = 0 ; i < num_umr_blocks ; i++) { in gaudi2_user_mapped_blocks_init()
3445 int i, j, rc = 0; in gaudi2_alloc_cpu_accessible_dma_mem()
3452 for (i = 0 ; i < GAUDI2_ALLOC_CPU_MEM_RETRY_CNT ; i++) { in gaudi2_alloc_cpu_accessible_dma_mem()
3476 for (j = 0 ; j < i ; j++) in gaudi2_alloc_cpu_accessible_dma_mem()
3510 region->offset_in_bar = 0; in gaudi2_set_pci_memory_regions()
3522 HL_USR_INTR_STRUCT_INIT(hdev->tpc_interrupt, hdev, 0, HL_USR_INTERRUPT_TPC); in gaudi2_user_interrupt_setup()
3525 HL_USR_INTR_STRUCT_INIT(hdev->unexpected_error_interrupt, hdev, 0, in gaudi2_user_interrupt_setup()
3545 for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, j = 0 ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_NRM; in gaudi2_user_interrupt_setup()
3550 for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++) in gaudi2_user_interrupt_setup()
3597 for (i = 0 ; i < prop->num_of_special_blocks ; i++) in gaudi2_special_blocks_config()
3602 memset(&prop->skip_special_blocks_cfg, 0, sizeof(prop->skip_special_blocks_cfg)); in gaudi2_special_blocks_config()
3608 sizeof(gaudi2_iterator_skip_block_types[0]), GFP_KERNEL); in gaudi2_special_blocks_config()
3624 sizeof(gaudi2_iterator_skip_block_ranges[0]), GFP_KERNEL); in gaudi2_special_blocks_config()
3630 for (i = 0 ; i < ARRAY_SIZE(gaudi2_iterator_skip_block_ranges) ; i++) in gaudi2_special_blocks_config()
3639 return 0; in gaudi2_special_blocks_config()
3660 for (i = 0 ; i < GAUDI2_NUM_TESTED_QS ; i++) { in gaudi2_test_queues_msgs_free()
3677 for (i = 0 ; i < GAUDI2_NUM_TESTED_QS ; i++) { in gaudi2_test_queues_msgs_alloc()
3689 return 0; in gaudi2_test_queues_msgs_alloc()
3707 for (i = 0 ; i < ARRAY_SIZE(gaudi2_irq_map_table) ; i++) { in gaudi2_sw_init()
3721 for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++) in gaudi2_sw_init()
3733 GAUDI2_DMA_POOL_BLK_SIZE, DEVICE_CACHE_LINE_SIZE, 0); in gaudi2_sw_init()
3784 if (hl_fw_version_cmp(hdev, 1, 11, 0) < 0) in gaudi2_sw_init()
3801 return 0; in gaudi2_sw_init()
3839 return 0; in gaudi2_sw_fini()
3871 * counters to 0 (standard clear of fence counters)
3890 val = skip_fence ? U32_MAX : 0; in gaudi2_clear_qm_fence_counters_common()
3919 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_stop_dma_qmans()
3920 for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { in gaudi2_stop_dma_qmans()
3943 for (i = 0 ; i < NUM_OF_DCORES ; i++) { in gaudi2_stop_mme_qmans()
3960 for (i = 0 ; i < TPC_ID_SIZE ; i++) { in gaudi2_stop_tpc_qmans()
3978 for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { in gaudi2_stop_rot_qmans()
3998 for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { in gaudi2_stop_nic_qmans()
4011 reg_val = FIELD_PREP(PDMA0_CORE_CFG_1_HALT_MASK, 0x1); in gaudi2_stall_dma_common()
4030 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_dma_stall()
4031 for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { in gaudi2_dma_stall()
4054 for (i = 0 ; i < NUM_OF_DCORES ; i++) in gaudi2_mme_stall()
4068 for (i = 0 ; i < TPC_ID_SIZE ; i++) { in gaudi2_tpc_stall()
4086 reg_val = FIELD_PREP(ROT_MSS_HALT_WBC_MASK, 0x1) | in gaudi2_rotator_stall()
4087 FIELD_PREP(ROT_MSS_HALT_RSB_MASK, 0x1) | in gaudi2_rotator_stall()
4088 FIELD_PREP(ROT_MSS_HALT_MRSB_MASK, 0x1); in gaudi2_rotator_stall()
4090 for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { in gaudi2_rotator_stall()
4100 WREG32(reg_base + QM_GLBL_CFG0_OFFSET, 0); in gaudi2_disable_qman_common()
4118 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_disable_dma_qmans()
4119 for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { in gaudi2_disable_dma_qmans()
4142 for (i = 0 ; i < NUM_OF_DCORES ; i++) in gaudi2_disable_mme_qmans()
4156 for (i = 0 ; i < TPC_ID_SIZE ; i++) { in gaudi2_disable_tpc_qmans()
4174 for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { in gaudi2_disable_rot_qmans()
4194 for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { in gaudi2_disable_nic_qmans()
4206 WREG32(mmPSOC_TIMESTAMP_BASE, 0); in gaudi2_enable_timestamp()
4209 WREG32(mmPSOC_TIMESTAMP_BASE + 0xC, 0); in gaudi2_enable_timestamp()
4210 WREG32(mmPSOC_TIMESTAMP_BASE + 0x8, 0); in gaudi2_enable_timestamp()
4219 WREG32(mmPSOC_TIMESTAMP_BASE, 0); in gaudi2_disable_timestamp()
4270 for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, irq_init_cnt = 0; in gaudi2_dec_enable_msix()
4286 rc = request_irq(irq, hl_irq_handler_dec_abnrm, 0, in gaudi2_dec_enable_msix()
4289 rc = request_irq(irq, hl_irq_user_interrupt_handler, 0, gaudi2_irq_name(i), in gaudi2_dec_enable_msix()
4299 return 0; in gaudi2_dec_enable_msix()
4314 return 0; in gaudi2_enable_msix()
4320 if (rc < 0) { in gaudi2_enable_msix()
4328 rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_COMPLETION), cq); in gaudi2_enable_msix()
4335 rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_EVENT_QUEUE), in gaudi2_enable_msix()
4366 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0; in gaudi2_enable_msix()
4372 rc = request_irq(irq, hl_irq_user_interrupt_handler, 0, gaudi2_irq_name(i), in gaudi2_enable_msix()
4391 return 0; in gaudi2_enable_msix()
4442 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = 0 ; j < hdev->asic_prop.user_interrupt_count; in gaudi2_sync_irqs()
4475 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, k = 0; in gaudi2_disable_msix()
4497 u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1); in gaudi2_stop_dcore_dec()
4507 for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { in gaudi2_stop_dcore_dec()
4514 WREG32(mmDCORE0_DEC0_CMD_SWREG16 + offset, 0); in gaudi2_stop_dcore_dec()
4537 u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1); in gaudi2_stop_pcie_dec()
4547 for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { in gaudi2_stop_pcie_dec()
4554 WREG32(mmPCIE_DEC0_CMD_SWREG16 + offset, 0); in gaudi2_stop_pcie_dec()
4580 if ((gaudi2->dec_hw_cap_initialized & HW_CAP_DEC_MASK) == 0) in gaudi2_stop_dec()
4583 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) in gaudi2_stop_dec()
4632 val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 0); in gaudi2_verify_arc_running_mode()
4663 for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { in gaudi2_nic_qmans_manual_flush()
4676 for (i = 0 ; i < num_cores ; i++) { in gaudi2_set_engine_cores()
4681 for (i = 0 ; i < num_cores ; i++) { in gaudi2_set_engine_cores()
4694 return 0; in gaudi2_set_engine_cores()
4703 return 0; in gaudi2_set_tpc_engine_mode()
4707 return 0; in gaudi2_set_tpc_engine_mode()
4712 (engine_command == HL_ENGINE_STALL) ? 1 : 0); in gaudi2_set_tpc_engine_mode()
4718 RMWREG32(reg_addr, 0x1, DCORE0_TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK); in gaudi2_set_tpc_engine_mode()
4721 return 0; in gaudi2_set_tpc_engine_mode()
4731 return 0; in gaudi2_set_mme_engine_mode()
4736 (engine_command == HL_ENGINE_STALL) ? 1 : 0); in gaudi2_set_mme_engine_mode()
4739 return 0; in gaudi2_set_mme_engine_mode()
4748 return 0; in gaudi2_set_edma_engine_mode()
4752 return 0; in gaudi2_set_edma_engine_mode()
4757 (engine_command == HL_ENGINE_STALL) ? 1 : 0); in gaudi2_set_edma_engine_mode()
4761 reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK, 0x1) | in gaudi2_set_edma_engine_mode()
4762 FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_FLUSH_MASK, 0x1); in gaudi2_set_edma_engine_mode()
4766 return 0; in gaudi2_set_edma_engine_mode()
4774 for (i = 0 ; i < num_engines ; ++i) { in gaudi2_set_engine_modes()
4809 return 0; in gaudi2_set_engine_modes()
4931 return 0; in gaudi2_init_cpu()
4934 return 0; in gaudi2_init_cpu()
4942 return 0; in gaudi2_init_cpu()
4956 return 0; in gaudi2_init_cpu_queues()
4959 return 0; in gaudi2_init_cpu_queues()
4979 WREG32(mmCPU_IF_EQ_RD_OFFS, 0); in gaudi2_init_cpu_queues()
4981 WREG32(mmCPU_IF_PF_PQ_PI, 0); in gaudi2_init_cpu_queues()
5011 return 0; in gaudi2_init_cpu_queues()
5020 for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) { in gaudi2_init_qman_pq()
5036 WREG32(reg_base + QM_PQ_PI_0_OFFSET + pq_offset, 0); in gaudi2_init_qman_pq()
5037 WREG32(reg_base + QM_PQ_CI_0_OFFSET + pq_offset, 0); in gaudi2_init_qman_pq()
5050 for (cp_id = 0 ; cp_id < NUM_OF_CP_PER_QMAN; cp_id++) { in gaudi2_init_qman_cp()
5060 WREG32(reg_base + QM_CP_CFG_OFFSET, FIELD_PREP(PDMA0_QM_CP_CFG_SWITCH_EN_MASK, 0x1)); in gaudi2_init_qman_cp()
5072 for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) { in gaudi2_init_qman_pqc()
5083 WREG32(reg_base + QM_PQC_PI_0_OFFSET + pq_offset, 0); in gaudi2_init_qman_pqc()
5136 return 0; in gaudi2_get_dyn_sp_reg()
5161 WREG32(reg_base + QM_GLBL_CFG1_OFFSET, 0); in gaudi2_init_qman_common()
5162 WREG32(reg_base + QM_GLBL_CFG2_OFFSET, 0); in gaudi2_init_qman_common()
5183 for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) in gaudi2_init_qman()
5283 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_init_edma()
5284 for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { in gaudi2_init_edma()
5322 WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0); in gaudi2_arm_monitors_for_virt_msix_db()
5339 payload = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 0x7FFF) | /* "-1" */ in gaudi2_arm_monitors_for_virt_msix_db()
5352 mask = ~BIT(sob_id & 0x7); in gaudi2_arm_monitors_for_virt_msix_db()
5353 mode = 0; /* comparison mode is "greater than or equal to" */ in gaudi2_arm_monitors_for_virt_msix_db()
5384 for (decoder_id = 0 ; decoder_id < NUMBER_OF_DEC ; ++decoder_id) { in gaudi2_prepare_sm_for_virt_msix_db()
5411 for (i = 0 ; i < GAUDI2_MAX_PENDING_CS ; i++) in gaudi2_init_sm()
5423 for (i = 0 ; i < GAUDI2_RESERVED_CQ_NUMBER ; i++) { in gaudi2_init_sm()
5436 WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_SEC, 0x10000); in gaudi2_init_sm()
5437 WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV, 0); in gaudi2_init_sm()
5449 reg_val = FIELD_PREP(MME_ACC_INTR_MASK_WBC_ERR_RESP_MASK, 0); in gaudi2_init_mme_acc()
5457 WREG32(reg_base + MME_ACC_AP_LFSR_POLY_OFFSET, 0x80DEADAF); in gaudi2_init_mme_acc()
5459 for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++) { in gaudi2_init_mme_acc()
5471 case 0: in gaudi2_init_dcore_mme()
5505 for (i = 0 ; i < NUM_OF_DCORES ; i++) { in gaudi2_init_mme()
5515 WREG32(reg_base + TPC_CFG_TPC_INTR_MASK_OFFSET, 0x23FFFE); in gaudi2_init_tpc_cfg()
5536 if (dcore == 0 && inst == (NUM_DCORE0_TPC - 1)) in gaudi2_init_tpc_config()
5560 init_cfg_data.dcore_tpc_qid_base[0] = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0; in gaudi2_init_tpc()
5576 for (i = 0 ; i < NUM_OF_ROT ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { in gaudi2_init_rotator()
5613 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) in gaudi2_init_dec()
5614 for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { in gaudi2_init_dec()
5630 for (dec_id = 0 ; dec_id < NUM_OF_PCIE_VDEC ; dec_id++) { in gaudi2_init_dec()
5658 WREG32(stlb_base + STLB_BUSY_OFFSET, 0x80000000); in gaudi2_mmu_update_asid_hop0_addr()
5664 !(status & 0x80000000), in gaudi2_mmu_update_asid_hop0_addr()
5673 return 0; in gaudi2_mmu_update_asid_hop0_addr()
5682 WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INVALIDATION, 0x1); in gaudi2_mmu_send_invalidate_cache_cmd()
5705 status & 0x1, in gaudi2_mmu_invalidate_cache_status_poll()
5712 /* Need to manually reset the status to 0 */ in gaudi2_mmu_invalidate_cache_status_poll()
5713 WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS, 0x0); in gaudi2_mmu_invalidate_cache_status_poll()
5720 return 0; in gaudi2_mmu_invalidate_cache_status_poll()
5729 !(status & 0x1), in gaudi2_mmu_invalidate_cache_status_poll()
5817 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { in gaudi2_hmmus_invalidate_cache()
5818 for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) { in gaudi2_hmmus_invalidate_cache()
5827 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { in gaudi2_hmmus_invalidate_cache()
5828 for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) { in gaudi2_hmmus_invalidate_cache()
5841 return 0; in gaudi2_hmmus_invalidate_cache()
5848 int rc = 0; in gaudi2_mmu_invalidate_cache()
5862 invld_params.flags = 0; in gaudi2_mmu_invalidate_cache()
5872 struct gaudi2_cache_invld_params invld_params = {0}; in gaudi2_mmu_invalidate_cache_range()
5876 int rc = 0; in gaudi2_mmu_invalidate_cache_range()
5879 return 0; in gaudi2_mmu_invalidate_cache_range()
5933 for (asid = 0 ; asid < max_asid ; asid++) { in gaudi2_mmu_update_hop0_addr()
5946 return 0; in gaudi2_mmu_update_hop0_addr()
5977 WREG32(mmu_base + MMU_BYPASS_OFFSET, 0); in gaudi2_mmu_init_common()
6003 return 0; in gaudi2_pci_mmu_init()
6009 (0 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT) | in gaudi2_pci_mmu_init()
6020 WREG32(stlb_base + STLB_LL_LOOKUP_MASK_63_32_OFFSET, 0); in gaudi2_pci_mmu_init()
6043 return 0; in gaudi2_pci_mmu_init()
6063 return 0; in gaudi2_dcore_hmmu_init()
6073 FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK, 0) | in gaudi2_dcore_hmmu_init()
6095 return 0; in gaudi2_dcore_hmmu_init()
6102 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) in gaudi2_hbm_mmu_init()
6103 for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE; hmmu_id++) { in gaudi2_hbm_mmu_init()
6109 return 0; in gaudi2_hbm_mmu_init()
6124 return 0; in gaudi2_mmu_init()
6203 return 0; in gaudi2_hw_init()
6344 return 0; in gaudi2_execute_soft_reset()
6349 int i, rc = 0; in gaudi2_poll_btm_indication()
6355 for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++) in gaudi2_poll_btm_indication()
6360 reg_val == 0, in gaudi2_poll_btm_indication()
6365 dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", reg_val); in gaudi2_poll_btm_indication()
6438 return 0; in gaudi2_hw_fini()
6458 memset(gaudi2->events_stat, 0, sizeof(gaudi2->events_stat)); in gaudi2_hw_fini()
6465 return 0; in gaudi2_hw_fini()
6470 return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0); in gaudi2_suspend()
6508 u64 hw_cap_mask = 0; in gaudi2_is_queue_enabled()
6509 u64 hw_tpc_cap_bit = 0; in gaudi2_is_queue_enabled()
6510 u64 hw_nic_cap_bit = 0; in gaudi2_is_queue_enabled()
6511 u64 hw_test_cap_bit = 0; in gaudi2_is_queue_enabled()
6558 return !!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(0)); in gaudi2_is_queue_enabled()
6589 return !!(gaudi2->nic_hw_cap_initialized & BIT_ULL(0)); in gaudi2_is_queue_enabled()
6685 * Masking the H/W queue ID with 0x3 extracts the QMAN internal PQ in gaudi2_ring_doorbell()
6688 pq_offset = (hw_queue_id & 0x3) * 4; in gaudi2_ring_doorbell()
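The two matched lines above describe how the doorbell offset is derived. The following is a minimal standalone sketch of that arithmetic only, not the driver's code: it assumes four PQs per QMAN (consistent with the NUM_OF_PQ_PER_QMAN loop strides seen elsewhere in this listing) and 4-byte spacing between the per-PQ doorbell registers; the helper name and the example queue IDs are illustrative.

#include <stdio.h>

/* Illustrative only: the low two bits of the global H/W queue ID select the
 * QMAN-internal PQ (0..3); multiplying by 4 turns that index into a byte
 * offset between consecutive per-PQ doorbell registers.
 */
static unsigned int pq_byte_offset(unsigned int hw_queue_id)
{
	return (hw_queue_id & 0x3) * 4;
}

int main(void)
{
	/* Assuming consecutive groups of four queue IDs map to one QMAN,
	 * e.g. IDs 8..11 would yield offsets 0, 4, 8, 12.
	 */
	for (unsigned int id = 8; id < 12; id++)
		printf("queue %u -> pq_offset %u\n", id, pq_byte_offset(id));

	return 0;
}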
6713 pqe[0] = pbd[0]; in gaudi2_pqe_write()
6736 *result = 0; in gaudi2_send_cpu_message()
6737 return 0; in gaudi2_send_cpu_message()
6787 return 0; in gaudi2_validate_cb_address()
6793 return 0; in gaudi2_validate_cb_address()
6800 return 0; in gaudi2_validate_cb_address()
6812 return 0; in gaudi2_validate_cb_address()
6816 return 0; in gaudi2_validate_cb_address()
6819 return 0; in gaudi2_validate_cb_address()
6822 dev_err(hdev->dev, "CB address %p + 0x%x for internal QMAN is not valid\n", in gaudi2_validate_cb_address()
6840 return 0; in gaudi2_cs_parser()
6848 return 0; in gaudi2_send_heartbeat()
6881 WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0); in gaudi2_arm_cq_monitor()
6883 /* Configure this address with CQ_ID 0 because CQ_EN is set */ in gaudi2_arm_cq_monitor()
6890 mask = ~(1 << (sob_id & 0x7)); in gaudi2_arm_cq_monitor()
6905 u32 comp_val, commit_mask, *polling_addr, timeout, status = 0; in gaudi2_send_job_to_kdma()
6959 *polling_addr = 0; in gaudi2_send_job_to_kdma()
6969 return 0; in gaudi2_send_job_to_kdma()
6976 for (i = 0 ; i < size ; i += sizeof(u32)) in gaudi2_memset_device_lbw()
6986 WREG32(reg_base + QM_PQC_CFG_OFFSET, 0); in gaudi2_qman_set_test_mode()
6995 return hdev->asic_prop.first_available_user_sob[0] + in gaudi2_test_queue_hw_queue_id_to_sob_id()
7005 WREG32(sob_addr, 0); in gaudi2_test_queue_clear()
7055 dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n", in gaudi2_test_queue_wait_completion()
7072 return 0; in gaudi2_test_cpu_queue()
7081 u32 sob_val = 0x5a5a; in gaudi2_test_queues()
7136 irq_arr_size = gaudi2->num_of_valid_hw_events * sizeof(gaudi2->hw_events[0]); in gaudi2_compute_reset_late_init()
7156 for (i = 0; i < NUM_OF_DCORES; i++) { in gaudi2_get_edma_idle_status()
7157 for (j = 0 ; j < NUM_OF_EDMA_PER_DCORE ; j++) { in gaudi2_get_edma_idle_status()
7205 for (i = 0 ; i < NUM_OF_PDMA ; i++) { in gaudi2_get_pdma_idle_status()
7238 u64 offset = 0; in gaudi2_get_nic_idle_status()
7246 for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) { in gaudi2_get_nic_idle_status()
7291 for (i = 0 ; i < NUM_OF_DCORES ; i++) { in gaudi2_get_mme_idle_status()
7327 if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1))) in gaudi2_is_tpc_engine_idle()
7398 for (i = 0 ; i < NUM_OF_DCORES ; i++) { in gaudi2_get_decoder_idle_status()
7399 for (j = 0 ; j < NUM_OF_DEC_PER_DCORE ; j++) { in gaudi2_get_decoder_idle_status()
7427 for (i = 0 ; i < NUM_OF_DEC_PER_DCORE ; i++) { in gaudi2_get_decoder_idle_status()
7464 for (i = 0 ; i < NUM_OF_ROT ; i++) { in gaudi2_get_rotator_idle_status()
7480 hl_engine_data_sprintf(e, rot_fmt, i, 0, is_eng_idle ? "Y" : "N", in gaudi2_get_rotator_idle_status()
7529 return 0; in gaudi2_get_eeprom_data()
7586 WREG32(mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
7588 WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
7593 WREG32(mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
7596 WREG32(mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
7605 if (dcore_id > 0) { in gaudi2_mmu_dcore_prepare()
7609 WREG32(mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
7612 WREG32(mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
7615 for (i = 0 ; i < NUM_OF_MME_SBTE_PORTS ; i++) { in gaudi2_mmu_dcore_prepare()
7618 dcore_offset + ports_offset, 0); in gaudi2_mmu_dcore_prepare()
7623 for (i = 0 ; i < NUM_OF_MME_WB_PORTS ; i++) { in gaudi2_mmu_dcore_prepare()
7626 dcore_offset + ports_offset, 0); in gaudi2_mmu_dcore_prepare()
7631 WREG32(mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
7637 for (vdec_id = 0 ; vdec_id < NUM_OF_DEC_PER_DCORE ; vdec_id++) { in gaudi2_mmu_dcore_prepare()
7639 gaudi2_mmu_vdec_dcore_prepare(hdev, dcore_id, vdec_id, rw_asid, 0); in gaudi2_mmu_dcore_prepare()
7675 u32 reg_base, reg_offset, reg_val = 0; in gaudi2_arc_mmu_prepare()
7680 reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_MASK, 0); in gaudi2_arc_mmu_prepare()
7734 return 0; in gaudi2_arc_mmu_prepare_all()
7747 WREG32(mmPDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP, 0); in gaudi2_mmu_shared_prepare()
7749 WREG32(mmPDMA0_CORE_CTX_AXUSER_HB_MMU_BP, 0); in gaudi2_mmu_shared_prepare()
7752 WREG32(mmPDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP, 0); in gaudi2_mmu_shared_prepare()
7754 WREG32(mmPDMA1_CORE_CTX_AXUSER_HB_MMU_BP, 0); in gaudi2_mmu_shared_prepare()
7757 for (i = 0 ; i < NUM_OF_ROT ; i++) { in gaudi2_mmu_shared_prepare()
7760 WREG32(mmROT0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0); in gaudi2_mmu_shared_prepare()
7767 if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 0)) in gaudi2_mmu_shared_prepare()
7768 gudi2_mmu_vdec_shared_prepare(hdev, 0, rw_asid, 0); in gaudi2_mmu_shared_prepare()
7771 gudi2_mmu_vdec_shared_prepare(hdev, 1, rw_asid, 0); in gaudi2_mmu_shared_prepare()
7774 for (i = 0 ; i < NUM_OF_ARC_FARMS_ARC ; i++) in gaudi2_mmu_shared_prepare()
7775 gudi2_mmu_arc_farm_arc_dup_eng_prepare(hdev, i, rw_asid, 0); in gaudi2_mmu_shared_prepare()
7781 return 0; in gaudi2_mmu_shared_prepare()
7789 WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_MMU_BP + offset, 0); in gaudi2_tpc_mmu_prepare()
7791 WREG32(mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0); in gaudi2_tpc_mmu_prepare()
7812 return 0; in gaudi2_mmu_prepare()
7822 for (i = 0 ; i < NUM_OF_DCORES ; i++) in gaudi2_mmu_prepare()
7825 return 0; in gaudi2_mmu_prepare()
7871 u64 ecc_address = 0, ecc_syndrome = 0; in gaudi2_handle_ecc_event()
7872 u8 memory_wrapper_idx = 0; in gaudi2_handle_ecc_event()
7876 if (hl_fw_version_cmp(hdev, 1, 12, 0) >= 0) in gaudi2_handle_ecc_event()
7906 is_arc_cq = FIELD_GET(PDMA0_QM_CP_STS_CUR_CQ_MASK, cp_sts); /* 0 - legacy CQ, 1 - ARC_CQ */ in handle_lower_qman_data_on_err()
7929 memset(undef_opcode, 0, sizeof(*undef_opcode)); in handle_lower_qman_data_on_err()
7935 undef_opcode->write_enable = 0; in handle_lower_qman_data_on_err()
7942 u32 i, j, glbl_sts_val, arb_err_val, num_error_causes, error_count = 0; in gaudi2_handle_qman_err_generic()
7950 for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) { in gaudi2_handle_qman_err_generic()
7964 for (j = 0 ; j < num_error_causes ; j++) in gaudi2_handle_qman_err_generic()
7988 for (j = 0 ; j < GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE ; j++) { in gaudi2_handle_qman_err_generic()
8025 "%s-RAZWI SHARED RR HBW %s error, address %#llx, Initiator coordinates 0x%x\n", in gaudi2_razwi_rr_hbw_shared_printf_info()
8050 …"%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%llX Initiator coordinates 0x… in gaudi2_razwi_rr_lbw_shared_printf_info()
8113 u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0; in gaudi2_ack_module_razwi_event_handler()
8114 u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0; in gaudi2_ack_module_razwi_event_handler()
8267 for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) { in gaudi2_check_if_razwi_happened()
8269 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
8273 for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) in gaudi2_check_if_razwi_happened()
8279 for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) in gaudi2_check_if_razwi_happened()
8281 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
8284 for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++) in gaudi2_check_if_razwi_happened()
8285 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
8288 for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++) in gaudi2_check_if_razwi_happened()
8290 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0, in gaudi2_check_if_razwi_happened()
8294 for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++) in gaudi2_check_if_razwi_happened()
8296 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
8299 for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++) in gaudi2_check_if_razwi_happened()
8300 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
8308 int i, num_of_eng = 0; in gaudi2_psoc_razwi_get_engines()
8309 u16 str_size = 0; in gaudi2_psoc_razwi_get_engines()
8311 for (i = 0 ; i < array_size ; i++) { in gaudi2_psoc_razwi_get_engines()
8334 u32 axuser_xy = RAZWI_GET_AXUSER_XY(razwi_reg), addr_hi = 0, addr_lo = 0; in gaudi2_handle_psoc_razwi_happened()
8354 for (i = 0 ; i < num_of_eng ; i++) { in gaudi2_handle_psoc_razwi_happened()
8361 "PSOC HBW AW RAZWI: %s, address (aligned to 128 byte): 0x%llX\n", in gaudi2_handle_psoc_razwi_happened()
8363 hl_handle_razwi(hdev, addr, &eng_id[0], in gaudi2_handle_psoc_razwi_happened()
8375 "PSOC HBW AR RAZWI: %s, address (aligned to 128 byte): 0x%llX\n", in gaudi2_handle_psoc_razwi_happened()
8377 hl_handle_razwi(hdev, addr, &eng_id[0], in gaudi2_handle_psoc_razwi_happened()
8387 "PSOC LBW AW RAZWI: %s, address (aligned to 128 byte): 0x%X\n", in gaudi2_handle_psoc_razwi_happened()
8389 hl_handle_razwi(hdev, addr_lo, &eng_id[0], in gaudi2_handle_psoc_razwi_happened()
8399 "PSOC LBW AR RAZWI: %s, address (aligned to 128 byte): 0x%X\n", in gaudi2_handle_psoc_razwi_happened()
8401 hl_handle_razwi(hdev, addr_lo, &eng_id[0], in gaudi2_handle_psoc_razwi_happened()
8422 u32 razwi_mask_info, razwi_intr = 0, error_count = 0; in gaudi2_ack_psoc_razwi_event_handler()
8427 return 0; in gaudi2_ack_psoc_razwi_event_handler()
8433 "PSOC RAZWI interrupt: Mask %d, AR %d, AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n", in gaudi2_ack_psoc_razwi_event_handler()
8444 "PSOC RAZWI interrupt: invalid razwi info (0x%x)\n", in gaudi2_ack_psoc_razwi_event_handler()
8456 u32 i, sts_val, sts_clr_val = 0, error_count = 0; in _gaudi2_handle_qm_sei_err()
8460 for (i = 0 ; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE ; i++) { in _gaudi2_handle_qm_sei_err()
8478 u32 error_count = 0; in gaudi2_handle_qm_sei_err()
8517 return 0; in gaudi2_handle_qm_sei_err()
8530 gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, event_mask); in gaudi2_handle_qm_sei_err()
8539 u32 qid_base, error_count = 0; in gaudi2_handle_qman_err()
8541 u8 index = 0; in gaudi2_handle_qman_err()
8585 index = 0; in gaudi2_handle_qman_err()
8641 return 0; in gaudi2_handle_qman_err()
8650 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, index, 0, event_mask); in gaudi2_handle_qman_err()
8660 u32 i, sts_val, sts_clr_val, error_count = 0, arc_farm; in gaudi2_handle_arc_farm_sei_err()
8662 for (arc_farm = 0 ; arc_farm < NUM_OF_ARC_FARMS_ARC ; arc_farm++) { in gaudi2_handle_arc_farm_sei_err()
8663 sts_clr_val = 0; in gaudi2_handle_arc_farm_sei_err()
8667 for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) { in gaudi2_handle_arc_farm_sei_err()
8680 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ARC_FARM, 0, 0, event_mask); in gaudi2_handle_arc_farm_sei_err()
8688 u32 i, sts_val, sts_clr_val = 0, error_count = 0; in gaudi2_handle_cpu_sei_err()
8692 for (i = 0 ; i < GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE ; i++) { in gaudi2_handle_cpu_sei_err()
8713 u32 error_count = 0; in gaudi2_handle_rot_err()
8716 for (i = 0 ; i < GAUDI2_NUM_OF_ROT_ERR_CAUSE ; i++) in gaudi2_handle_rot_err()
8724 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, event_mask); in gaudi2_handle_rot_err()
8735 u32 error_count = 0; in gaudi2_tpc_ack_interrupts()
8738 for (i = 0 ; i < GAUDI2_NUM_OF_TPC_INTR_CAUSE ; i++) in gaudi2_tpc_ack_interrupts()
8746 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, event_mask); in gaudi2_tpc_ack_interrupts()
8755 u32 sts_addr, sts_val, sts_clr_val = 0, error_count = 0; in gaudi2_handle_dec_err()
8770 for (i = 0 ; i < GAUDI2_NUM_OF_DEC_ERR_CAUSE ; i++) { in gaudi2_handle_dec_err()
8780 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, event_mask); in gaudi2_handle_dec_err()
8792 u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0; in gaudi2_handle_mme_err()
8800 for (i = 0 ; i < GAUDI2_NUM_OF_MME_ERR_CAUSE ; i++) { in gaudi2_handle_mme_err()
8835 u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0; in gaudi2_handle_mme_wap_err()
8843 for (i = 0 ; i < GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE ; i++) { in gaudi2_handle_mme_wap_err()
8865 u32 error_count = 0; in gaudi2_handle_kdma_core_event()
8873 for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) in gaudi2_handle_kdma_core_event()
8887 u32 error_count = 0; in gaudi2_handle_dma_core_event()
8890 for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) in gaudi2_handle_dma_core_event()
8910 WREG32(razwi_happened_addr, 0x1); in gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info()
8917 WREG32(razwi_happened_addr, 0x1); in gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info()
8924 WREG32(razwi_happened_addr, 0x1); in gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info()
8931 WREG32(razwi_happened_addr, 0x1); in gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info()
8938 u32 error_count = 0; in gaudi2_print_pcie_addr_dec_info()
8941 for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) { in gaudi2_print_pcie_addr_dec_info()
8966 u32 error_count = 0; in gaudi2_handle_pif_fatal()
8969 for (i = 0 ; i < GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE ; i++) { in gaudi2_handle_pif_fatal()
8982 u32 error_count = 0; in gaudi2_handle_hif_fatal()
8985 for (i = 0 ; i < GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE ; i++) { in gaudi2_handle_hif_fatal()
9013 dev_err_ratelimited(hdev->dev, "PMMU page fault on va 0x%llx\n", addr); in gaudi2_handle_page_error()
9017 dev_err_ratelimited(hdev->dev, "HMMU page fault on va range 0x%llx - 0x%llx\n", in gaudi2_handle_page_error()
9021 hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask); in gaudi2_handle_page_error()
9023 WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0); in gaudi2_handle_page_error()
9044 dev_err_ratelimited(hdev->dev, "%s access error on va 0x%llx\n", in gaudi2_handle_access_error()
9046 WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0); in gaudi2_handle_access_error()
9052 u32 spi_sei_cause, interrupt_clr = 0x0, error_count = 0; in gaudi2_handle_mmu_spi_sei_generic()
9057 for (i = 0 ; i < GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE ; i++) { in gaudi2_handle_mmu_spi_sei_generic()
9062 if (i == 0) in gaudi2_handle_mmu_spi_sei_generic()
9067 if (gaudi2_mmu_spi_sei[i].clear_bit >= 0) in gaudi2_handle_mmu_spi_sei_generic()
9086 cq_intr_addr, cq_intr_val, cq_intr_queue_index, error_count = 0; in gaudi2_handle_sm_err()
9102 for (i = 0 ; i < GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE ; i++) { in gaudi2_handle_sm_err()
9107 "err cause: %s. %s: 0x%X", in gaudi2_handle_sm_err()
9116 WREG32(sei_cause_addr, 0); in gaudi2_handle_sm_err()
9130 WREG32(cq_intr_addr, 0); in gaudi2_handle_sm_err()
9145 dcore = 0; in get_hmmu_base()
9146 index_in_dcore = 0; in get_hmmu_base()
9151 index_in_dcore = 0; in get_hmmu_base()
9155 dcore = 0; in get_hmmu_base()
9185 dcore = 0; in get_hmmu_base()
9195 dcore = 0; in get_hmmu_base()
9206 index_in_dcore = 0; in get_hmmu_base()
9211 index_in_dcore = 0; in get_hmmu_base()
9233 u32 error_count = 0; in gaudi2_handle_mmu_spi_sei_err()
9248 return 0; in gaudi2_handle_mmu_spi_sei_err()
9252 return 0; in gaudi2_handle_mmu_spi_sei_err()
9285 for (beat = 0 ; beat < 4 ; beat++) { in gaudi2_hbm_sei_handle_read_err()
9333 dev_err_ratelimited(hdev->dev, "CK-0 DERR: 0x%02x, CK-1 DERR: 0x%02x\n", in gaudi2_hbm_sei_print_wr_par_info()
9334 derr & 0x3, derr & 0xc); in gaudi2_hbm_sei_print_wr_par_info()
9338 for (i = 0 ; i < HBM_WR_PAR_CMD_LIFO_LEN ; i++) { in gaudi2_hbm_sei_print_wr_par_info()
9360 for (i = 0 ; i < HBM_CA_ERR_CMD_LIFO_LEN ; i++) in gaudi2_hbm_sei_print_ca_par_info()
9361 dev_err_ratelimited(hdev->dev, "cmd%u: ROW(0x%04x) COL(0x%05x)\n", i, in gaudi2_hbm_sei_print_ca_par_info()
9362 le16_to_cpu(row_cmd[i]) & (u16)GENMASK(13, 0), in gaudi2_hbm_sei_print_ca_par_info()
9363 le32_to_cpu(col_cmd[i]) & (u32)GENMASK(17, 0)); in gaudi2_hbm_sei_print_ca_par_info()
9441 return 0; in gaudi2_handle_hbm_cattrip()
9446 u32 i, error_count = 0; in gaudi2_handle_hbm_mc_spi()
9448 for (i = 0 ; i < GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE ; i++) in gaudi2_handle_hbm_mc_spi()
9460 ktime_t zero_time = ktime_set(0, 0); in gaudi2_print_clk_change_info()
9516 u32 p2p_intr, msix_gw_intr, error_count = 0; in gaudi2_handle_pcie_p2p_msix()
9523 "pcie p2p transaction terminated due to security, req_id(0x%x)", in gaudi2_handle_pcie_p2p_msix()
9526 WREG32(mmPCIE_WRAP_P2P_INTR, 0x1); in gaudi2_handle_pcie_p2p_msix()
9532 "pcie msi-x gen denied due to vector num check failure, vec(0x%X)", in gaudi2_handle_pcie_p2p_msix()
9535 WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1); in gaudi2_handle_pcie_p2p_msix()
9545 u64 cause, error_count = 0; in gaudi2_handle_pcie_drain()
9549 if (cause & BIT_ULL(0)) { in gaudi2_handle_pcie_drain()
9564 u32 error_count = 0; in gaudi2_handle_psoc_drain()
9567 for (i = 0 ; i < GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE ; i++) { in gaudi2_handle_psoc_drain()
9611 return 0; in hl_arc_event_handle()
9632 index = 0; in event_id_to_engine_id()
9784 u32 index, ctl, reset_flags = 0, error_count = 0; in gaudi2_handle_eqe()
9785 u64 event_mask = 0; in gaudi2_handle_eqe()
10022 if (hl_fw_version_cmp(hdev, 1, 13, 0) >= 0) in gaudi2_handle_eqe()
10166 dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n", in gaudi2_handle_eqe()
10167 le64_to_cpu(eq_entry->data[0])); in gaudi2_handle_eqe()
10172 dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n", in gaudi2_handle_eqe()
10173 le64_to_cpu(eq_entry->data[0])); in gaudi2_handle_eqe()
10229 else if (error_count == 0) in gaudi2_handle_eqe()
10272 int rc = 0, i; in gaudi2_memset_memory_chunk_using_edma_qm()
10286 for (i = 0; i < 3; i++) { in gaudi2_memset_memory_chunk_using_edma_qm()
10315 int rc = 0, dma_num = 0, i; in gaudi2_memset_device_memory()
10318 if (prop->edma_enabled_mask == 0) { in gaudi2_memset_device_memory()
10323 sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4; in gaudi2_memset_device_memory()
10357 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_memset_device_memory()
10358 for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { in gaudi2_memset_device_memory()
10378 WREG32(sob_addr, 0); in gaudi2_memset_device_memory()
10381 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_memset_device_memory()
10382 for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { in gaudi2_memset_device_memory()
10409 dev_err(hdev->dev, "DMA Timeout during HBM scrubbing(sob: 0x%x, dma_num: 0x%x)\n", in gaudi2_memset_device_memory()
10414 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_memset_device_memory()
10415 for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { in gaudi2_memset_device_memory()
10423 WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset, 0); in gaudi2_memset_device_memory()
10424 WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset, 0); in gaudi2_memset_device_memory()
10425 WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset, 0); in gaudi2_memset_device_memory()
10431 memset(lin_dma_pkts_arr, 0, sizeof(u64)); in gaudi2_memset_device_memory()
10434 for (i = 0; i < cb_len / sizeof(u64); i += sizeof(u64)) in gaudi2_memset_device_memory()
10438 WREG32(sob_addr, 0); in gaudi2_memset_device_memory()
10454 dev_err(hdev->dev, "Failed to scrub dram, address: 0x%llx size: %llu\n", in gaudi2_scrub_device_dram()
10467 return 0; in gaudi2_scrub_device_mem()
10471 size = hdev->pldm ? 0x10000 : (prop->sram_size - SRAM_USER_BASE_OFFSET); in gaudi2_scrub_device_mem()
10472 dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx, val: 0x%llx\n", in gaudi2_scrub_device_mem()
10486 return 0; in gaudi2_scrub_device_mem()
10496 offset = hdev->asic_prop.first_available_cq[0] * 4; in gaudi2_restore_user_sm_registers()
10507 gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0); in gaudi2_restore_user_sm_registers()
10508 gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0); in gaudi2_restore_user_sm_registers()
10509 gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0); in gaudi2_restore_user_sm_registers()
10510 gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0); in gaudi2_restore_user_sm_registers()
10511 gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0); in gaudi2_restore_user_sm_registers()
10512 gaudi2_memset_device_lbw(hdev, cq_size_addr, size, 0); in gaudi2_restore_user_sm_registers()
10523 gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0); in gaudi2_restore_user_sm_registers()
10524 gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0); in gaudi2_restore_user_sm_registers()
10525 gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0); in gaudi2_restore_user_sm_registers()
10526 gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0); in gaudi2_restore_user_sm_registers()
10527 gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0); in gaudi2_restore_user_sm_registers()
10528 gaudi2_memset_device_lbw(hdev, cq_size_addr, size, 0); in gaudi2_restore_user_sm_registers()
10538 offset = hdev->asic_prop.first_available_user_mon[0] * 4; in gaudi2_restore_user_sm_registers()
10547 gaudi2_memset_device_lbw(hdev, addr, size, 0); in gaudi2_restore_user_sm_registers()
10555 gaudi2_memset_device_lbw(hdev, mon_cfg_addr, size, 0); in gaudi2_restore_user_sm_registers()
10560 offset = hdev->asic_prop.first_available_user_sob[0] * 4; in gaudi2_restore_user_sm_registers()
10562 val = 0; in gaudi2_restore_user_sm_registers()
10593 WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0); in gaudi2_restore_user_qm_registers()
10612 WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0); in gaudi2_restore_nic_qm_registers()
10621 return 0; in gaudi2_context_switch()
10635 for (i = 0 ; i < cfg_ctx->instances ; i++) { in gaudi2_init_block_instances()
10654 for (i = 0 ; i < cfg_ctx->blocks ; i++) in gaudi2_init_blocks_with_mask()
10670 int rc = 0; in gaudi2_debugfs_read_dma()
10719 pos = 0; in gaudi2_debugfs_read_dma()
10723 while (size_left > 0) { in gaudi2_debugfs_read_dma()
10769 return 0; in gaudi2_internal_cb_pool_init()
10818 return 0; in gaudi2_internal_cb_pool_init()
10893 return 0; in gaudi2_ctx_init()
10935 return 0; in gaudi2_pre_schedule_cs()
10952 return 0; in gaudi2_pre_schedule_cs()
10967 memset(pkt, 0, pkt_size); in gaudi2_gen_signal_cb()
10989 memset(pkt, 0, pkt_size); in gaudi2_add_mon_msg_short()
10992 ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */ in gaudi2_add_mon_msg_short()
10994 ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); in gaudi2_add_mon_msg_short()
10995 ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 0); in gaudi2_add_mon_msg_short()
11011 return 0; in gaudi2_add_arm_monitor_pkt()
11014 memset(pkt, 0, pkt_size); in gaudi2_add_arm_monitor_pkt()
11018 value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_MODE_MASK, 0); /* GREATER OR EQUAL*/ in gaudi2_add_arm_monitor_pkt()
11022 ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */ in gaudi2_add_arm_monitor_pkt()
11024 ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); in gaudi2_add_arm_monitor_pkt()
11037 memset(pkt, 0, pkt_size); in gaudi2_add_fence_pkt()
11044 ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); in gaudi2_add_fence_pkt()
11058 u64 monitor_base, fence_addr = 0; in gaudi2_gen_wait_cb()
11111 WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4, 0); in gaudi2_reset_sob()
11129 return 0; in gaudi2_collective_wait_init_cs()
11145 * (addr[47:0] / 48M) * 64M + addr % 48M + addr[63:48]
11153 * PA1 0x3000000 VA1 0x9C000000 SVA1= (VA1/48M)*64M 0xD0000000 <- PA1/48M 0x1
11154 * PA2 0x9000000 VA2 0x9F000000 SVA2= (VA2/48M)*64M 0xD4000000 <- PA2/48M 0x3
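The comment lines above give the scrambled-address formula and two worked examples (0x9C000000 maps to 0xD0000000, 0x9F000000 maps to 0xD4000000, the value this search matched). Below is a minimal user-space sketch of that arithmetic under stated assumptions: the helper name scramble_addr and the SZ_48M/SZ_64M macros are illustrative, not the driver's own, and "+ addr[63:48]" is read as carrying the upper address bits over unchanged.

#include <stdint.h>
#include <stdio.h>

#define SZ_48M	0x3000000ULL	/* 48 MB */
#define SZ_64M	0x4000000ULL	/* 64 MB */

/* Illustrative implementation of:
 *   sva = (addr[47:0] / 48M) * 64M + addr % 48M + addr[63:48]
 * i.e. each 48 MB chunk of the input address is placed on a 64 MB stride,
 * the offset within the chunk is preserved, and bits 63:48 are kept as-is.
 */
static uint64_t scramble_addr(uint64_t addr)
{
	uint64_t low = addr & ((1ULL << 48) - 1);	/* addr[47:0] */
	uint64_t high = addr & ~((1ULL << 48) - 1);	/* addr[63:48], unchanged */

	return (low / SZ_48M) * SZ_64M + low % SZ_48M + high;
}

int main(void)
{
	/* Values from the comment: VA1/VA2 reproduce SVA1 0xD0000000 and SVA2 0xD4000000 */
	printf("0x%llX\n", (unsigned long long)scramble_addr(0x9C000000ULL));
	printf("0x%llX\n", (unsigned long long)scramble_addr(0x9F000000ULL));

	return 0;
}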
11200 u32 base = 0, dcore_id, dec_id; in gaudi2_get_dec_base_addr()
11227 for (i = 0 ; i < NUM_USER_MAPPED_BLOCKS ; i++) { in gaudi2_get_hw_block_id()
11232 return 0; in gaudi2_get_hw_block_id()
11345 return 0; in gaudi2_get_mmu_base()
11369 for (i = 0 ; i < num_of_hmmus ; i++) { in gaudi2_ack_mmu_page_fault_or_access_error()
11380 return 0; in gaudi2_ack_mmu_page_fault_or_access_error()
11411 return 0; in gaudi2_gen_sync_to_engine_map()
11417 return 0; in gaudi2_monitor_valid()
11424 return 0; in gaudi2_print_single_monitor()
11433 return 0; in gaudi2_print_fences_single_engine()
11453 return 0; in gaudi2_get_sob_addr()
11479 return 0; in gaudi2_mmu_get_real_page_size()
11495 return 0; in gaudi2_mmu_get_real_page_size()
11498 dev_err(hdev->dev, "page size of 0x%X is not 0x%X aligned, can't map\n", in gaudi2_mmu_get_real_page_size()
11513 return 0; in gaudi2_send_device_activity()