--- ivpu_mmu.c (263b2ba5fc93c875129e0d2b4034d7d8a34b3d39)
+++ ivpu_mmu.c (852be13f3bd32c1eab808840cfac41b1fea25991)
@@ -1,21 +1,22 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2020-2023 Intel Corporation
  */
 
 #include <linux/circ_buf.h>
 #include <linux/highmem.h>
 
 #include "ivpu_drv.h"
 #include "ivpu_hw_mtl_reg.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
+#include "ivpu_pm.h"
 
 #define IVPU_MMU_IDR0_REF		0x080f3e0f
 #define IVPU_MMU_IDR0_REF_SIMICS	0x080f3e1f
 #define IVPU_MMU_IDR1_REF		0x0e739d18
 #define IVPU_MMU_IDR3_REF		0x0000003c
 #define IVPU_MMU_IDR5_REF		0x00040070
 #define IVPU_MMU_IDR5_REF_SIMICS	0x00000075
 #define IVPU_MMU_IDR5_REF_FPGA		0x00800075
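The only change in this first hunk is the new ivpu_pm.h include; presumably it is what declares ivpu_pm_schedule_recovery(), the power-management entry point that the reworked event-queue handler in the second hunk now calls.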
--- 787 unchanged lines hidden ---

@@ -809,28 +810,34 @@
 	evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
 	REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
 
 	return evt;
 }
 
 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 {
+	bool schedule_recovery = false;
 	u32 *event;
 	u32 ssid;
 
 	ivpu_dbg(vdev, IRQ, "MMU event queue\n");
 
 	while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
 		ivpu_mmu_dump_event(vdev, event);
 
 		ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
-		if (ssid != IVPU_GLOBAL_CONTEXT_MMU_SSID)
+		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+			schedule_recovery = true;
+		else
 			ivpu_mmu_user_context_mark_invalid(vdev, ssid);
 	}
+
+	if (schedule_recovery)
+		ivpu_pm_schedule_recovery(vdev);
 }
 
 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
 {
 	u32 gerror_val, gerrorn_val, active;
 
 	ivpu_dbg(vdev, IRQ, "MMU error\n");
 
--- 40 unchanged lines hidden ---
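Summary of the change: the old handler only marked the faulting user context invalid and, by inverting the SSID test, silently skipped events belonging to the global context. The new handler latches schedule_recovery when a global-context fault is seen and, only after the event queue has been fully drained, requests full device recovery via ivpu_pm_schedule_recovery(). The following is a minimal user-space sketch of that latch-then-act pattern, not driver code; fake_event, get_event(), GLOBAL_SSID, and the sample queue are invented stand-ins for illustration.

#include <stdbool.h>
#include <stdio.h>

#define GLOBAL_SSID 0U	/* stand-in for IVPU_GLOBAL_CONTEXT_MMU_SSID */

struct fake_event {
	unsigned int ssid;
};

/* Four pending events: two user-context faults, two global-context faults. */
static struct fake_event queue[] = { {3}, {GLOBAL_SSID}, {5}, {GLOBAL_SSID} };
static unsigned int head;

/* Stand-in for ivpu_mmu_get_event(): returns NULL once the queue is drained. */
static struct fake_event *get_event(void)
{
	if (head >= sizeof(queue) / sizeof(queue[0]))
		return NULL;
	return &queue[head++];
}

int main(void)
{
	bool schedule_recovery = false;
	struct fake_event *evt;

	while ((evt = get_event()) != NULL) {
		if (evt->ssid == GLOBAL_SSID)
			schedule_recovery = true;	/* latch, act later */
		else
			printf("invalidate user context %u\n", evt->ssid);
	}

	/* One recovery request no matter how many global faults were drained. */
	if (schedule_recovery)
		printf("schedule device recovery (once)\n");

	return 0;
}

Acting after the loop rather than inside it keeps the interrupt path short and coalesces any number of global-context faults drained in one IRQ into a single recovery request, which appears to be the point of the bool flag in the real handler.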