Lines Matching refs:mmu
574 lockdep_assert_held(&ptdev->mmu->as.slots_lock);
601 mutex_lock(&ptdev->mmu->as.slots_lock);
603 mutex_unlock(&ptdev->mmu->as.slots_lock);
676 lockdep_assert_held(&ptdev->mmu->as.slots_lock);
681 ptdev->mmu->as.slots[vm->as.id].vm = NULL;
682 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
710 mutex_lock(&ptdev->mmu->as.slots_lock);
720 if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
728 drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
731 as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
737 lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
752 set_bit(as, &ptdev->mmu->as.alloc_mask);
753 ptdev->mmu->as.slots[as].vm = vm;
770 if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
772 ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
773 ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
774 gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
786 mutex_unlock(&ptdev->mmu->as.slots_lock);
810 if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
814 list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);
817 mutex_unlock(&ptdev->mmu->as.slots_lock);
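
The hits between lines 574 and 817 sketch out the address-space (AS) slot management scheme: every access to ptdev->mmu->as.* happens under as.slots_lock, AS 0 is reserved (it is ORed into the allocation mask before ffz(), lines 728-731), a full table falls back to recycling the VM at the head of as.lru_list (line 737), and VMs that drop their last active reference are parked on that LRU list (line 814). A minimal sketch of the allocation side, assuming NUM_AS_SLOTS and the function name, which are not taken from the file:

	/* Sketch only: NUM_AS_SLOTS and the function name are assumptions. */
	static int assign_as_slot_locked(struct panthor_device *ptdev,
					 struct panthor_vm *vm)
	{
		int as;

		lockdep_assert_held(&ptdev->mmu->as.slots_lock);

		/* AS 0 is reserved, so make it look busy before searching. */
		as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
		if (as >= NUM_AS_SLOTS)
			return -EBUSY;	/* the driver instead recycles the LRU entry (line 737) */

		set_bit(as, &ptdev->mmu->as.alloc_mask);
		ptdev->mmu->as.slots[as].vm = vm;
		return as;
	}
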
1669 mutex_lock(&ptdev->mmu->as.slots_lock);
1671 ptdev->mmu->as.faulty_mask |= mask;
1673 panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);
1700 ptdev->mmu->irq.mask = new_int_mask;
1702 if (ptdev->mmu->as.slots[as].vm)
1703 ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
1707 mutex_unlock(&ptdev->mmu->as.slots_lock);
1716 PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
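
Lines 1669-1716 come from the MMU fault/IRQ path: a faulting AS is recorded in as.faulty_mask, the fault interrupt for that AS stays masked until the slot is reassigned (the unmasking at lines 770-774 mirrors this), and the VM currently bound to the slot is flagged with unhandled_fault. A condensed sketch of that masking step, reassembled from the fragments above rather than copied from the function:

	static void mask_faulty_as(struct panthor_device *ptdev, u32 as)
	{
		u32 mask = panthor_mmu_as_fault_mask(ptdev, as);

		mutex_lock(&ptdev->mmu->as.slots_lock);
		ptdev->mmu->as.faulty_mask |= mask;

		/* Stop listening for faults on this AS until it gets reused. */
		ptdev->mmu->irq.mask = panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);

		if (ptdev->mmu->as.slots[as].vm)
			ptdev->mmu->as.slots[as].vm->unhandled_fault = true;

		mutex_unlock(&ptdev->mmu->as.slots_lock);
	}
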
1730 mutex_lock(&ptdev->mmu->as.slots_lock);
1731 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1732 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1739 mutex_unlock(&ptdev->mmu->as.slots_lock);
1741 panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1755 mutex_lock(&ptdev->mmu->as.slots_lock);
1756 ptdev->mmu->as.alloc_mask = 0;
1757 ptdev->mmu->as.faulty_mask = 0;
1758 mutex_unlock(&ptdev->mmu->as.slots_lock);
1760 panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
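
Lines 1730-1760 are the suspend/resume pair: suspend disables the AS behind every populated slot and then quiesces the IRQ, while resume simply forgets all slot state (both masks go back to zero) and re-enables the IRQ with the full fault mask so every AS can fault again. Roughly, with panthor_mmu_as_disable() assumed as the per-slot teardown helper:

	/* Suspend: tear down every active AS, then stop taking interrupts. */
	mutex_lock(&ptdev->mmu->as.slots_lock);
	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
		if (ptdev->mmu->as.slots[i].vm)
			panthor_mmu_as_disable(ptdev, i);	/* assumed helper */
	}
	mutex_unlock(&ptdev->mmu->as.slots_lock);
	panthor_mmu_irq_suspend(&ptdev->mmu->irq);

	/* Resume: start from a clean slate and unmask all fault sources. */
	mutex_lock(&ptdev->mmu->as.slots_lock);
	ptdev->mmu->as.alloc_mask = 0;
	ptdev->mmu->as.faulty_mask = 0;
	mutex_unlock(&ptdev->mmu->as.slots_lock);
	panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
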
1777 panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1779 mutex_lock(&ptdev->mmu->vm.lock);
1780 ptdev->mmu->vm.reset_in_progress = true;
1781 list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
1783 mutex_unlock(&ptdev->mmu->vm.lock);
1797 mutex_lock(&ptdev->mmu->as.slots_lock);
1802 ptdev->mmu->as.alloc_mask = 0;
1803 ptdev->mmu->as.faulty_mask = 0;
1805 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1806 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1812 mutex_unlock(&ptdev->mmu->as.slots_lock);
1814 panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
1817 mutex_lock(&ptdev->mmu->vm.lock);
1818 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
1821 ptdev->mmu->vm.reset_in_progress = false;
1822 mutex_unlock(&ptdev->mmu->vm.lock);
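
Lines 1777-1822 bracket a GPU reset: before the reset the MMU IRQ is suspended and vm.reset_in_progress is set under vm.lock so that every VM on vm.list (and any VM created mid-reset, see line 2375) has its bind queue stopped; after the reset the AS masks are cleared, every slot is dropped as in the suspend path, the IRQ is resumed, and the queues are restarted with reset_in_progress cleared again. The pre-reset half looks roughly like this, with the queue-stop call left as a placeholder:

	panthor_mmu_irq_suspend(&ptdev->mmu->irq);

	mutex_lock(&ptdev->mmu->vm.lock);
	ptdev->mmu->vm.reset_in_progress = true;
	list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
		stop_vm_bind_queue(vm);		/* placeholder for the real per-VM stop */
	mutex_unlock(&ptdev->mmu->vm.lock);
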
1836 mutex_lock(&ptdev->mmu->vm.lock);
1843 if (ptdev->mmu->vm.reset_in_progress)
1845 mutex_unlock(&ptdev->mmu->vm.lock);
1850 mutex_lock(&ptdev->mmu->as.slots_lock);
1859 ptdev->mmu->as.slots[vm->as.id].vm = NULL;
1860 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
1863 mutex_unlock(&ptdev->mmu->as.slots_lock);
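
Lines 1836-1863 are the VM teardown side: the VM is unlinked from vm.list under vm.lock (skipping the queue restart if a reset is in flight), and its AS slot, if it holds one, is released with the same slot-clear/clear_bit pair already seen at lines 681-682. A short sketch of the slot release, where the vm->as.id >= 0 test is an assumption:

	mutex_lock(&ptdev->mmu->as.slots_lock);
	if (vm->as.id >= 0) {		/* assumed "has a slot" test */
		ptdev->mmu->as.slots[vm->as.id].vm = NULL;
		clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
	}
	mutex_unlock(&ptdev->mmu->as.slots_lock);
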
2294 .submit_wq = ptdev->mmu->vm.wq,
2371 mutex_lock(&ptdev->mmu->vm.lock);
2372 list_add_tail(&vm->node, &ptdev->mmu->vm.list);
2375 if (ptdev->mmu->vm.reset_in_progress)
2377 mutex_unlock(&ptdev->mmu->vm.lock);
2696 panthor_mmu_irq_suspend(&ptdev->mmu->irq);
2698 mutex_lock(&ptdev->mmu->as.slots_lock);
2699 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
2700 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
2707 mutex_unlock(&ptdev->mmu->as.slots_lock);
2724 struct panthor_mmu *mmu;
2727 mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
2728 if (!mmu)
2731 INIT_LIST_HEAD(&mmu->as.lru_list);
2733 ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
2737 INIT_LIST_HEAD(&mmu->vm.list);
2738 ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
2742 ptdev->mmu = mmu;
2744 irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
2748 ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
2753 mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
2754 if (!mmu->vm.wq)
2766 return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
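
Lines 2724-2766 are the init path: the panthor_mmu struct and its two mutexes are DRM-managed (drmm_kzalloc()/drmm_mutex_init()) so they go away with the drm_device, the "mmu" interrupt is looked up by name and wired to the handler generated by PANTHOR_IRQ_HANDLER (line 1716), and the unbound "panthor-vm-bind" workqueue is handed to drmm_add_action_or_reset() so it is destroyed on release. The release action's body is not part of the hits; a plausible minimal version would be:

	/* Assumed body for the panthor_mmu_release_wq() action named at line 2766. */
	static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
	{
		destroy_workqueue(res);
	}
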
2790 mutex_lock(&ptdev->mmu->vm.lock);
2791 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
2798 mutex_unlock(&ptdev->mmu->vm.lock);
2826 pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
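
Line 2826 creates the page-table kmem cache with 4 KiB objects at 4 KiB alignment, i.e. one object per page-table page, so each level of the io-pgtable hierarchy can be carved straight out of it. Assumed usage, not taken from the file:

	static void *alloc_pt_page(void)
	{
		/* One zeroed, page-aligned 4 KiB page-table page. */
		return kmem_cache_zalloc(pt_cache, GFP_KERNEL);
	}

	static void free_pt_page(void *pt)
	{
		kmem_cache_free(pt_cache, pt);
	}
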