Lines matching refs: stt

39 struct kvmppc_spapr_tce_table *stt; in kvmppc_find_table() local
41 list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list) in kvmppc_find_table()
42 if (stt->liobn == liobn) in kvmppc_find_table()
43 return stt; in kvmppc_find_table()
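
Lines 39-43 are the whole LIOBN lookup; only the return-NULL tail is elided. A minimal reconstruction, hedged (the signature is inferred from the callers at lines 582, 640, 730 and 776):

    struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                    unsigned long liobn)
    {
            struct kvmppc_spapr_tce_table *stt;

            /* Lockless walk: safe because tables are only unlinked with
             * list_del_rcu() and freed via call_rcu() (lines 263, 279). */
            list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                    if (stt->liobn == liobn)
                            return stt;

            return NULL;
    }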
85 struct kvmppc_spapr_tce_table *stt; in kvm_spapr_tce_release_iommu_group() local
90 list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { in kvm_spapr_tce_release_iommu_group()
96 list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) { in kvm_spapr_tce_release_iommu_group()
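
Lines 85-96 show the teardown when a hardware IOMMU group goes away: an RCU-protected walk over every TCE table, with a _safe inner walk over stt->iommu_tables because matching entries are unlinked while iterating. The skeleton, with the group-matching test (not visible in the listing) left as a comment:

    rcu_read_lock();
    list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
            /* _safe variant: entries may be removed mid-walk */
            list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                    /* if stit->tbl belongs to the departing group,
                     * drop the reference taken at attach time */
            }
    }
    rcu_read_unlock();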
112 struct kvmppc_spapr_tce_table *stt = NULL; in kvm_spapr_tce_attach_iommu_group() local
124 list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { in kvm_spapr_tce_attach_iommu_group()
125 if (stt == fd_file(f)->private_data) { in kvm_spapr_tce_attach_iommu_group()
145 if ((tbltmp->it_page_shift <= stt->page_shift) && in kvm_spapr_tce_attach_iommu_group()
147 stt->offset << stt->page_shift) && in kvm_spapr_tce_attach_iommu_group()
149 stt->size << stt->page_shift)) { in kvm_spapr_tce_attach_iommu_group()
162 list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { in kvm_spapr_tce_attach_iommu_group()
190 list_add_rcu(&stit->next, &stt->iommu_tables); in kvm_spapr_tce_attach_iommu_group()
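
Lines 145-149 carry the interesting predicate of the attach path. Only the stt halves of the comparisons match the search (lines 146 and 148 are absent), so the sketch below assumes the other halves compare against the hardware table's it_offset and it_size, the usual shape of this check: the hardware page size must not exceed the guest window's, both windows must start at the same bus address, and the hardware window must cover the guest one.

    /* Both sides are converted from entry counts to bytes by shifting
     * with the respective page shift before comparing. */
    if ((tbltmp->it_page_shift <= stt->page_shift) &&
        (tbltmp->it_offset << tbltmp->it_page_shift ==
         stt->offset << stt->page_shift) &&
        (tbltmp->it_size << tbltmp->it_page_shift >=
         stt->size << stt->page_shift)) {
            /* compatible: remember tbltmp as the table to attach */
    }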
197 struct kvmppc_spapr_tce_table *stt = container_of(head, in release_spapr_tce_table() local
199 unsigned long i, npages = kvmppc_tce_pages(stt->size); in release_spapr_tce_table()
202 if (stt->pages[i]) in release_spapr_tce_table()
203 __free_page(stt->pages[i]); in release_spapr_tce_table()
205 kfree(stt); in release_spapr_tce_table()
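
Lines 197-205 are nearly the complete RCU callback; only the loop header between lines 199 and 202 is elided. Filled in as a sketch (kvmppc_tce_pages() converts the window size in TCEs into a count of backing pages):

    static void release_spapr_tce_table(struct rcu_head *head)
    {
            struct kvmppc_spapr_tce_table *stt = container_of(head,
                            struct kvmppc_spapr_tce_table, rcu);
            unsigned long i, npages = kvmppc_tce_pages(stt->size);

            /* Pages are allocated lazily, so slots may still be NULL. */
            for (i = 0; i < npages; i++)
                    if (stt->pages[i])
                            __free_page(stt->pages[i]);

            kfree(stt);
    }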
208 static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt, in kvm_spapr_get_tce_page() argument
211 struct page *page = stt->pages[sttpage]; in kvm_spapr_get_tce_page()
216 mutex_lock(&stt->alloc_lock); in kvm_spapr_get_tce_page()
217 page = stt->pages[sttpage]; in kvm_spapr_get_tce_page()
222 stt->pages[sttpage] = page; in kvm_spapr_get_tce_page()
224 mutex_unlock(&stt->alloc_lock); in kvm_spapr_get_tce_page()
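
Lines 211-224 are a double-checked lazy allocation: an unlocked fast-path read of stt->pages[sttpage], then a re-check under stt->alloc_lock before allocating. The allocation itself sits in the unmatched lines between 217 and 222; the sketch below assumes a zeroed alloc_page():

    static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
                    unsigned long sttpage)
    {
            struct page *page = stt->pages[sttpage];

            if (page)                       /* unlocked fast path */
                    return page;

            mutex_lock(&stt->alloc_lock);
            page = stt->pages[sttpage];     /* re-check under the lock */
            if (!page) {
                    page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                    if (page)
                            stt->pages[sttpage] = page;
            }
            mutex_unlock(&stt->alloc_lock);

            return page;
    }

The unlocked read is sound here because a slot only ever transitions NULL to page and is never cleared again before the RCU free above.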
231 struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; in kvm_spapr_tce_fault() local
234 if (vmf->pgoff >= kvmppc_tce_pages(stt->size)) in kvm_spapr_tce_fault()
237 page = kvm_spapr_get_tce_page(stt, vmf->pgoff); in kvm_spapr_tce_fault()
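
Lines 231-237 show the mmap fault handler: offsets beyond the table's backing pages fail, otherwise the page is materialised on demand through kvm_spapr_get_tce_page(). With the standard vm_fault plumbing filled in (the exact return codes are assumptions):

    static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
    {
            struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
            struct page *page;

            if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                    return VM_FAULT_SIGBUS;

            page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
            if (!page)
                    return VM_FAULT_OOM;

            get_page(page);
            vmf->page = page;
            return 0;
    }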
258 struct kvmppc_spapr_tce_table *stt = filp->private_data; in kvm_spapr_tce_release() local
260 struct kvm *kvm = stt->kvm; in kvm_spapr_tce_release()
263 list_del_rcu(&stt->list); in kvm_spapr_tce_release()
266 list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) { in kvm_spapr_tce_release()
275 kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false); in kvm_spapr_tce_release()
277 kvm_put_kvm(stt->kvm); in kvm_spapr_tce_release()
279 call_rcu(&stt->rcu, release_spapr_tce_table); in kvm_spapr_tce_release()
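
Lines 258-279 give the fd release path, and the ordering is the point: list_del_rcu() unpublishes the table first, so the lockless lookup above stops returning it; the attached hardware tables are then detached (line 266), the page accounting is unwound (line 275), and only then does call_rcu() (line 279) free the structure, after any in-flight lockless readers have drained. Condensed:

    list_del_rcu(&stt->list);               /* unpublish from the LIOBN list */

    /* detach hardware tables, unaccount locked pages (lines 266-275) */

    kvm_put_kvm(stt->kvm);
    call_rcu(&stt->rcu, release_spapr_tce_table);   /* free after grace period */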
292 struct kvmppc_spapr_tce_table *stt = NULL; in kvm_vm_ioctl_create_spapr_tce() local
308 stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN); in kvm_vm_ioctl_create_spapr_tce()
309 if (!stt) in kvm_vm_ioctl_create_spapr_tce()
312 stt->liobn = args->liobn; in kvm_vm_ioctl_create_spapr_tce()
313 stt->page_shift = args->page_shift; in kvm_vm_ioctl_create_spapr_tce()
314 stt->offset = args->offset; in kvm_vm_ioctl_create_spapr_tce()
315 stt->size = args->size; in kvm_vm_ioctl_create_spapr_tce()
316 stt->kvm = kvm; in kvm_vm_ioctl_create_spapr_tce()
317 mutex_init(&stt->alloc_lock); in kvm_vm_ioctl_create_spapr_tce()
318 INIT_LIST_HEAD_RCU(&stt->iommu_tables); in kvm_vm_ioctl_create_spapr_tce()
334 stt, O_RDWR | O_CLOEXEC); in kvm_vm_ioctl_create_spapr_tce()
337 list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables); in kvm_vm_ioctl_create_spapr_tce()
346 kfree(stt); in kvm_vm_ioctl_create_spapr_tce()
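
Lines 292-346 outline the creation ioctl: one kzalloc() sized with struct_size() so the flexible pages[] array rides along, field initialisation straight from the user's args, then an anonymous-inode fd whose release hook is the function above; on failure after allocation the table is simply kfree()d (line 346), since nothing was published yet. A condensed happy path (the fd name string and the fops symbol are assumptions):

    npages = kvmppc_tce_pages(args->size);
    stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);
    if (!stt)
            return -ENOMEM;         /* the real code also unwinds accounting */

    stt->liobn = args->liobn;
    stt->page_shift = args->page_shift;
    stt->offset = args->offset;
    stt->size = args->size;
    stt->kvm = kvm;
    mutex_init(&stt->alloc_lock);
    INIT_LIST_HEAD_RCU(&stt->iommu_tables);

    ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                           stt, O_RDWR | O_CLOEXEC);
    if (ret >= 0)
            list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);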
368 static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, in kvmppc_tce_validate() argument
380 if (iommu_tce_check_gpa(stt->page_shift, gpa)) in kvmppc_tce_validate()
383 if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) in kvmppc_tce_validate()
387 list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { in kvmppc_tce_validate()
392 mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); in kvmppc_tce_validate()
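
Lines 368-392 show what makes a guest TCE acceptable: the GPA must be valid for the window's page size, it must translate to a userspace address, and when hardware tables are attached that address must fall in memory preregistered at each table's page size. As a sketch (the hcall return codes are assumptions):

    if (iommu_tce_check_gpa(stt->page_shift, gpa))
            return H_TOO_HARD;

    if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
            return H_TOO_HARD;

    list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
            unsigned long shift = stit->tbl->it_page_shift;
            struct mm_iommu_table_group_mem_t *mem;

            /* must hit a preregistered region large enough for one
             * hardware IOMMU page */
            mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
            if (!mem)
                    return H_TOO_HARD;
    }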
408 static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, in kvmppc_tce_put() argument
415 idx -= stt->offset; in kvmppc_tce_put()
417 page = stt->pages[sttpage]; in kvmppc_tce_put()
424 page = kvm_spapr_get_tce_page(stt, sttpage); in kvmppc_tce_put()
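
Lines 408-424 are the shadow-table store. The index is rebased by the window offset, then split into a backing-page number and an in-page slot; a missing page is only allocated when a non-zero TCE actually needs storing, since clearing an entry on a never-allocated page is a no-op. Sketched (TCES_PER_PAGE would be PAGE_SIZE / sizeof(u64)):

    idx -= stt->offset;                     /* rebase to the window start */
    sttpage = idx / TCES_PER_PAGE;          /* which backing page */
    page = stt->pages[sttpage];
    if (!page) {
            if (!tce)                       /* clearing an absent page: done */
                    return;
            page = kvm_spapr_get_tce_page(stt, sttpage);
            if (!page)
                    return;
    }
    tbl = (u64 *)page_to_virt(page);
    tbl[idx % TCES_PER_PAGE] = tce;         /* slot within the page */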
433 static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt, in kvmppc_clear_tce() argument
437 unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); in kvmppc_clear_tce()
438 unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift); in kvmppc_clear_tce()
491 struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, in kvmppc_tce_iommu_unmap() argument
495 unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); in kvmppc_tce_iommu_unmap()
548 struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, in kvmppc_tce_iommu_map() argument
553 unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); in kvmppc_tce_iommu_map()
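
The expression at lines 437-438 recurs at 495 and 553: when the guest window uses larger IOMMU pages than the hardware table, one guest entry fans out over several hardware entries. The attach-time check (lines 145-149) guarantees stt->page_shift >= tbl->it_page_shift, so both shifts are non-negative:

    unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
    unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

    /* Example: a 64K guest window over a 4K hardware table gives
     * subpages = 1 << (16 - 12) = 16 hardware entries per guest TCE,
     * starting at hardware index io_entry. */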
573 struct kvmppc_spapr_tce_table *stt; in kvmppc_h_put_tce() local
582 stt = kvmppc_find_table(vcpu->kvm, liobn); in kvmppc_h_put_tce()
583 if (!stt) in kvmppc_h_put_tce()
586 ret = kvmppc_ioba_validate(stt, ioba, 1); in kvmppc_h_put_tce()
592 ret = kvmppc_tce_validate(stt, tce); in kvmppc_h_put_tce()
603 entry = ioba >> stt->page_shift; in kvmppc_h_put_tce()
605 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { in kvmppc_h_put_tce()
607 ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, in kvmppc_h_put_tce()
610 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, in kvmppc_h_put_tce()
615 kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry); in kvmppc_h_put_tce()
620 kvmppc_tce_put(stt, entry, tce); in kvmppc_h_put_tce()
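
Lines 573-620 are the single-entry H_PUT_TCE flow: look the table up by LIOBN, validate the IOBA and then the TCE value, derive the entry index, push the change into every attached hardware table (unmap for a clearing TCE, map otherwise, with kvmppc_clear_tce() as the failure fallback), and finally record the value in the shadow table. Condensed, with the ua/dir derivation (not in the listing) assumed:

    stt = kvmppc_find_table(vcpu->kvm, liobn);
    if (!stt)
            return H_TOO_HARD;

    ret = kvmppc_ioba_validate(stt, ioba, 1);
    if (ret != H_SUCCESS)
            return ret;

    ret = kvmppc_tce_validate(stt, tce);
    if (ret != H_SUCCESS)
            return ret;

    entry = ioba >> stt->page_shift;        /* guest-window entry index */

    list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
            if (dir == DMA_NONE)
                    ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                                 stit->tbl, entry);
            else
                    ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                               entry, ua, dir);
            if (ret != H_SUCCESS)
                    kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
    }

    kvmppc_tce_put(stt, entry, tce);        /* shadow copy for H_GET_TCE */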
633 struct kvmppc_spapr_tce_table *stt; in kvmppc_h_put_tce_indirect() local
640 stt = kvmppc_find_table(vcpu->kvm, liobn); in kvmppc_h_put_tce_indirect()
641 if (!stt) in kvmppc_h_put_tce_indirect()
644 entry = ioba >> stt->page_shift; in kvmppc_h_put_tce_indirect()
655 ret = kvmppc_ioba_validate(stt, ioba, npages); in kvmppc_h_put_tce_indirect()
673 ret = kvmppc_tce_validate(stt, tce); in kvmppc_h_put_tce_indirect()
700 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { in kvmppc_h_put_tce_indirect()
701 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, in kvmppc_h_put_tce_indirect()
706 kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, in kvmppc_h_put_tce_indirect()
712 kvmppc_tce_put(stt, entry + i, tce); in kvmppc_h_put_tce_indirect()
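
Lines 633-712 are the indirect variant, H_PUT_TCE_INDIRECT: the guest passes a list of TCEs, the range is validated once (line 655), then each element is validated (line 673) and applied at entry + i (lines 700-712). Only the loop skeleton differs from H_PUT_TCE; how each tce is fetched from the guest's list page is not visible in the listing:

    entry = ioba >> stt->page_shift;

    ret = kvmppc_ioba_validate(stt, ioba, npages);
    if (ret != H_SUCCESS)
            return ret;

    for (i = 0; i < npages; ++i) {
            /* tce = i-th element of the guest-provided list (elided) */
            ret = kvmppc_tce_validate(stt, tce);
            if (ret != H_SUCCESS)
                    break;          /* the real code unwinds via labels */

            /* map into each attached hardware table, as in H_PUT_TCE,
             * then record the shadow copy */
            kvmppc_tce_put(stt, entry + i, tce);
    }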
726 struct kvmppc_spapr_tce_table *stt; in kvmppc_h_stuff_tce() local
730 stt = kvmppc_find_table(vcpu->kvm, liobn); in kvmppc_h_stuff_tce()
731 if (!stt) in kvmppc_h_stuff_tce()
734 ret = kvmppc_ioba_validate(stt, ioba, npages); in kvmppc_h_stuff_tce()
742 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { in kvmppc_h_stuff_tce()
743 unsigned long entry = ioba >> stt->page_shift; in kvmppc_h_stuff_tce()
746 ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, in kvmppc_h_stuff_tce()
756 kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i); in kvmppc_h_stuff_tce()
760 for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) in kvmppc_h_stuff_tce()
761 kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); in kvmppc_h_stuff_tce()
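
Lines 726-761 show H_STUFF_TCE, which writes one value across a whole range: hardware tables are unmapped per entry starting at ioba >> stt->page_shift (line 743), then the shadow loop at lines 760-761 advances the IOBA by one guest page per iteration and re-derives the index from it instead of tracking it separately:

    /* tce_value is typically 0, clearing the whole range */
    for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
            kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);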
770 struct kvmppc_spapr_tce_table *stt; in kvmppc_h_get_tce() local
776 stt = kvmppc_find_table(vcpu->kvm, liobn); in kvmppc_h_get_tce()
777 if (!stt) in kvmppc_h_get_tce()
780 ret = kvmppc_ioba_validate(stt, ioba, 1); in kvmppc_h_get_tce()
784 idx = (ioba >> stt->page_shift) - stt->offset; in kvmppc_h_get_tce()
785 page = stt->pages[idx / TCES_PER_PAGE]; in kvmppc_h_get_tce()
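
Lines 770-785 are the read side, H_GET_TCE: the same lookup and IOBA validation, then the rebased index from line 784 selects the backing page and slot. The tail below fills in the return path; reporting the TCE in GPR4 and treating a never-allocated page as a cleared (zero) entry are assumptions consistent with the lazy allocation above:

    idx = (ioba >> stt->page_shift) - stt->offset;
    page = stt->pages[idx / TCES_PER_PAGE];
    if (!page) {
            /* never written: report a cleared TCE */
            kvmppc_set_gpr(vcpu, 4, 0);
            return H_SUCCESS;
    }
    tbl = (u64 *)page_address(page);
    kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);
    return H_SUCCESS;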