Lines Matching refs: vmm

23 #include "vmm.h"
75 struct nvkm_vmm *vmm;
113 VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
129 if (it->vmm->func->flush) {
131 it->vmm->func->flush(it->vmm, it->flush);
145 struct nvkm_vmm *vmm = it->vmm;
159 func->sparse(vmm, pgd->pt[0], pdei, 1);
162 func->unmap(vmm, pgd->pt[0], pdei, 1);
170 func->pde(vmm, pgd, pdei);
177 func->pde(vmm, pgd, pdei);
190 nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
203 struct nvkm_vmm *vmm = it->vmm;
244 pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
252 pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
267 dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
272 desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
302 struct nvkm_vmm *vmm = it->vmm;
348 desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
351 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
358 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
419 struct nvkm_vmm *vmm = it->vmm;
420 struct nvkm_mmu *mmu = vmm->mmu;
457 desc->func->sparse(vmm, pt, pteb, ptes);
459 desc->func->invalid(vmm, pt, pteb, ptes);
462 desc->func->unmap(vmm, pt, pteb, ptes);
470 desc->func->sparse(vmm, pt, 0, pten);
472 desc->func->invalid(vmm, pt, 0, pten);
478 it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
501 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
513 it.vmm = vmm;
523 it.pt[it.max] = vmm->pd;
567 MAP_PTES(vmm, pt, ptei, ptes, map);
569 CLR_PTES(vmm, pt, ptei, ptes);
602 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
605 nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
612 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
616 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
621 nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
630 nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
632 const struct nvkm_vmm_page *page = vmm->func->page;
662 int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
665 nvkm_vmm_ptes_sparse(vmm, start, size, false);
669 nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
680 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
685 mutex_lock(&vmm->mutex.map);
686 nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
690 mutex_unlock(&vmm->mutex.map);
694 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
698 mutex_lock(&vmm->mutex.map);
699 nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
701 mutex_unlock(&vmm->mutex.map);
705 nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
708 nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
713 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
716 mutex_lock(&vmm->mutex.ref);
717 nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
718 mutex_unlock(&vmm->mutex.ref);
722 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
727 mutex_lock(&vmm->mutex.ref);
728 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
732 nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
733 mutex_unlock(&vmm->mutex.ref);
736 mutex_unlock(&vmm->mutex.ref);
741 __nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
746 nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
753 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
756 if (vmm->managed.raw) {
757 nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
758 nvkm_vmm_ptes_put(vmm, page, addr, size);
760 __nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
765 __nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
769 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
773 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
780 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
786 if (vmm->managed.raw) {
787 ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
791 nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
795 return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
836 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
838 rb_erase(&vma->tree, &vmm->free);
842 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
844 nvkm_vmm_free_remove(vmm, vma);
850 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
852 struct rb_node **ptr = &vmm->free.rb_node;
874 rb_insert_color(&vma->tree, &vmm->free);
878 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
880 rb_erase(&vma->tree, &vmm->root);
884 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
886 nvkm_vmm_node_remove(vmm, vma);
892 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
894 struct rb_node **ptr = &vmm->root.rb_node;
910 rb_insert_color(&vma->tree, &vmm->root);
914 nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
916 struct rb_node *node = vmm->root.rb_node;
930 #define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
934 nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
940 nvkm_vmm_node_delete(vmm, next);
943 nvkm_vmm_node_delete(vmm, vma);
950 nvkm_vmm_node_remove(vmm, next);
954 nvkm_vmm_node_insert(vmm, next);
960 nvkm_vmm_node_remove(vmm, vma);
964 nvkm_vmm_node_insert(vmm, vma);
967 nvkm_vmm_node_delete(vmm, vma);
976 nvkm_vmm_node_split(struct nvkm_vmm *vmm,
986 nvkm_vmm_node_insert(vmm, vma);
992 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
996 nvkm_vmm_node_insert(vmm, tmp);
1019 nvkm_vmm_dump(struct nvkm_vmm *vmm)
1022 list_for_each_entry(vma, &vmm->list, head) {
1028 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
1033 if (vmm->rm.client.gsp)
1034 r535_mmu_vaspace_del(vmm);
1037 nvkm_vmm_dump(vmm);
1039 while ((node = rb_first(&vmm->root))) {
1041 nvkm_vmm_put(vmm, &vma);
1044 if (vmm->bootstrapped) {
1045 const struct nvkm_vmm_page *page = vmm->func->page;
1046 const u64 limit = vmm->limit - vmm->start;
1051 nvkm_mmu_ptc_dump(vmm->mmu);
1052 nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
1055 vma = list_first_entry(&vmm->list, typeof(*vma), head);
1058 WARN_ON(!list_empty(&vmm->list));
1060 if (vmm->nullp) {
1061 dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
1062 vmm->nullp, vmm->null);
1065 if (vmm->pd) {
1066 nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
1067 nvkm_vmm_pt_del(&vmm->pd);
1072 nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
1080 nvkm_vmm_node_insert(vmm, vma);
1081 list_add_tail(&vma->head, &vmm->list);
1089 struct nvkm_vmm *vmm)
1097 vmm->func = func;
1098 vmm->mmu = mmu;
1099 vmm->name = name;
1100 vmm->debug = mmu->subdev.debug;
1101 kref_init(&vmm->kref);
1103 __mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
1104 mutex_init(&vmm->mutex.ref);
1105 mutex_init(&vmm->mutex.map);
1126 vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
1127 if (!vmm->pd)
1129 vmm->pd->refs[0] = 1;
1130 INIT_LIST_HEAD(&vmm->join);
1137 vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
1138 if (!vmm->pd->pt[0])
1143 INIT_LIST_HEAD(&vmm->list);
1144 vmm->free = RB_ROOT;
1145 vmm->root = RB_ROOT;
1152 vmm->start = 0;
1153 vmm->limit = 1ULL << bits;
1154 if (addr + size < addr || addr + size > vmm->limit)
1158 if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
1161 vmm->managed.p.addr = 0;
1162 vmm->managed.p.size = addr;
1168 nvkm_vmm_free_insert(vmm, vma);
1169 list_add_tail(&vma->head, &vmm->list);
1174 size = vmm->limit - addr;
1175 if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
1178 vmm->managed.n.addr = addr;
1179 vmm->managed.n.size = size;
1184 vmm->start = addr;
1185 vmm->limit = size ? (addr + size) : (1ULL << bits);
1186 if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
1189 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
1192 nvkm_vmm_free_insert(vmm, vma);
1193 list_add(&vma->head, &vmm->list);
1211 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1229 return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
1230 return nvkm_vmm_node_split(vmm, vma, addr, size);
1234 nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
1236 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
1250 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
1253 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
1270 nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
1272 const struct nvkm_vmm_page *page = vmm->func->page;
1288 addr + size < addr || addr + size > vmm->limit) {
1289 VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
1294 if (!(vma = nvkm_vmm_node_search(vmm, addr)))
1334 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
1336 vmm->func->page, map);
1343 tmp->refd = page - vmm->func->page;
1356 ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
1360 nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
1365 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
1393 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1398 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1406 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
1410 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
1412 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
1415 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1418 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1421 nvkm_vmm_unmap_region(vmm, vma);
1425 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1428 mutex_lock(&vmm->mutex.vmm);
1429 nvkm_vmm_unmap_locked(vmm, vma, false);
1430 mutex_unlock(&vmm->mutex.vmm);
1435 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1441 VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
1448 VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
1461 VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
1467 return vmm->func->valid(vmm, argv, argc, map);
1471 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1474 for (map->page = vmm->func->page; map->page->shift; map->page++) {
1475 VMM_DEBUG(vmm, "trying %d", map->page->shift);
1476 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
1483 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1493 VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
1503 const u32 debug = vmm->debug;
1504 vmm->debug = 0;
1505 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1506 vmm->debug = debug;
1508 VMM_DEBUG(vmm, "invalid at any page size");
1509 nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1515 map->page = &vmm->func->page[vma->refd];
1517 map->page = &vmm->func->page[vma->page];
1519 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
1521 VMM_DEBUG(vmm, "invalid %d\n", ret);
1553 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
1557 vma->refd = map->page - vmm->func->page;
1559 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
1562 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1571 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
1576 if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
1577 vmm->managed.raw)
1578 return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1580 mutex_lock(&vmm->mutex.vmm);
1581 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1583 mutex_unlock(&vmm->mutex.vmm);
1588 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1595 nvkm_vmm_free_delete(vmm, prev);
1600 nvkm_vmm_free_delete(vmm, next);
1603 nvkm_vmm_free_insert(vmm, vma);
1607 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1609 const struct nvkm_vmm_page *page = vmm->func->page;
1634 nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
1640 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1652 nvkm_vmm_unmap_region(vmm, next);
1664 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
1675 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
1679 nvkm_vmm_node_remove(vmm, vma);
1685 nvkm_vmm_put_region(vmm, vma);
1689 nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
1693 mutex_lock(&vmm->mutex.vmm);
1694 nvkm_vmm_put_locked(vmm, vma);
1695 mutex_unlock(&vmm->mutex.vmm);
1701 nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
1704 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
1710 VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
1716 VMM_DEBUG(vmm, "args %016llx %d %d %d",
1727 if (unlikely((getref || vmm->func->page_block) && !shift)) {
1728 VMM_DEBUG(vmm, "page size required: %d %016llx",
1729 getref, vmm->func->page_block);
1737 for (page = vmm->func->page; page->shift; page++) {
1743 VMM_DEBUG(vmm, "page %d %016llx", shift, size);
1752 temp = vmm->free.rb_node;
1773 const int p = page - vmm->func->page;
1776 if (vmm->func->page_block && prev && prev->page != p)
1777 addr = ALIGN(addr, vmm->func->page_block);
1781 if (vmm->func->page_block && next && next->page != p)
1782 tail = ALIGN_DOWN(tail, vmm->func->page_block);
1785 nvkm_vmm_free_remove(vmm, this);
1799 nvkm_vmm_put_region(vmm, vma);
1802 nvkm_vmm_free_insert(vmm, vma);
1808 nvkm_vmm_put_region(vmm, vma);
1811 nvkm_vmm_free_insert(vmm, tmp);
1816 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
1818 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
1820 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
1824 nvkm_vmm_put_region(vmm, vma);
1830 vma->page = page - vmm->func->page;
1833 nvkm_vmm_node_insert(vmm, vma);
1839 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
1842 mutex_lock(&vmm->mutex.vmm);
1843 ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
1844 mutex_unlock(&vmm->mutex.vmm);
1849 nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
1852 const struct nvkm_vmm_page *page = &vmm->func->page[refd];
1854 nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
1858 nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
1860 const struct nvkm_vmm_page *page = vmm->func->page;
1862 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1866 nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
1868 const struct nvkm_vmm_page *page = vmm->func->page;
1873 return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
1877 nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
1881 mutex_lock(&vmm->mutex.ref);
1882 ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
1883 mutex_unlock(&vmm->mutex.ref);
1889 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1891 if (inst && vmm && vmm->func->part) {
1892 mutex_lock(&vmm->mutex.vmm);
1893 vmm->func->part(vmm, inst);
1894 mutex_unlock(&vmm->mutex.vmm);
1899 nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1902 if (vmm->func->join) {
1903 mutex_lock(&vmm->mutex.vmm);
1904 ret = vmm->func->join(vmm, inst);
1905 mutex_unlock(&vmm->mutex.vmm);
1915 nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
1920 nvkm_vmm_boot(struct nvkm_vmm *vmm)
1922 const struct nvkm_vmm_page *page = vmm->func->page;
1923 const u64 limit = vmm->limit - vmm->start;
1929 ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
1933 nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
1935 vmm->bootstrapped = true;
1942 struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
1943 nvkm_vmm_dtor(vmm);
1944 kfree(vmm);
1950 struct nvkm_vmm *vmm = *pvmm;
1951 if (vmm) {
1952 kref_put(&vmm->kref, nvkm_vmm_del);
1958 nvkm_vmm_ref(struct nvkm_vmm *vmm)
1960 if (vmm)
1961 kref_get(&vmm->kref);
1962 return vmm;
1971 struct nvkm_vmm *vmm = NULL;
1973 ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
1974 key, name, &vmm);
1976 nvkm_vmm_unref(&vmm);
1977 *pvmm = vmm;