Lines matching refs: vma
802 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
803 if (vma) {
804 vma->addr = addr;
805 vma->size = size;
806 vma->page = NVKM_VMA_PAGE_NONE;
807 vma->refd = NVKM_VMA_PAGE_NONE;
809 return vma;
813 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
817 BUG_ON(vma->size == tail);
819 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
821 vma->size -= tail;
823 new->mapref = vma->mapref;
824 new->sparse = vma->sparse;
825 new->page = vma->page;
826 new->refd = vma->refd;
827 new->used = vma->used;
828 new->part = vma->part;
829 new->busy = vma->busy;
830 new->mapped = vma->mapped;
831 list_add(&new->head, &vma->head);
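
nvkm_vma_tail() shrinks a region by tail bytes and returns a new node covering the split-off tail, copying the parent's state flags and linking it after the parent in the address-ordered list. Below is a minimal userspace sketch of just the address/size arithmetic, using a hypothetical struct region rather than the driver's types:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

struct region {
        uint64_t addr;
        uint64_t size;
};

/* Split 'tail' bytes off the end of 'r'; 'r' keeps the head. */
static struct region *region_tail(struct region *r, uint64_t tail)
{
        struct region *new;

        /* the driver BUG()s on splitting off the whole region */
        if (tail == 0 || tail >= r->size)
                return NULL;

        new = malloc(sizeof(*new));
        if (!new)
                return NULL;
        new->addr = r->addr + (r->size - tail);
        new->size = tail;
        r->size -= tail;
        return new;
}

int main(void)
{
        struct region r = { .addr = 0x1000, .size = 0x4000 };
        struct region *t = region_tail(&r, 0x1000);

        printf("head: %#" PRIx64 "+%#" PRIx64 "\n", r.addr, r.size);  /* 0x1000+0x3000 */
        if (t)
                printf("tail: %#" PRIx64 "+%#" PRIx64 "\n", t->addr, t->size);  /* 0x4000+0x1000 */
        free(t);
        return 0;
}
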
836 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
838 rb_erase(&vma->tree, &vmm->free);
842 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
844 nvkm_vmm_free_remove(vmm, vma);
845 list_del(&vma->head);
846 kfree(vma);
850 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
858 if (vma->size < this->size)
861 if (vma->size > this->size)
864 if (vma->addr < this->addr)
867 if (vma->addr > this->addr)
873 rb_link_node(&vma->tree, parent, ptr);
874 rb_insert_color(&vma->tree, &vmm->free);
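
nvkm_vmm_free_insert() keys the free rb-tree on size first and address second, so the allocator can find the smallest free region that still satisfies a request. A hypothetical userspace sketch reproducing only that ordering with qsort() (the driver uses the kernel rb-tree, not a sorted array):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

struct region {
        uint64_t addr;
        uint64_t size;
};

static int free_cmp(const void *a, const void *b)
{
        const struct region *x = a, *y = b;

        if (x->size != y->size)
                return x->size < y->size ? -1 : 1;
        if (x->addr != y->addr)
                return x->addr < y->addr ? -1 : 1;
        return 0;
}

int main(void)
{
        struct region free_list[] = {
                { 0x30000, 0x2000 },
                { 0x10000, 0x1000 },
                { 0x50000, 0x1000 },
        };
        size_t i, n = sizeof(free_list) / sizeof(free_list[0]);

        qsort(free_list, n, sizeof(free_list[0]), free_cmp);

        /* Smallest first: 0x10000, then 0x50000 (same size, lower addr wins), then 0x30000. */
        for (i = 0; i < n; i++)
                printf("%#" PRIx64 "+%#" PRIx64 "\n", free_list[i].addr, free_list[i].size);
        return 0;
}
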
878 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
880 rb_erase(&vma->tree, &vmm->root);
884 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
886 nvkm_vmm_node_remove(vmm, vma);
887 list_del(&vma->head);
888 kfree(vma);
892 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
900 if (vma->addr < this->addr)
903 if (vma->addr > this->addr)
909 rb_link_node(&vma->tree, parent, ptr);
910 rb_insert_color(&vma->tree, &vmm->root);
918 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
919 if (addr < vma->addr)
922 if (addr >= vma->addr + vma->size)
925 return vma;
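
nvkm_vmm_node_search() walks the address-keyed root rb-tree for the node whose [addr, addr + size) range contains the requested address. The same containment test, sketched here as a binary search over a sorted array of hypothetical regions:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>

struct region {
        uint64_t addr;
        uint64_t size;
};

static const struct region *region_search(const struct region *r, size_t n, uint64_t addr)
{
        size_t lo = 0, hi = n;

        while (lo < hi) {
                size_t mid = lo + (hi - lo) / 2;

                if (addr < r[mid].addr)
                        hi = mid;                       /* go left */
                else if (addr >= r[mid].addr + r[mid].size)
                        lo = mid + 1;                   /* go right */
                else
                        return &r[mid];                 /* containing region */
        }
        return NULL;
}

int main(void)
{
        /* Sorted, non-overlapping regions, as the VMM maintains them. */
        static const struct region map[] = {
                { 0x0000, 0x1000 },
                { 0x1000, 0x3000 },
                { 0x8000, 0x2000 },
        };
        const struct region *hit = region_search(map, 3, 0x2345);

        if (hit)
                printf("hit: %#" PRIx64 "+%#" PRIx64 "\n", hit->addr, hit->size);
        else
                printf("miss\n");
        return 0;
}
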
935 struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
938 if (vma->size == size) {
939 vma->size += next->size;
942 prev->size += vma->size;
943 nvkm_vmm_node_delete(vmm, vma);
946 return vma;
951 vma->size -= size;
959 if (vma->size != size) {
960 nvkm_vmm_node_remove(vmm, vma);
962 vma->addr += size;
963 vma->size -= size;
964 nvkm_vmm_node_insert(vmm, vma);
966 prev->size += vma->size;
967 nvkm_vmm_node_delete(vmm, vma);
972 return vma;
977 struct nvkm_vma *vma, u64 addr, u64 size)
981 if (vma->addr != addr) {
982 prev = vma;
983 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
985 vma->part = true;
986 nvkm_vmm_node_insert(vmm, vma);
989 if (vma->size != size) {
991 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
992 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
999 return vma;
1003 nvkm_vma_dump(struct nvkm_vma *vma)
1006 vma->addr, (u64)vma->size,
1007 vma->used ? '-' : 'F',
1008 vma->mapref ? 'R' : '-',
1009 vma->sparse ? 'S' : '-',
1010 vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
1011 vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
1012 vma->part ? 'P' : '-',
1013 vma->busy ? 'B' : '-',
1014 vma->mapped ? 'M' : '-',
1015 vma->memory);
1021 struct nvkm_vma *vma;
1022 list_for_each_entry(vma, &vmm->list, head) {
1023 nvkm_vma_dump(vma);
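
nvkm_vma_dump() prints one line per region with a column of single-character flags: F for a free (unused) region, then R (mapref), S (sparse), the page and refd indices as digits, P (part), B (busy) and M (mapped). A hypothetical standalone snippet that rebuilds the same flag column for a toy state, to make the dump output easier to read:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>

#define PAGE_NONE 0xff   /* stand-in for NVKM_VMA_PAGE_NONE */

struct state {
        bool used, mapref, sparse, part, busy, mapped;
        uint8_t page, refd;
};

static void dump(uint64_t addr, uint64_t size, const struct state *s)
{
        printf("%016" PRIx64 " %016" PRIx64 " %c%c%c%c%c%c%c%c\n", addr, size,
               s->used   ? '-' : 'F',
               s->mapref ? 'R' : '-',
               s->sparse ? 'S' : '-',
               s->page != PAGE_NONE ? '0' + s->page : '-',
               s->refd != PAGE_NONE ? '0' + s->refd : '-',
               s->part   ? 'P' : '-',
               s->busy   ? 'B' : '-',
               s->mapped ? 'M' : '-');
}

int main(void)
{
        struct state mapped_st = { .used = true, .mapref = true, .mapped = true,
                                   .page = 1, .refd = 1 };
        struct state free_st   = { .used = false, .page = PAGE_NONE, .refd = PAGE_NONE };

        dump(0x100000, 0x10000, &mapped_st);   /* flags: -R-11--M */
        dump(0x200000, 0x40000, &free_st);     /* flags: F------- */
        return 0;
}
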
1030 struct nvkm_vma *vma;
1040 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
1041 nvkm_vmm_put(vmm, &vma);
1055 vma = list_first_entry(&vmm->list, typeof(*vma), head);
1056 list_del(&vma->head);
1057 kfree(vma);
1074 struct nvkm_vma *vma;
1075 if (!(vma = nvkm_vma_new(addr, size)))
1077 vma->mapref = true;
1078 vma->sparse = false;
1079 vma->used = true;
1080 nvkm_vmm_node_insert(vmm, vma);
1081 list_add_tail(&vma->head, &vmm->list);
1094 struct nvkm_vma *vma;
1166 if (!(vma = nvkm_vma_new(addr, size)))
1168 nvkm_vmm_free_insert(vmm, vma);
1169 list_add_tail(&vma->head, &vmm->list);
1189 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
1192 nvkm_vmm_free_insert(vmm, vma);
1193 list_add(&vma->head, &vmm->list);
1211 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1217 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
1222 if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
1229 return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
1230 return nvkm_vmm_node_split(vmm, vma, addr, size);
1236 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
1241 if (!vma)
1245 if (!vma->mapped || vma->memory)
1248 size = min(limit - start, vma->size - (start - vma->addr));
1250 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
1253 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
1255 vma = next;
1256 vma->refd = NVKM_VMA_PAGE_NONE;
1257 vma->mapped = false;
1259 } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
1273 struct nvkm_vma *vma, *tmp;
1294 if (!(vma = nvkm_vmm_node_search(vmm, addr)))
1299 bool mapped = vma->mapped;
1312 size = min_t(u64, size, vma->size + vma->addr - addr);
1317 if (!vma->mapref || vma->memory) {
1334 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
1346 vma = tmp;
1372 if (vma->addr + vma->size == addr + size)
1373 vma = node(vma, next);
1387 } while (vma && start < limit);
1393 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1398 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1399 nvkm_memory_unref(&vma->memory);
1400 vma->mapped = false;
1402 if (vma->part && (prev = node(vma, prev)) && prev->mapped)
1404 if ((next = node(vma, next)) && (!next->part || next->mapped))
1406 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
1410 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
1412 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
1414 if (vma->mapref) {
1415 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1416 vma->refd = NVKM_VMA_PAGE_NONE;
1418 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1421 nvkm_vmm_unmap_region(vmm, vma);
1425 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1427 if (vma->memory) {
1429 nvkm_vmm_unmap_locked(vmm, vma, false);
1435 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1457 if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
1458 !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
1462 vma->addr, (u64)vma->size, map->offset, map->page->shift,
1471 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1476 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
1483 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1489 map->no_comp = vma->no_comp;
1492 if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
1495 map->offset, (u64)vma->size);
1500 if (vma->page == NVKM_VMA_PAGE_NONE &&
1501 vma->refd == NVKM_VMA_PAGE_NONE) {
1505 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1509 nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1514 if (vma->refd != NVKM_VMA_PAGE_NONE)
1515 map->page = &vmm->func->page[vma->refd];
1517 map->page = &vmm->func->page[vma->page];
1519 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
1552 if (vma->refd == NVKM_VMA_PAGE_NONE) {
1553 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
1557 vma->refd = map->page - vmm->func->page;
1559 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
1562 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1563 nvkm_memory_unref(&vma->memory);
1564 vma->memory = nvkm_memory_ref(map->memory);
1565 vma->mapped = true;
1566 vma->tags = map->tags;
1571 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
1576 if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
1578 return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1581 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1582 vma->busy = false;
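
nvkm_vmm_map_valid() rejects a candidate page size unless the region's address, its size and the backing-memory offset are all aligned to 1 << page->shift. A hedged standalone sketch of that alignment rule (names here are hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool is_aligned(uint64_t value, uint64_t granularity)
{
        return (value & (granularity - 1)) == 0;
}

static bool map_page_ok(uint64_t addr, uint64_t size, uint64_t offset, unsigned shift)
{
        const uint64_t gran = 1ULL << shift;

        return is_aligned(addr, gran) &&
               is_aligned(size, gran) &&
               is_aligned(offset, gran);
}

int main(void)
{
        /* A 64KiB page (shift 16) fits this region; a 2MiB page (shift 21) does not. */
        printf("64K: %d\n", map_page_ok(0x100000, 0x80000, 0, 16));  /* 1 */
        printf("2M:  %d\n", map_page_ok(0x100000, 0x80000, 0, 21));  /* 0 */
        return 0;
}
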
1588 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1592 if ((prev = node(vma, prev)) && !prev->used) {
1593 vma->addr = prev->addr;
1594 vma->size += prev->size;
1598 if ((next = node(vma, next)) && !next->used) {
1599 vma->size += next->size;
1603 nvkm_vmm_free_insert(vmm, vma);
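
nvkm_vmm_put_region() coalesces before reinserting into the free tree: an unused previous neighbour donates its base address and size, an unused next neighbour donates its size, so adjacent free regions never coexist. A hypothetical list-based sketch of the same coalescing:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>

struct region {
        uint64_t addr, size;
        bool used;
        struct region *prev, *next;
};

static void coalesce_free(struct region *r)
{
        struct region *n;

        if ((n = r->prev) && !n->used) {        /* absorb previous free neighbour */
                r->addr = n->addr;
                r->size += n->size;
                r->prev = n->prev;
                if (r->prev)
                        r->prev->next = r;
                free(n);
        }
        if ((n = r->next) && !n->used) {        /* absorb next free neighbour */
                r->size += n->size;
                r->next = n->next;
                if (r->next)
                        r->next->prev = r;
                free(n);
        }
        r->used = false;
}

int main(void)
{
        struct region *a = calloc(1, sizeof(*a));
        struct region *b = calloc(1, sizeof(*b));
        struct region *c = calloc(1, sizeof(*c));

        if (!a || !b || !c)
                return 1;
        *a = (struct region){ .addr = 0x0000, .size = 0x1000, .used = false, .next = b };
        *b = (struct region){ .addr = 0x1000, .size = 0x2000, .used = true,  .prev = a, .next = c };
        *c = (struct region){ .addr = 0x3000, .size = 0x1000, .used = false, .prev = b };

        coalesce_free(b);   /* b swallows a and c: one free region 0+0x4000 */
        printf("free: %#" PRIx64 "+%#" PRIx64 "\n", b->addr, b->size);
        free(b);
        return 0;
}
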
1607 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1610 struct nvkm_vma *next = vma;
1612 BUG_ON(vma->part);
1614 if (vma->mapref || !vma->sparse) {
1635 size, vma->sparse,
1649 next = vma;
1653 } while ((next = node(vma, next)) && next->part);
1655 if (vma->sparse && !vma->mapref) {
1664 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
1666 if (vma->sparse) {
1675 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
1679 nvkm_vmm_node_remove(vmm, vma);
1682 vma->page = NVKM_VMA_PAGE_NONE;
1683 vma->refd = NVKM_VMA_PAGE_NONE;
1684 vma->used = false;
1685 nvkm_vmm_put_region(vmm, vma);
1691 struct nvkm_vma *vma = *pvma;
1692 if (vma) {
1694 nvkm_vmm_put_locked(vmm, vma);
1706 struct nvkm_vma *vma = NULL, *tmp;
1786 vma = this;
1791 if (unlikely(!vma))
1797 if (addr != vma->addr) {
1798 if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
1799 nvkm_vmm_put_region(vmm, vma);
1802 nvkm_vmm_free_insert(vmm, vma);
1803 vma = tmp;
1806 if (size != vma->size) {
1807 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
1808 nvkm_vmm_put_region(vmm, vma);
1816 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
1818 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
1820 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
1824 nvkm_vmm_put_region(vmm, vma);
1828 vma->mapref = mapref && !getref;
1829 vma->sparse = sparse;
1830 vma->page = page - vmm->func->page;
1831 vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
1832 vma->used = true;
1833 nvkm_vmm_node_insert(vmm, vma);
1834 *pvma = vma;
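
nvkm_vmm_get_locked() carves the requested range out of a larger free region with two tail splits: the head remainder stays in the free tree, then any excess beyond the requested size is split off the allocation and returned to the free tree as well. A standalone sketch of the two splits, repeating the hypothetical region_tail() helper so the example stays self-contained:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

struct region {
        uint64_t addr, size;
};

static struct region *region_tail(struct region *r, uint64_t tail)
{
        struct region *new = malloc(sizeof(*new));

        if (!new)
                return NULL;
        new->addr = r->addr + (r->size - tail);
        new->size = tail;
        r->size -= tail;
        return new;
}

int main(void)
{
        struct region free_head = { .addr = 0x1000, .size = 0x8000 };
        struct region *alloc, *free_tail = NULL;
        const uint64_t want_addr = 0x3000, want_size = 0x2000;

        /* Split the allocation (plus anything after it) off the free head... */
        alloc = region_tail(&free_head, free_head.size - (want_addr - free_head.addr));
        /* ...then split the unwanted tail off the allocation. */
        if (alloc && alloc->size > want_size)
                free_tail = region_tail(alloc, alloc->size - want_size);

        printf("free head: %#" PRIx64 "+%#" PRIx64 "\n", free_head.addr, free_head.size);  /* 0x1000+0x2000 */
        if (alloc)
                printf("alloc:     %#" PRIx64 "+%#" PRIx64 "\n", alloc->addr, alloc->size);  /* 0x3000+0x2000 */
        if (free_tail)
                printf("free tail: %#" PRIx64 "+%#" PRIx64 "\n", free_tail->addr, free_tail->size);  /* 0x5000+0x4000 */

        free(free_tail);
        free(alloc);
        return 0;
}
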