Lines Matching +full:software +full:- +full:locked

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible to map arbitrary driver-dependent memory
 * regions into the linear user address-space. It provides offsets to the
 * caller which can then be used on the address_space of the drm-device. It
 * takes care to not overlap regions, size them appropriately and to not
 * confuse mm-core by inconsistent fake vm_pgoff fields. It should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage the allocations, but drm_mm is
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
 * no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return value is given in numbers of pages, and object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * see drm_vma_node_offset_addr().
 *
 * Additionally to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, drm_vma_node_allow() must be called; otherwise, an mmap() call on that
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke().
 */
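/*
 * Usage sketch: converting a node's page-based offset into the byte-based
 * fake offset handed to user-space. It relies on the drm_vma_node_offset_addr()
 * helper from <drm/drm_vma_manager.h>; the "my_gem_mmap_offset" wrapper and
 * the use of the GEM-embedded vma_node are illustrative assumptions.
 */
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>

/* Return the byte-based offset user-space passes to mmap() for @obj. */
static u64 my_gem_mmap_offset(struct drm_gem_object *obj)
{
        return drm_vma_node_offset_addr(&obj->vma_node);
}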
/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
        rwlock_init(&mgr->vm_lock);
        drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * The caller must remove all allocated nodes before destroying the manager;
 * the manager must not be accessed after this call.
 */
        drm_mm_takedown(&mgr->vm_addr_space_mm);
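/*
 * Usage sketch: setting the manager up at driver load and tearing it down at
 * unload. The "my_drv" structure is an illustrative assumption;
 * DRM_FILE_PAGE_OFFSET_START/_SIZE are the constants from
 * <drm/drm_vma_manager.h> that describe the standard fake-offset window,
 * which starts above the first 4 GiB of byte offsets. GEM-based drivers get
 * an equivalent manager set up for them by DRM core and rarely call these
 * directly.
 */
#include <drm/drm_vma_manager.h>

struct my_drv {
        struct drm_vma_offset_manager vma_manager;
};

static void my_drv_mmap_space_init(struct my_drv *drv)
{
        drm_vma_offset_manager_init(&drv->vma_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);
}

static void my_drv_mmap_space_fini(struct my_drv *drv)
{
        /* All nodes must have been removed before this point. */
        drm_vma_offset_manager_destroy(&drv->vma_manager);
}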
/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find the node that spans the whole area given by @start and @pages. The
 * caller must hold the manager's lookup lock (drm_vma_offset_lock_lookup())
 * around this call and for as long as it uses the returned node. This can be
 * used to implement weakly referenced lookups via kref_get_unless_zero().
 */
        /* Binary search on the rb-tree backing the drm_mm interval tree. */
        iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
        /* For each visited drm_mm node, compare @start against its start... */
        offset = node->start;
        /* ...descend right when @start lies at or above this node... */
        iter = iter->rb_right;
        /* ...and left when it lies below. */
        iter = iter->rb_left;
        /* Finally, check that the best match spans the whole requested area. */
        offset = best->start + best->size;
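/*
 * Usage sketch: resolving a faulting page offset back to a driver object.
 * The "my_obj" type and its embedded kref are illustrative assumptions; the
 * lock/unlock helpers and kref_get_unless_zero() follow the weakly referenced
 * lookup pattern mentioned in the comment above.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <drm/drm_vma_manager.h>

struct my_obj {
        struct kref ref;
        struct drm_vma_offset_node vma_node;
};

static struct my_obj *my_obj_lookup(struct drm_vma_offset_manager *mgr,
                                    unsigned long pgoff, unsigned long npages)
{
        struct drm_vma_offset_node *node;
        struct my_obj *obj = NULL;

        drm_vma_offset_lock_lookup(mgr);
        node = drm_vma_offset_lookup_locked(mgr, pgoff, npages);
        if (node) {
                obj = container_of(node, struct my_obj, vma_node);
                /* Take a reference before dropping the lookup lock. */
                if (!kref_get_unless_zero(&obj->ref))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(mgr);

        return obj;
}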
/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages does not need to match the size of the
 * underlying memory object that you want to map. It only limits the size that
 * user-space can map into their address space.
 */
        int ret = 0;

        write_lock(&mgr->vm_lock);
        /* Only insert into the address space if the node is not added yet. */
        if (!drm_mm_node_allocated(&node->vm_node))
                ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
                                         &node->vm_node, pages);
        write_unlock(&mgr->vm_lock);

        return ret;
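/*
 * Usage sketch: reserving a fake mmap offset when an object is created. The
 * "my_obj_add_mmap_offset" helper and its byte-sized @size parameter are
 * illustrative assumptions; GEM-based drivers typically use
 * drm_gem_create_mmap_offset() instead of calling this directly.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <drm/drm_vma_manager.h>

static int my_obj_add_mmap_offset(struct drm_vma_offset_manager *mgr,
                                  struct drm_vma_offset_node *node,
                                  size_t size)
{
        /* The manager works in pages, so round the object size up. */
        return drm_vma_offset_add(mgr, node, DIV_ROUND_UP(size, PAGE_SIZE));
}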
/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before,
 * this does nothing.
 */
        write_lock(&mgr->vm_lock);
        if (drm_mm_node_allocated(&node->vm_node)) {
                drm_mm_remove_node(&node->vm_node);
                /* Clear the node so helpers report a zero offset again. */
                memset(&node->vm_node, 0, sizeof(node->vm_node));
        }
        write_unlock(&mgr->vm_lock);
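/*
 * Usage sketch: releasing an object's fake offset at destruction time. The
 * helper below is an illustrative assumption; it also zaps any live
 * user-space mappings via drm_vma_node_unmap() (a wrapper around
 * unmap_mapping_range() in <drm/drm_vma_manager.h>), since removing the node
 * alone does not tear existing mappings down.
 */
#include <drm/drm_vma_manager.h>

static void my_obj_release_mmap_offset(struct drm_vma_offset_manager *mgr,
                                       struct drm_vma_offset_node *node,
                                       struct address_space *mapping)
{
        drm_vma_node_unmap(node, mapping);
        /* Safe even if the node was never added to the manager. */
        drm_vma_offset_remove(mgr, node);
}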
/*
 * Internal helper vma_node_allow(): record that an open-file (@tag) may map
 * this node in the per-node rb-tree node->vm_files, keyed by the tag pointer.
 * The new entry is preallocated because it is quite unlikely that an
 * open-file is added twice to a single node, so we don't optimize for that
 * case. The insertion walk below is shown abridged.
 */
        write_lock(&node->vm_lock);

        iter = &node->vm_files.rb_node;
        if (tag == entry->vm_tag) {
                /* Already allowed: just bump the per-tag reference count. */
                entry->vm_count++;
        } else if (tag > entry->vm_tag) {
                iter = &(*iter)->rb_right;
        } else {
                iter = &(*iter)->rb_left;
        }

        /* Reached only if preallocation failed and the tag was not found. */
        ret = -ENOMEM;

        /* Link the preallocated entry into the tree for a new tag. */
        new->vm_tag = tag;
        new->vm_count = 1;
        rb_link_node(&new->vm_rb, parent, iter);
        rb_insert_color(&new->vm_rb, &node->vm_files);

        write_unlock(&node->vm_lock);
/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
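/*
 * Usage sketch: granting an open-file access when it receives a handle to the
 * object. The hook name is an illustrative assumption; GEM-based drivers get
 * an equivalent call made for them by DRM core when a handle is created, so
 * this is mostly relevant for drivers managing nodes by hand.
 */
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

static int my_obj_handle_open(struct drm_gem_object *obj, struct drm_file *file)
{
        /* Ref-counted: each successful call needs a matching revoke. */
        return drm_vma_node_allow(&obj->vma_node, file);
}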
/**
 * drm_vma_node_allow_once - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * This is not ref-counted, unlike drm_vma_node_allow(), hence
 * drm_vma_node_revoke() should be called only once after this.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
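/*
 * Usage sketch for the non-ref-counted variant: when the caller can guarantee
 * that it grants access at most once per (object, open-file) pair, it pairs a
 * single drm_vma_node_allow_once() with a single drm_vma_node_revoke(). The
 * hook below is an illustrative assumption.
 */
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

static int my_obj_first_handle_open(struct drm_gem_object *obj,
                                    struct drm_file *file)
{
        return drm_vma_node_allow_once(&obj->vma_node, file);
}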
/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag. If @tag is not on the
 * list, nothing is done.
 *
 * This is locked against concurrent access internally.
 */
        write_lock(&node->vm_lock);
        iter = node->vm_files.rb_node;
        while (iter) {
                entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
                if (tag == entry->vm_tag) {
                        /* Drop one reference; free the entry on the last one. */
                        if (!--entry->vm_count) {
                                rb_erase(&entry->vm_rb, &node->vm_files);
                                kfree(entry);
                        }
                        break;
                } else if (tag > entry->vm_tag) {
                        iter = iter->rb_right;
                } else {
                        iter = iter->rb_left;
                }
        }
        write_unlock(&node->vm_lock);
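/*
 * Usage sketch: dropping one access grant when an open-file loses a handle to
 * the object. With the ref-counted drm_vma_node_allow(), a file that was
 * allowed N times keeps mmap access until N revokes have been issued. The
 * hook name is an illustrative assumption.
 */
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

static void my_obj_handle_close(struct drm_gem_object *obj, struct drm_file *file)
{
        drm_vma_node_revoke(&obj->vma_node, file);
}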
/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node for whether @tag is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true if @tag is on the list
 */
        read_lock(&node->vm_lock);
        iter = node->vm_files.rb_node;
        while (iter) {
                entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
                if (tag == entry->vm_tag)
                        break;
                else if (tag > entry->vm_tag)
                        iter = iter->rb_right;
                else
                        iter = iter->rb_left;
        }
        /* A non-NULL iter here means @tag is on the list. */
        read_unlock(&node->vm_lock);
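/*
 * Usage sketch: enforcing the access list in an mmap path. DRM's generic GEM
 * mmap code performs an equivalent check; the helper below is an illustrative
 * assumption for drivers wiring this up by hand.
 */
#include <linux/errno.h>
#include <drm/drm_file.h>
#include <drm/drm_vma_manager.h>

static int my_check_mmap_access(struct drm_vma_offset_node *node,
                                struct drm_file *file)
{
        /* Refuse mappings from open-files that were never allowed. */
        if (!drm_vma_node_is_allowed(node, file))
                return -EACCES;

        return 0;
}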