/* Excerpt: lines matching "buffer" / "manager" from the habanalabs unified memory manager (memory_mgr.c) */

// SPDX-License-Identifier: GPL-2.0
/**
 * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
 *                       the buffer descriptor.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Find the buffer in the store and return a pointer to its descriptor.
 * Increase buffer refcount. If not found - return NULL.
 */
	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle);
		return NULL;
	}
	kref_get(&buf->refcount);
	spin_unlock(&mmg->lock);
	return buf;
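
/*
 * Illustration (not from the driver): the relation between a buffer handle and
 * the IDR key used in the lookup above. hl_mmap_mem_buf_alloc() below builds
 * the handle as ((u64)idr_id | mem_id) << PAGE_SHIFT; assuming the mem_id type
 * tag only occupies bits above the low 32, lower_32_bits() of the down-shifted
 * handle recovers the raw IDR id. The helper name is hypothetical.
 */
static inline u32 example_handle_to_idr_key(u64 handle)
{
	return lower_32_bits(handle >> PAGE_SHIFT);
}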

/**
 * hl_mmap_mem_buf_destroy - destroy the unused buffer
 * @buf: memory manager buffer descriptor
 *
 * Internal function, used as a final step of buffer release. Shall be invoked
 * only when the buffer is no longer in use (removed from idr). Will call the
 * release callback (if applicable) and free the memory.
 */
	if (buf->behavior->release)
		buf->behavior->release(buf);
	kfree(buf);

/**
 * hl_mmap_mem_buf_release - release buffer
 *
 * Internal kref release callback, invoked when the last reference to the buffer
 * is dropped. Removes the buffer from the IDR, then destroys it. Shall not be
 * called from an interrupt context.
 */
	spin_lock(&buf->mmg->lock);
	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&buf->mmg->lock);
	hl_mmap_mem_buf_destroy(buf);

/**
 * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
 *
 * Will remove the buffer from idr, without destroying it. Caller must hold mmg->lock.
 */
	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));

/**
 * hl_mmap_mem_buf_put - decrease the reference to the buffer
 * @buf: memory manager buffer descriptor
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 */
	return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
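
/*
 * Usage sketch (illustrative only): look up a buffer by handle, use it, then
 * drop the reference taken by hl_mmap_mem_buf_get(). The surrounding function
 * name is hypothetical.
 */
static int example_with_buf(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	buf = hl_mmap_mem_buf_get(mmg, handle);	/* takes a reference, or NULL */
	if (!buf)
		return -EINVAL;

	/* ... buffer cannot be destroyed while the reference is held ... */

	hl_mmap_mem_buf_put(buf);		/* may release the buffer */
	return 0;
}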

/**
 * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
 *                              given handle.
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context. Return -EINVAL if handle was
 * not found, else return the put outcome (0 or 1).
 */
	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev,
			"Buff put failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}
	if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
		spin_unlock(&mmg->lock);
		hl_mmap_mem_buf_destroy(buf);
		return 1;
	}
	spin_unlock(&mmg->lock);
	return 0;
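
/*
 * Illustrative only: handling the three possible outcomes of
 * hl_mmap_mem_buf_put_handle(). The caller is hypothetical.
 */
static void example_release_by_handle(struct hl_mem_mgr *mmg, u64 handle)
{
	int rc = hl_mmap_mem_buf_put_handle(mmg, handle);

	if (rc == -EINVAL)
		pr_debug("handle %#llx is not registered\n", handle);
	else if (rc == 1)
		pr_debug("handle %#llx: last reference dropped\n", handle);
	else
		pr_debug("handle %#llx: still referenced elsewhere\n", handle);
}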

/**
 * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
 *
 * @mmg: parent unified memory manager
 * @behavior: behavior object describing this buffer polymorphic behavior
 * @gfp: gfp flags to use for the memory allocations
 * @args: additional args passed to behavior->alloc
 *
 * Allocate and register a new memory buffer inside the given memory manager.
 * Return the pointer to the new buffer on success or NULL on failure.
 */
	spin_lock(&mmg->lock);
	rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
	spin_unlock(&mmg->lock);
	if (rc < 0) {
		dev_err(mmg->dev,
			"%s: Failed to allocate IDR for a new buffer, rc=%d\n",
			behavior->topic, rc);
		goto free_buf;
	}

	buf->mmg = mmg;
	buf->behavior = behavior;
	buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
	kref_init(&buf->refcount);

	rc = buf->behavior->alloc(buf, gfp, args);
	if (rc) {
		dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
			behavior->topic, rc);
		goto remove_idr;
	}
	return buf;

remove_idr:
	spin_lock(&mmg->lock);
	idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&mmg->lock);
free_buf:
	kfree(buf);
	return NULL;
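
/*
 * Usage sketch (illustrative, not from the driver): a minimal behavior object
 * and an allocation through it. The behavior struct type name and the
 * HL_MMAP_TYPE_CB tag are taken from the driver headers as assumptions; the
 * example_* names are hypothetical. The alloc callback is expected to set
 * buf->mappable_size, which hl_mem_mgr_mmap() validates against the vma size.
 */
static int example_buf_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	/* allocate backing storage here and set buf->mappable_size */
	return 0;
}

static void example_buf_release(struct hl_mmap_mem_buf *buf)
{
	/* free whatever example_buf_alloc() allocated */
}

static struct hl_mmap_mem_buf_behavior example_behavior = {
	.topic = "EXAMPLE",
	.mem_id = HL_MMAP_TYPE_CB,	/* assumed type tag, for illustration only */
	.alloc = example_buf_alloc,
	.release = example_buf_release,
	/* .mmap callback omitted in this sketch */
};

static u64 example_create_buf(struct hl_mem_mgr *mmg)
{
	struct hl_mmap_mem_buf *buf;

	buf = hl_mmap_mem_buf_alloc(mmg, &example_behavior, GFP_KERNEL, NULL);
	if (!buf)
		return 0;

	/* the handle can later be passed to _get(), _put_handle() or mmap() */
	return buf->handle;
}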

/**
 * hl_mmap_mem_buf_vm_close - handle mmap close
 *
 * Put the memory buffer if it is no longer mapped.
 */
	struct hl_mmap_mem_buf *buf = (struct hl_mmap_mem_buf *)vma->vm_private_data;
	long new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		buf->real_mapped_size = new_mmap_size;
		return;
	}
	atomic_set(&buf->mmap, 0);
	hl_mmap_mem_buf_put(buf);
	vma->vm_private_data = NULL;
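
/*
 * Worked example of the bookkeeping above (hypothetical numbers): for a buffer
 * mapped with mappable_size == 3 * PAGE_SIZE, a partial munmap() of one page
 * arrives here with vm_end - vm_start == PAGE_SIZE, so real_mapped_size drops
 * from 3 to 2 pages and the buffer is kept. Only when the remaining mapped size
 * reaches zero is the mmap flag cleared and the mmap-time reference dropped.
 * This callback is the .close handler of hl_mmap_mem_buf_vm_ops, which
 * hl_mem_mgr_mmap() installs on the vma below.
 */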

/**
 * hl_mem_mgr_mmap - map the given buffer to the user
 * @mmg: unified memory manager
 * @vma: the vma object to map the buffer to
 * @args: additional args passed to behavior->mmap
 *
 * Map the buffer specified by the vma->vm_pgoff to the given vma.
 */
	/* The page offset holds the buffer handle; clear it before the mmap itself */
	handle = vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = 0;

	/* Reference is taken here and dropped on every error path below */
	buf = hl_mmap_mem_buf_get(mmg, handle);
	if (!buf) {
		dev_err(mmg->dev,
			"Memory mmap failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	user_mem_size = vma->vm_end - vma->vm_start;
	if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
		dev_err(mmg->dev,
			"%s: Memory mmap failed, mmap size 0x%llx != 0x%llx allocated physical mem size\n",
			buf->behavior->topic, user_mem_size, buf->mappable_size);
		rc = -EINVAL;
		goto put_mem;
	}

	/* On older kernels access_ok() also took a VERIFY_WRITE type argument */
	if (!access_ok((void __user *)(uintptr_t)vma->vm_start, user_mem_size)) {
		dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
			buf->behavior->topic, vma->vm_start);
		rc = -EINVAL;
		goto put_mem;
	}

	if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
		dev_err(mmg->dev, "%s, Memory mmap failed, already mmaped to user\n",
			buf->behavior->topic);
		rc = -EINVAL;
		goto put_mem;
	}

	vma->vm_ops = &hl_mmap_mem_buf_vm_ops;

	/* Note: We're transferring the memory reference to vma->vm_private_data here. */
	vma->vm_private_data = buf;

	rc = buf->behavior->mmap(buf, vma, args);
	if (rc) {
		atomic_set(&buf->mmap, 0);
		goto put_mem;
	}

	buf->real_mapped_size = buf->mappable_size;
	vma->vm_pgoff = handle >> PAGE_SHIFT;
	return 0;

put_mem:
	hl_mmap_mem_buf_put(buf);
	return rc;
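
/*
 * Usage sketch (illustrative): how a file_operations .mmap handler could
 * forward into the unified memory manager. Userspace passes the buffer handle
 * as the mmap() offset, which the kernel exposes as vma->vm_pgoff. The
 * example_device type and its mmg member are hypothetical stand-ins for the
 * driver's private data.
 */
struct example_device {
	struct hl_mem_mgr mmg;
	/* ... */
};

static int example_fops_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct example_device *edev = filp->private_data;

	/* hl_mem_mgr_mmap() decodes the handle from vma->vm_pgoff itself */
	return hl_mem_mgr_mmap(&edev->mmg, vma, NULL);
}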

/**
 * hl_mem_mgr_init - initialize unified memory manager
 * @dev: owner device pointer
 * @mmg: structure to initialize
 *
 * Initialize an instance of unified memory manager
 */
	mmg->dev = dev;
	spin_lock_init(&mmg->lock);
	idr_init(&mmg->handles);

	/* Helper used by hl_mem_mgr_fini(): count still-busy handles by type */
	switch (mem_id) {
	case HL_MMAP_TYPE_CB:		++stats->n_busy_cb;	break;
	case HL_MMAP_TYPE_TS_BUFF:	++stats->n_busy_ts;	break;
	default:			++stats->n_busy_other;
	}

/**
 * hl_mem_mgr_fini - release unified memory manager
 * @mmg: parent unified memory manager
 * @stats: if non-NULL, will return some counters for handles that could not be removed.
 *
 * Release the unified memory manager. Shall not be called from an interrupt context.
 */
	idp = &mmg->handles;
	idr_for_each_entry(idp, buf, id) {
		topic = buf->behavior->topic;
		mem_id = buf->behavior->mem_id;
		/* a put that is not the last one means the handle leaked; it is counted in stats */
		if (hl_mmap_mem_buf_put(buf) != 1)
			dev_err(mmg->dev,
				"%s: Buff handle %u for CTX is still alive\n", topic, id);
	}

/**
 * hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
 * @mmg: parent unified memory manager
 *
 * Destroy the memory manager IDR.
 */
	if (!idr_is_empty(&mmg->handles))
		dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");

	idr_destroy(&mmg->handles);
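
/*
 * Lifecycle sketch (illustrative): a manager embedded in a hypothetical device
 * structure. The stats type name and its n_busy_* fields are assumed from the
 * counters used above; example_dev and the warning message are not from the
 * driver.
 */
struct example_dev {
	struct device *dev;
	struct hl_mem_mgr mmg;
};

static void example_mmg_lifecycle(struct example_dev *edev)
{
	struct hl_mem_mgr_fini_stats stats;

	hl_mem_mgr_init(edev->dev, &edev->mmg);

	/* ... buffers are allocated, mmapped and put during the device lifetime ... */

	hl_mem_mgr_fini(&edev->mmg, &stats);	/* puts any leftover handles */
	if (stats.n_busy_cb || stats.n_busy_ts || stats.n_busy_other)
		dev_warn(edev->dev, "some buffers were still referenced at teardown\n");

	hl_mem_mgr_idr_destroy(&edev->mmg);	/* only once the IDR is expected empty */
}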