Lines matching full:buffer in drivers/dma-buf/heaps/cma_heap.c (identifier search; the trailing "local"/"argument" tags mark how each declaration binds)
53 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_attach() local
61 ret = sg_alloc_table_from_pages(&a->table, buffer->pages, in cma_heap_attach()
62 buffer->pagecount, 0, in cma_heap_attach()
63 buffer->pagecount << PAGE_SHIFT, in cma_heap_attach()
76 mutex_lock(&buffer->lock); in cma_heap_attach()
77 list_add(&a->list, &buffer->attachments); in cma_heap_attach()
78 mutex_unlock(&buffer->lock); in cma_heap_attach()
86 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_detach() local
89 mutex_lock(&buffer->lock); in cma_heap_detach()
91 mutex_unlock(&buffer->lock); in cma_heap_detach()
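The matches at lines 53-78 and 86-91 are the dma-buf attach/detach pair: attach allocates a per-attachment record, builds an sg_table spanning the buffer's page array, and links the record onto buffer->attachments under buffer->lock; detach unlinks and frees it. Below is a hedged reconstruction of how these lines fit together. The struct layouts, the error paths, and the mapped flag (maintained by the map_dma_buf/unmap_dma_buf callbacks, which do not appear in the matches) are assumptions inferred from the visible lines and common dma-buf heap practice, not verbatim source.

    #include <linux/dma-buf.h>
    #include <linux/dma-heap.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Assumed heap/buffer bookkeeping, inferred from the fields the matches use. */
    struct cma_heap {
    	struct dma_heap *heap;
    	struct cma *cma;
    };

    struct cma_heap_buffer {
    	struct cma_heap *heap;
    	struct list_head attachments;	/* one entry per attached device */
    	struct mutex lock;		/* guards attachments, vmap_cnt, vaddr */
    	unsigned long len;
    	struct page *cma_pages;		/* head of the contiguous CMA run */
    	struct page **pages;		/* per-page pointers into that run */
    	pgoff_t pagecount;
    	int vmap_cnt;			/* refcount on the kernel alias */
    	void *vaddr;			/* kernel alias, valid while vmap_cnt > 0 */
    };

    /* Assumed per-attachment record; only the fields used below. */
    struct dma_heap_attachment {
    	struct device *dev;
    	struct sg_table table;
    	struct list_head list;
    	bool mapped;	/* toggled by map/unmap_dma_buf (not in the matches) */
    };

    static int cma_heap_attach(struct dma_buf *dmabuf,
    			   struct dma_buf_attachment *attachment)
    {
    	struct cma_heap_buffer *buffer = dmabuf->priv;
    	struct dma_heap_attachment *a;
    	int ret;

    	a = kzalloc(sizeof(*a), GFP_KERNEL);
    	if (!a)
    		return -ENOMEM;

    	/* One sg_table per attachment, spanning the whole page array. */
    	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
    					buffer->pagecount, 0,
    					buffer->pagecount << PAGE_SHIFT,
    					GFP_KERNEL);
    	if (ret) {
    		kfree(a);
    		return ret;
    	}

    	a->dev = attachment->dev;
    	INIT_LIST_HEAD(&a->list);
    	attachment->priv = a;

    	/* buffer->lock serializes against the CPU-access and detach paths. */
    	mutex_lock(&buffer->lock);
    	list_add(&a->list, &buffer->attachments);
    	mutex_unlock(&buffer->lock);

    	return 0;
    }

    static void cma_heap_detach(struct dma_buf *dmabuf,
    			    struct dma_buf_attachment *attachment)
    {
    	struct cma_heap_buffer *buffer = dmabuf->priv;
    	struct dma_heap_attachment *a = attachment->priv;

    	mutex_lock(&buffer->lock);
    	list_del(&a->list);
    	mutex_unlock(&buffer->lock);

    	sg_free_table(&a->table);
    	kfree(a);
    }

Because each attachment carries its own sg_table, concurrent attaches never share scatterlists, and buffer->lock only needs to cover the list manipulation.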
124 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_begin_cpu_access() local
127 mutex_lock(&buffer->lock); in cma_heap_dma_buf_begin_cpu_access()
129 if (buffer->vmap_cnt) in cma_heap_dma_buf_begin_cpu_access()
130 invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); in cma_heap_dma_buf_begin_cpu_access()
132 list_for_each_entry(a, &buffer->attachments, list) { in cma_heap_dma_buf_begin_cpu_access()
137 mutex_unlock(&buffer->lock); in cma_heap_dma_buf_begin_cpu_access()
145 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_end_cpu_access() local
148 mutex_lock(&buffer->lock); in cma_heap_dma_buf_end_cpu_access()
150 if (buffer->vmap_cnt) in cma_heap_dma_buf_end_cpu_access()
151 flush_kernel_vmap_range(buffer->vaddr, buffer->len); in cma_heap_dma_buf_end_cpu_access()
153 list_for_each_entry(a, &buffer->attachments, list) { in cma_heap_dma_buf_end_cpu_access()
158 mutex_unlock(&buffer->lock); in cma_heap_dma_buf_end_cpu_access()
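Lines 124-158 are the paired CPU-access hooks: before the CPU touches the buffer, any live kernel vmap alias is invalidated and each mapped device attachment is synced for the CPU; afterwards the alias is flushed back and ownership is returned to the devices. A hedged sketch of both callbacks follows, reusing the definitions above; the a->mapped guard and the dma_sync_sgtable_* calls are assumptions filled in from the standard heap shape, since the matches only show the locking and the vmap-range maintenance.

    #include <linux/dma-mapping.h>
    #include <linux/highmem.h>

    static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
    					     enum dma_data_direction direction)
    {
    	struct cma_heap_buffer *buffer = dmabuf->priv;
    	struct dma_heap_attachment *a;

    	mutex_lock(&buffer->lock);

    	/* Drop stale cachelines on the kernel alias before the CPU reads. */
    	if (buffer->vmap_cnt)
    		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

    	list_for_each_entry(a, &buffer->attachments, list) {
    		if (!a->mapped)
    			continue;
    		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
    	}
    	mutex_unlock(&buffer->lock);

    	return 0;
    }

    static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
    					   enum dma_data_direction direction)
    {
    	struct cma_heap_buffer *buffer = dmabuf->priv;
    	struct dma_heap_attachment *a;

    	mutex_lock(&buffer->lock);

    	/* Write CPU stores back through the kernel alias before DMA resumes. */
    	if (buffer->vmap_cnt)
    		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

    	list_for_each_entry(a, &buffer->attachments, list) {
    		if (!a->mapped)
    			continue;
    		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
    	}
    	mutex_unlock(&buffer->lock);

    	return 0;
    }

The mapped check skips devices that attached but never mapped, avoiding needless cache maintenance on their behalf.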
166 struct cma_heap_buffer *buffer = vma->vm_private_data; in cma_heap_vm_fault() local
168 if (vmf->pgoff >= buffer->pagecount) in cma_heap_vm_fault()
171 return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff])); in cma_heap_vm_fault()
180 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_mmap() local
188 vma->vm_private_data = buffer; in cma_heap_mmap()
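Lines 166-188 show the userspace mapping path: mmap does not populate the VMA up front, it only stashes the buffer in vm_private_data, and pages are inserted lazily, one PFN per fault, with the page offset bounds-checked against pagecount. The sketch below is hedged: the vm_operations_struct wiring and the flag setup are assumptions, as the matches show only the fault lookup and the private-data assignment. Note that vmf_insert_pfn() requires a VM_PFNMAP-style mapping; kernels before 6.3 set the flags with a direct vma->vm_flags |= instead of vm_flags_set().

    #include <linux/mm.h>

    static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
    {
    	struct vm_area_struct *vma = vmf->vma;
    	struct cma_heap_buffer *buffer = vma->vm_private_data;

    	/* Reject faults past the end of the backing page array. */
    	if (vmf->pgoff >= buffer->pagecount)
    		return VM_FAULT_SIGBUS;

    	/* Map exactly one page per fault, as a raw PFN. */
    	return vmf_insert_pfn(vma, vmf->address,
    			      page_to_pfn(buffer->pages[vmf->pgoff]));
    }

    static const struct vm_operations_struct dma_heap_vm_ops = {
    	.fault = cma_heap_vm_fault,
    };

    static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
    {
    	struct cma_heap_buffer *buffer = dmabuf->priv;

    	/* dma-buf mappings are shared; private COW mappings make no sense. */
    	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
    		return -EINVAL;

    	/* PFN mappings must not be expanded, dumped, or treated as RAM. */
    	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

    	vma->vm_ops = &dma_heap_vm_ops;
    	vma->vm_private_data = buffer;

    	return 0;
    }

Faulting pages in on demand keeps mmap() itself O(1) regardless of buffer size; only the pages userspace actually touches get page-table entries.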
193 static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) in cma_heap_do_vmap() argument
197 vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); in cma_heap_do_vmap()
206 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_vmap() local
210 mutex_lock(&buffer->lock); in cma_heap_vmap()
211 if (buffer->vmap_cnt) { in cma_heap_vmap()
212 buffer->vmap_cnt++; in cma_heap_vmap()
213 iosys_map_set_vaddr(map, buffer->vaddr); in cma_heap_vmap()
217 vaddr = cma_heap_do_vmap(buffer); in cma_heap_vmap()
222 buffer->vaddr = vaddr; in cma_heap_vmap()
223 buffer->vmap_cnt++; in cma_heap_vmap()
224 iosys_map_set_vaddr(map, buffer->vaddr); in cma_heap_vmap()
226 mutex_unlock(&buffer->lock); in cma_heap_vmap()
233 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_vunmap() local
235 mutex_lock(&buffer->lock); in cma_heap_vunmap()
236 if (!--buffer->vmap_cnt) { in cma_heap_vunmap()
237 vunmap(buffer->vaddr); in cma_heap_vunmap()
238 buffer->vaddr = NULL; in cma_heap_vunmap()
240 mutex_unlock(&buffer->lock); in cma_heap_vunmap()
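Lines 193-240 implement a refcounted kernel mapping: cma_heap_do_vmap builds the contiguous kernel alias with vmap(), cma_heap_vmap either reuses an existing mapping (bumping vmap_cnt) or creates it, and cma_heap_vunmap tears the alias down only when the count drops to zero. A hedged reconstruction follows; the error handling around cma_heap_do_vmap and the final iosys_map_clear() are assumptions based on the usual shape of these callbacks.

    #include <linux/iosys-map.h>
    #include <linux/vmalloc.h>

    static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
    {
    	void *vaddr;

    	/* Stitch the page array into one contiguous kernel mapping. */
    	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
    	if (!vaddr)
    		return ERR_PTR(-ENOMEM);

    	return vaddr;
    }

    static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
    {
    	struct cma_heap_buffer *buffer = dmabuf->priv;
    	void *vaddr;
    	int ret = 0;

    	mutex_lock(&buffer->lock);
    	if (buffer->vmap_cnt) {
    		/* Already mapped: just take another reference. */
    		buffer->vmap_cnt++;
    		iosys_map_set_vaddr(map, buffer->vaddr);
    		goto out;
    	}

    	vaddr = cma_heap_do_vmap(buffer);
    	if (IS_ERR(vaddr)) {
    		ret = PTR_ERR(vaddr);
    		goto out;
    	}
    	buffer->vaddr = vaddr;
    	buffer->vmap_cnt++;
    	iosys_map_set_vaddr(map, buffer->vaddr);
    out:
    	mutex_unlock(&buffer->lock);

    	return ret;
    }

    static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
    {
    	struct cma_heap_buffer *buffer = dmabuf->priv;

    	mutex_lock(&buffer->lock);
    	/* Drop the alias only when the last vmap reference goes away. */
    	if (!--buffer->vmap_cnt) {
    		vunmap(buffer->vaddr);
    		buffer->vaddr = NULL;
    	}
    	mutex_unlock(&buffer->lock);
    	iosys_map_clear(map);
    }

Because vmap_cnt and vaddr are only ever touched under buffer->lock, the begin/end CPU-access paths above can trust them when deciding whether the kernel alias needs invalidating or flushing.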
246 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_release() local
247 struct cma_heap *cma_heap = buffer->heap; in cma_heap_dma_buf_release()
249 if (buffer->vmap_cnt > 0) { in cma_heap_dma_buf_release()
250 WARN(1, "%s: buffer still mapped in the kernel\n", __func__); in cma_heap_dma_buf_release()
251 vunmap(buffer->vaddr); in cma_heap_dma_buf_release()
252 buffer->vaddr = NULL; in cma_heap_dma_buf_release()
256 kfree(buffer->pages); in cma_heap_dma_buf_release()
258 cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); in cma_heap_dma_buf_release()
259 kfree(buffer); in cma_heap_dma_buf_release()
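Lines 246-259 are the teardown path, run when the last dma-buf reference is dropped: a still-live kernel mapping at this point is a caller bug, so it is WARNed about and force-unmapped, then the page-pointer array is freed and the physically contiguous region is handed back to CMA. A hedged sketch under the definitions above:

    #include <linux/cma.h>

    static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
    {
    	struct cma_heap_buffer *buffer = dmabuf->priv;
    	struct cma_heap *cma_heap = buffer->heap;

    	/* A leftover vmap here means a vmap/vunmap imbalance upstream. */
    	if (buffer->vmap_cnt > 0) {
    		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
    		vunmap(buffer->vaddr);
    		buffer->vaddr = NULL;
    	}

    	/* Free the page-pointer array, then return the run to the CMA pool. */
    	kfree(buffer->pages);
    	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
    	kfree(buffer);
    }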
281 struct cma_heap_buffer *buffer; in cma_heap_allocate() local
291 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in cma_heap_allocate()
292 if (!buffer) in cma_heap_allocate()
295 INIT_LIST_HEAD(&buffer->attachments); in cma_heap_allocate()
296 mutex_init(&buffer->lock); in cma_heap_allocate()
297 buffer->len = size; in cma_heap_allocate()
329 buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL); in cma_heap_allocate()
330 if (!buffer->pages) { in cma_heap_allocate()
336 buffer->pages[pg] = &cma_pages[pg]; in cma_heap_allocate()
338 buffer->cma_pages = cma_pages; in cma_heap_allocate()
339 buffer->heap = cma_heap; in cma_heap_allocate()
340 buffer->pagecount = pagecount; in cma_heap_allocate()
345 exp_info.size = buffer->len; in cma_heap_allocate()
347 exp_info.priv = buffer; in cma_heap_allocate()
356 kfree(buffer->pages); in cma_heap_allocate()
360 kfree(buffer); in cma_heap_allocate()
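Lines 281-360 are the allocation path: a bookkeeping struct is kzalloc'd, a contiguous run of pages comes from cma_alloc(), a page-pointer array is built so the attach/vmap/fault paths can treat the run like any other page list, and the result is wrapped in a dma-buf via dma_buf_export(). The reconstruction below is hedged: the alignment clamp, the fd_flags plumbing, the cma_heap_buf_ops name (the dma_buf_ops table collecting the callbacks above), and the exact unwind labels are assumptions drawn from the conventional shape of a dma-buf heap allocator, since the matches show only the bookkeeping lines. The real allocator also zeroes the fresh CMA pages before exposing them; that pass is omitted here for brevity.

    static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
    					 unsigned long len,
    					 unsigned long fd_flags,
    					 unsigned long heap_flags)
    {
    	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
    	struct cma_heap_buffer *buffer;
    	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
    	size_t size = PAGE_ALIGN(len);
    	pgoff_t pagecount = size >> PAGE_SHIFT;
    	unsigned long align = get_order(size);
    	struct page *cma_pages;
    	struct dma_buf *dmabuf;
    	int ret = -ENOMEM;
    	pgoff_t pg;

    	/* CMA caps how strongly an allocation may be aligned. */
    	if (align > CONFIG_CMA_ALIGNMENT)
    		align = CONFIG_CMA_ALIGNMENT;

    	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
    	if (!buffer)
    		return ERR_PTR(-ENOMEM);

    	INIT_LIST_HEAD(&buffer->attachments);
    	mutex_init(&buffer->lock);
    	buffer->len = size;

    	/* One physically contiguous allocation backs the whole buffer. */
    	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
    	if (!cma_pages)
    		goto free_buffer;

    	/* Page-pointer array lets generic page-list helpers walk the run. */
    	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages),
    				      GFP_KERNEL);
    	if (!buffer->pages)
    		goto free_cma;

    	for (pg = 0; pg < pagecount; pg++)
    		buffer->pages[pg] = &cma_pages[pg];

    	buffer->cma_pages = cma_pages;
    	buffer->heap = cma_heap;
    	buffer->pagecount = pagecount;

    	/* Export as a dma-buf so userspace and other drivers can share it. */
    	exp_info.exp_name = dma_heap_get_name(heap);
    	exp_info.ops = &cma_heap_buf_ops;
    	exp_info.size = buffer->len;
    	exp_info.flags = fd_flags;
    	exp_info.priv = buffer;
    	dmabuf = dma_buf_export(&exp_info);
    	if (IS_ERR(dmabuf)) {
    		ret = PTR_ERR(dmabuf);
    		goto free_pages;
    	}
    	return dmabuf;

    free_pages:
    	kfree(buffer->pages);
    free_cma:
    	cma_release(cma_heap->cma, cma_pages, pagecount);
    free_buffer:
    	kfree(buffer);

    	return ERR_PTR(ret);
    }

The unwind labels release resources in the reverse order of acquisition, which is why the matched lines 356 and 360 (kfree of the page array and of the buffer) sit on separate error paths.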