/linux/drivers/gpu/drm/vmwgfx/

vmwgfx_page_dirty.c
    82: static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
    84:         struct vmw_bo_dirty *dirty = vbo->dirty;
    85:         pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
    86:         struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
   120: static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)
   122:         struct vmw_bo_dirty *dirty = vbo->dirty;
   123:         unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
   124:         struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
   130:         num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping,
   163: void vmw_bo_dirty_scan(struct vmw_bo *vbo)
   [all …]

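In the vmwgfx driver, `vbo` is a `struct vmw_bo`, VMware's guest-backed buffer object wrapping a TTM BO (`vbo->tbo`). The two scanners above implement dirty-page tracking for such objects: one walks page tables, the other write-protects the shared mapping so the next CPU write faults and re-reports the page. A kernel-context sketch (not standalone-buildable) of the write-protect flavor, using only calls visible in the listing; the `num_pages` parameter is a hypothetical stand-in for the driver's own range bookkeeping:

/*
 * Sketch: re-arm write-notify dirty tracking on a BO's shared mapping.
 * wp_shared_mapping_range() write-protects the PTEs backing the range,
 * so the next CPU write faults into the driver, which can record the
 * page as dirty again. 'num_pages' is hypothetical; the real code takes
 * the range from vbo->dirty.
 */
static long example_dirty_scan_mkwrite(struct vmw_bo *vbo, pgoff_t num_pages)
{
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;

	return wp_shared_mapping_range(mapping, offset, num_pages);
}
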
vmwgfx_bo.c
    35: static void vmw_bo_release(struct vmw_bo *vbo)
    39:         WARN_ON(vbo->tbo.base.funcs &&
    40:                 kref_read(&vbo->tbo.base.refcount) != 0);
    41:         vmw_bo_unmap(vbo);
    43:         xa_destroy(&vbo->detached_resources);
    44:         WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
    45:         if (vbo->is_dumb && vbo->dumb_surface) {
    46:                 res = &vbo->dumb_surface->res;
    47:                 WARN_ON(vbo != res->guest_memory_bo);
    62:         vmw_surface_unreference(&vbo->dumb_surface);
   [all …]

vmwgfx_bo.h
   136: void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
   137: void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size);
   138: void vmw_bo_unmap(struct vmw_bo *vbo);
   144: void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
   145: void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
   146: struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
   157: static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
   159:         int i = ARRAY_SIZE(vbo->res_prios);
   162:         if (vbo->res_prios[i]) {
   163:                 vbo->tbo.priority = i;
   [all …]

vmwgfx_validation.c
   158:                 struct vmw_bo *vbo)                  (in vmw_validation_find_bo_dup())
   167:         unsigned long key = (unsigned long) vbo;
   179:                 if (entry->base.bo == &vbo->tbo) {
   247:                 struct vmw_bo *vbo)                  (in vmw_validation_add_bo())
   251:         bo_node = vmw_validation_find_bo_dup(ctx, vbo);
   260:                 bo_node->hash.key = (unsigned long) vbo;
   265:                 val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
   383:                 struct vmw_bo *vbo,                  (in vmw_validation_res_switch_backup())
   394:         val->new_guest_memory_bo = vbo;
   424:         struct vmw_bo *vbo = res->guest_memory_bo;   (in vmw_validation_res_reserve())
   [all …]

vmwgfx_gem.c
    56:         struct vmw_bo *vbo = to_vmw_bo(obj);       (in vmw_gem_object_pin())
    58:         vmw_bo_pin_reserved(vbo, true);
    65:         struct vmw_bo *vbo = to_vmw_bo(obj);       (in vmw_gem_object_unpin())
    67:         vmw_bo_pin_reserved(vbo, false);
   202:         struct vmw_bo *vbo;                        (in vmw_prime_import_sg_table())
   217:         ret = vmw_bo_create(dev_priv, &params, &vbo);
   221:         vbo->tbo.base.funcs = &vmw_gem_object_funcs;
   223:         gem = &vbo->tbo.base;
   237:         struct vmw_bo *vbo;                        (in vmw_gem_object_create_ioctl())
   242:                                 req->size, &handle, &vbo);
   [all …]

vmwgfx_prime.c
    95:         struct vmw_bo *vbo;                        (in vmw_prime_handle_to_fd())
   102:         ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
   105:         if (vbo && vbo->is_dumb) {
   110:                         vbo,
   120:         vmw_user_bo_unref(&vbo);

vmwgfx_resource.c
   760: void vmw_resource_unbind_list(struct vmw_bo *vbo)
   763:         .bo = &vbo->tbo,
   767:         dma_resv_assert_held(vbo->tbo.base.resv);
   768:         while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
   769:                 struct rb_node *node = vbo->res_tree.rb_node;
   781:         (void) ttm_bo_wait(&vbo->tbo, false, false);
   978:         struct vmw_bo *vbo = NULL;                 (in vmw_resource_pin())
   981:         vbo = res->guest_memory_bo;
   983:         ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
   986:         if (!vbo->tbo.pin_count) {
   [all …]

vmwgfx_ttm_buffer.c
   568:         struct vmw_bo *vbo;                        (in vmw_bo_create_and_populate())
   579:         ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
   583:         ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
   586:                 container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
   590:         ttm_bo_unreserve(&vbo->tbo);
   593:         *bo_p = vbo;

vmwgfx_validation.h
   155:                 struct vmw_bo *vbo);
   171:                 struct vmw_bo *vbo,

vmwgfx_drv.h
   825: void vmw_resource_unbind_list(struct vmw_bo *vbo);
   831: int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
  1396: void vmw_bo_dirty_scan(struct vmw_bo *vbo);
  1397: int vmw_bo_dirty_add(struct vmw_bo *vbo);
  1400: void vmw_bo_dirty_release(struct vmw_bo *vbo);
  1401: void vmw_bo_dirty_unmap(struct vmw_bo *vbo,

vmwgfx_drv.c
   397:         struct vmw_bo *vbo;                        (in vmw_dummy_query_bo_create())
   415:         ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
   419:         ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
   427:         vmw_bo_pin_reserved(vbo, false);
   428:         ttm_bo_unreserve(&vbo->tbo);
   432:         vmw_bo_unreference(&vbo);
   434:         dev_priv->dummy_query_bo = vbo;

vmwgfx_kms.c
   237: static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
   239:         if (!(*vbo))
   242:         ttm_bo_unpin(&(*vbo)->tbo);
   243:         vmw_bo_unreference(vbo);
   630:         struct vmw_bo *vbo = vps->cursor.bo;       (in vmw_du_cursor_plane_unmap_cm())
   632:         if (!vbo || !vbo->map.virtual)
   635:         ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
   637:                 vmw_bo_unmap(vbo);
   638:         ttm_bo_unreserve(&vbo->tbo);

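Taken together, the vmwgfx hits show one consistent discipline: any map, pin, or teardown touches `vbo->tbo` only between `ttm_bo_reserve()` and `ttm_bo_unreserve()` (vmwgfx_kms.c:635-638, vmwgfx_drv.c:419-428, vmwgfx_ttm_buffer.c:590). A minimal kernel-context sketch of that pattern, built only from functions that appear in the listing; the driver caches the mapping in `vbo->map` and often defers the unmap, so unmapping immediately here is purely illustrative:

/* Sketch: reserve -> CPU-map -> use -> unmap -> unreserve on a vmw_bo. */
static int example_touch_vbo(struct vmw_bo *vbo)
{
	void *virtual;
	int ret;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL); /* interruptible */
	if (ret)
		return ret;

	virtual = vmw_bo_map_and_cache(vbo);	/* kmap, cached in vbo->map */
	if (virtual) {
		/* ... CPU access to the buffer contents ... */
		vmw_bo_unmap(vbo);		/* illustrative; often deferred */
	}

	ttm_bo_unreserve(&vbo->tbo);
	return 0;
}
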
/linux/fs/ntfs3/

attrib.c
  1247:         u64 vbo;                                   (in attr_data_read_resident())
  1259:         vbo = folio->index << PAGE_SHIFT;
  1261:         if (vbo > data_size)
  1264:         len = min(data_size - vbo, folio_size(folio));
  1266:         folio_fill_tail(folio, 0, resident_data(attr) + vbo, len);
  1274:         u64 vbo;                                   (in attr_data_write_resident())
  1288:         vbo = folio->index << PAGE_SHIFT;
  1290:         if (vbo < data_size) {
  1292:                 size_t len = min(data_size - vbo, folio_size(folio));
  1294:                 memcpy_from_folio(data + vbo, folio, 0, len);
   [all …]

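In the ntfs3 hits, `vbo` changes meaning entirely: it is a virtual byte offset into a file, attribute, or metadata stream. For resident attributes (data stored inline in the MFT record), a folio read is just a clamped copy, as lines 1259-1266 show. A self-contained model of that clamping; the function and parameter names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Model of attrib.c:1259-1266: clamp a folio-sized read of a resident
 * attribute against the attribute's data size. */
static uint64_t resident_read_len(uint64_t folio_index, uint64_t data_size,
				  uint64_t folio_size, unsigned page_shift)
{
	uint64_t vbo = folio_index << page_shift;	/* byte offset of folio */

	if (vbo > data_size)
		return 0;				/* folio entirely past EOF */
	uint64_t len = data_size - vbo;
	return len < folio_size ? len : folio_size;	/* min(data_size - vbo, folio_size) */
}

int main(void)
{
	/* 300 bytes of resident data, 4 KiB folios: folio 0 yields 300 bytes. */
	printf("%llu\n", (unsigned long long)resident_read_len(0, 300, 4096, 12));
	return 0;
}
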
dir.c
   353:                 const struct INDEX_HDR *hdr, u64 vbo, u64 pos,   (in ntfs_read_hdr())
   374:         if (vbo + off < pos)
   380:         ctx->pos = vbo + off;
   400:         u64 vbo;                                   (in ntfs_readdir())
   468:         vbo = (u64)bit << index_bits;
   469:         if (vbo >= i_size) {
   483:         vbo = (u64)bit << index_bits;
   484:         if (vbo >= i_size) {
   495:                 vbo + sbi->record_size, pos, name, ctx);

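`ntfs_readdir()` encodes the directory position as an absolute byte offset into the index stream: `vbo = (u64)bit << index_bits` is the start of the index block named by bitmap bit `bit`, and `ctx->pos = vbo + off` adds the entry's offset inside that block, which is what the resume test `vbo + off < pos` relies on. A small self-contained model (names invented):

#include <stdint.h>
#include <stdbool.h>

/* Model of dir.c:353-495: a readdir cursor is "index block start + offset
 * within block". index_bits is log2(index block size). */
static inline uint64_t dir_pos(uint64_t bit, unsigned index_bits, uint32_t off)
{
	uint64_t vbo = bit << index_bits;	/* byte offset of the index block */
	return vbo + off;			/* absolute stream position */
}

/* True if an entry at (bit, off) was already emitted before 'pos'. */
static inline bool already_emitted(uint64_t bit, unsigned index_bits,
				   uint32_t off, uint64_t pos)
{
	return dir_pos(bit, index_bits, off) < pos;
}

int main(void)
{
	/* 4 KiB index blocks (index_bits = 12): entry at block 3, offset 0x40. */
	uint64_t pos = dir_pos(3, 12, 0x40);		/* 0x3040 */
	return already_emitted(3, 12, 0x40, pos);	/* 0: not yet emitted */
}
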
fsntfs.c
   774:         u64 vbo;                                   (in ntfs_clear_mft_tail())
   786:         vbo = (u64)from * rs;
   787:         for (; from < to; from++, vbo += rs) {
   790:                 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
  1132:                 u64 vbo, const void *buf, size_t bytes, int sync)   (in ntfs_sb_write_run())
  1136:         u32 off = vbo & sbi->cluster_mask;
  1137:         CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
  1180:                 const struct runs_tree *run, u64 vbo)   (in ntfs_bread_run())
  1187:         if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
  1190:         lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
   [all …]

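Lines 1187-1190 of fsntfs.c are the canonical ntfs3 translation: split `vbo` into a virtual cluster number and an in-cluster remainder, map VCN to LCN through the run list, then recompose the logical byte offset. A self-contained model, with a single-extent `lookup_run()` standing in for `run_lookup_entry()`:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct extent { uint64_t vcn, lcn, len; };	/* one run: vcn..vcn+len-1 */

/* Stand-in for run_lookup_entry(): resolve a VCN within one extent. */
static bool lookup_run(const struct extent *e, uint64_t vcn, uint64_t *lcn)
{
	if (vcn < e->vcn || vcn >= e->vcn + e->len)
		return false;
	*lcn = e->lcn + (vcn - e->vcn);
	return true;
}

/* Model of fsntfs.c:1187-1190: virtual byte offset -> logical byte offset. */
static bool vbo_to_lbo(const struct extent *run, uint64_t vbo,
		       unsigned cluster_bits, uint64_t *lbo)
{
	uint64_t cluster_mask = (1ull << cluster_bits) - 1;
	uint64_t lcn;

	if (!lookup_run(run, vbo >> cluster_bits, &lcn))
		return false;				/* hole or out of range */
	*lbo = (lcn << cluster_bits) + (vbo & cluster_mask);
	return true;
}

int main(void)
{
	struct extent run = { .vcn = 0, .lcn = 100, .len = 16 };
	uint64_t lbo;

	/* 4 KiB clusters: vbo 0x1234 -> VCN 1 -> LCN 101, remainder 0x234. */
	if (vbo_to_lbo(&run, 0x1234, 12, &lbo))
		printf("lbo = 0x%llx\n", (unsigned long long)lbo); /* 0x65234 */
	return 0;
}
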
file.c
   256: static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
   261:         pgoff_t idx = vbo >> PAGE_SHIFT;
   262:         u32 from = vbo & (PAGE_SIZE - 1);
   542: static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
   549:         loff_t end = vbo + len;
   550:         loff_t vbo_down = round_down(vbo, max_t(unsigned long,
   612:         err = attr_punch_hole(ni, vbo, len, &frame_size);
   623:         vbo_a = (vbo + mask) & ~mask;
   627:         if (tmp > vbo) {
   628:                 err = ntfs_zero_range(inode, vbo, tmp);
   [all …]

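`ntfs_fallocate()` has to respect allocation granularity: `round_down(vbo, ...)` (line 550) and `vbo_a = (vbo + mask) & ~mask` (line 623) bracket the request, and the unaligned head that cannot be deallocated is zeroed via `ntfs_zero_range()` instead (lines 627-628). A worked self-contained example of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Model of file.c:550/623: align a punch-hole request to a power-of-two
 * granularity; mask = granularity - 1, as in the kernel code. */
static uint64_t align_down(uint64_t vbo, uint64_t mask) { return vbo & ~mask; }
static uint64_t align_up(uint64_t vbo, uint64_t mask)   { return (vbo + mask) & ~mask; }

int main(void)
{
	uint64_t mask = 4096 - 1;			/* 4 KiB clusters */
	uint64_t vbo = 5000, len = 20000;
	uint64_t vbo_a = align_up(vbo, mask);		/* 8192: first whole cluster */
	uint64_t end_a = align_down(vbo + len, mask);	/* 24576: end of last whole cluster */

	/* The unaligned head [5000, 8192) cannot be deallocated, only zeroed. */
	printf("zero head [%llu, %llu), punch [%llu, %llu)\n",
	       (unsigned long long)vbo, (unsigned long long)vbo_a,
	       (unsigned long long)vbo_a, (unsigned long long)end_a);
	return 0;
}
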
frecord.c
   955:         u64 vbo;                                   (in ni_ins_attr_ext())
   986:         vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0;
  1002:                 vbo <= ((u64)mi->rno << sbi->record_bits))) {
  1049:         if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) {
  1915:                 __u64 vbo, __u64 len)              (in ni_fiemap())
  1922:         CLST vcn = vbo >> cluster_bits;
  1961:         end = vbo + len;
  1966:         while (vbo < end) {
  2001:                 vbo = (u64)vcn << cluster_bits;
  2022:                 vbo = (u64)vcn << cluster_bits;
   [all …]

fslog.c
   911: static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
   920:         if (vbo >= bytes) {
   925:                 u32 bytes2idx = vbo - bytes;
   937:         e = Add2Ptr(rt, vbo);
   949:         if (off == vbo) {
   971:         if (off == vbo) {
  1001:         u32 vbo;                                   (struct member)
  1081:         u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);   (in lsn_to_vbo())
  1083:         return vbo;
  1142: static int read_log_page(struct ntfs_log *log, u32 vbo,
   [all …]

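fslog.c:1081 is denser than it looks: an LSN stores a wrap sequence number in its top `seq_num_bits` and an offset in 8-byte units below that, so `(lsn << seq_num_bits) >> (seq_num_bits - 3)` strips the sequence number and multiplies by 8 in two shifts. A self-contained check; the 64-bit LSN width and the `seq_num_bits` value here are illustrative assumptions:

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

/* Model of fslog.c:1081 (lsn_to_vbo): drop the sequence number from the
 * top bits, scale the remaining 8-byte-unit offset to bytes. */
static uint64_t lsn_to_vbo(uint64_t lsn, unsigned seq_num_bits)
{
	return (lsn << seq_num_bits) >> (seq_num_bits - 3);
}

int main(void)
{
	unsigned s = 48;		/* illustrative seq_num_bits value */
	uint64_t seq = 5, units = 0x123;	/* offset = 0x123 * 8 bytes */
	uint64_t lsn = (seq << (64 - s)) | units;

	/* Equivalent closed form: (lsn mod 2^(64-s)) * 8. */
	assert(lsn_to_vbo(lsn, s) == units * 8);
	printf("vbo = 0x%llx\n", (unsigned long long)lsn_to_vbo(lsn, s));
	return 0;
}
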
index.c
   208:         size_t data_size, valid_size, vbo, off = bit >> 3;   (in bmp_buf_get())
   263:         vbo = off & ~(size_t)sbi->block_mask;
   265:         bbuf->new_valid = vbo + blocksize;
   271:         if (vbo >= valid_size) {
   273:         } else if (vbo + blocksize > valid_size) {
   375:         size_t vbo = from >> 3;                    (in scan_nres_bitmap())
   376:         sector_t blk = (vbo & sbi->cluster_mask) >> sb->s_blocksize_bits;
   377:         sector_t vblock = vbo >> sb->s_blocksize_bits;
   390:         vcn = vbo >> sbi->cluster_bits;
   422:         vbo = (u64)vblock << sb->s_blocksize_bits;
   [all …]

bitmap.c
   508:         size_t wpos, wbit, iw, vbo;                (in wnd_rescan())
   517:         vbo = 0;
   528:                         vbo * 8 - prev_tail,
   543:                 u32 off = vbo & sbi->cluster_mask;
   545:                 if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
   569:                 wbit = vbo * 8;
   613:                 vbo += blocksize;
   682:         size_t vbo;                                (in wnd_map())
   690:         vbo = (u64)iw << sb->s_blocksize_bits;
   692:         if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
   [all …]

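index.c and bitmap.c track the same cursor in two units: `size_t vbo = from >> 3` (index.c:375) converts a bit index to a byte offset, and `wbit = vbo * 8` (bitmap.c:569) converts back. Trivial, but worth pinning down since both files interleave the two:

#include <stdint.h>
#include <assert.h>

/* Model of bitmap.c:569 / index.c:375: a bitmap position is tracked both
 * as a bit index and as a byte offset (vbo) into the bitmap stream. */
static inline uint64_t bit_to_vbo(uint64_t bit) { return bit >> 3; }	/* from >> 3 */
static inline uint64_t vbo_to_bit(uint64_t vbo) { return vbo * 8; }	/* wbit = vbo * 8 */

int main(void)
{
	/* Bit 1000 lives in byte 125; byte 125 starts at bit 1000. */
	assert(bit_to_vbo(1000) == 125);
	assert(vbo_to_bit(bit_to_vbo(1000)) == 1000);	/* exact: 1000 % 8 == 0 */
	return 0;
}
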
record.c
   121:         u64 vbo = (u64)mi->rno << sbi->record_bits;   (in mi_read())
   133:         err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
   152:                 vbo >> sbi->cluster_bits);
   162:         err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
   420:         u64 vbo = (u64)rno << sbi->record_bits;    (in mi_format_new())
   461:         err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,

ntfs_fs.h
   453: int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
   454: int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
   455: int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
   569:                 __u64 vbo, __u64 len);
   626:                 u64 vbo, const void *buf, size_t bytes, int sync);
   628:                 const struct runs_tree *run, u64 vbo);
   630:                 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb);
   631: int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
   634: int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
   639:                 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
   [all …]

inode.c
   564: static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
   601:         vcn = vbo >> cluster_bits;
   602:         off = vbo & sbi->cluster_mask;
   638:         if (vbo >= valid)
   645:         if (vbo >= valid)
   648:         if (vbo + bytes > valid) {
   649:                 ni->i_valid = vbo + bytes;
   652:         } else if (vbo >= valid) {
   655:         } else if (vbo + bytes <= valid) {
   657:         } else if (vbo + block_size <= valid) {
   [all …]

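`ntfs_get_block_vbo()` classifies every access against `ni->i_valid`, the initialized-data size: wholly below it is a plain read, straddling it means the tail must read as zeros (while a write extends `i_valid`, line 649), and wholly above it needs no I/O at all (lines 638-657). A self-contained model of the read-side classification:

#include <stdint.h>
#include <stdio.h>

/* Model of inode.c:645-657: classify a block-sized access at byte offset
 * vbo against the initialized-data boundary 'valid'. */
enum block_action { READ_FULL, READ_PARTIAL_ZERO_TAIL, ZERO_NO_IO };

static enum block_action classify(uint64_t vbo, uint64_t bytes, uint64_t valid)
{
	if (vbo >= valid)
		return ZERO_NO_IO;		/* nothing initialized here */
	if (vbo + bytes <= valid)
		return READ_FULL;		/* fully initialized */
	return READ_PARTIAL_ZERO_TAIL;		/* [valid, vbo+bytes) reads as zero */
}

int main(void)
{
	uint64_t valid = 10000;
	printf("%d %d %d\n",
	       classify(0, 4096, valid),	/* READ_FULL */
	       classify(8192, 4096, valid),	/* READ_PARTIAL_ZERO_TAIL */
	       classify(12288, 4096, valid));	/* ZERO_NO_IO */
	return 0;
}
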
/linux/drivers/gpu/drm/imx/ipuv3/

ipuv3-plane.c
   377:         unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba;   (in ipu_plane_atomic_check())
   466:         vbo = drm_plane_state_to_vbo(new_state);
   468:         if (vbo & 0x7 || vbo > 0xfffff8)
   473:         if (vbo != old_vbo)
   586:         unsigned long eba, ubo, vbo;               (in ipu_plane_atomic_update())
   710:         vbo = drm_plane_state_to_vbo(new_state);
   714:                 swap(ubo, vbo);
   717:                 fb->pitches[1], ubo, vbo);
   720:                 "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,

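In the IPUv3 driver, `ubo` and `vbo` are byte offsets of the U and V chroma planes relative to the luma base address `eba` in a planar YUV framebuffer; the hardware requires them 8-byte aligned and at most 0xfffff8 (line 468), and they are swapped for YVU orderings (line 714). A self-contained model; the YUV420 layout arithmetic is the generic planar convention and an assumption here, only the two checks mirror the driver:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Model: U/V plane offsets for a contiguous YUV420 buffer where the
 * chroma pitch is half the luma pitch. */
static bool plane_offsets(uint32_t height, uint32_t pitch,
			  uint32_t *ubo, uint32_t *vbo)
{
	uint32_t luma_size   = pitch * height;
	uint32_t chroma_size = (pitch / 2) * (height / 2);

	*ubo = luma_size;			/* U plane follows Y */
	*vbo = luma_size + chroma_size;		/* V plane follows U */

	/* Hardware constraints from ipuv3-plane.c:468: 8-byte aligned,
	 * at most 0xfffff8. */
	if ((*ubo & 0x7) || *ubo > 0xfffff8 || (*vbo & 0x7) || *vbo > 0xfffff8)
		return false;
	return true;
}

int main(void)
{
	uint32_t ubo, vbo;

	if (plane_offsets(1080, 1920, &ubo, &vbo))
		printf("ubo=%u vbo=%u\n", ubo, vbo);	/* 2073600, 2592000 */
	return 0;
}
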
/linux/drivers/gpu/drm/vc4/

vc4_validate.c
   899:         struct drm_gem_dma_object *vbo =           (in validate_gl_shader_rec())
   908:                 to_vc4_bo(&vbo->base)->write_seqno);
   913:         if (vbo->base.size < offset ||
   914:             vbo->base.size - offset < attr_size) {
   916:                 offset, attr_size, vbo->base.size);
   921:         max_index = ((vbo->base.size - offset - attr_size) /
   931:         *(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset;

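Here `vbo` is a vertex buffer object handed in by userspace, so `validate_gl_shader_rec()` must distrust `offset`: the split comparison `size < offset || size - offset < attr_size` (lines 913-914) avoids the wraparound that `offset + attr_size` could produce, and `max_index` (line 921) caps how many stride-spaced vertices fit behind the first record. The divisor is truncated in the listing, so the stride handling below is an assumption. A self-contained model:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of vc4_validate.c:913-921: overflow-safe validation of an
 * attribute stream inside a vertex buffer of 'size' bytes. */
static bool validate_attr(uint64_t size, uint64_t offset, uint64_t attr_size,
			  uint64_t stride, uint64_t *max_index)
{
	/* Subtract-and-compare instead of 'offset + attr_size', which
	 * could wrap past zero and defeat the bounds check. */
	if (size < offset || size - offset < attr_size)
		return false;

	/* With stride 0 every vertex reads the same bytes; otherwise count
	 * how many additional stride steps still fit. (Assumed handling:
	 * the divisor is cut off in the listing above.) */
	*max_index = stride ? (size - offset - attr_size) / stride : ~0ull;
	return true;
}

int main(void)
{
	uint64_t max_index;

	/* 64 KiB BO, 12-byte attributes every 16 bytes, starting at 256. */
	if (validate_attr(65536, 256, 12, 16, &max_index))
		printf("max_index = %llu\n", (unsigned long long)max_index); /* 4079 */
	return 0;
}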