/* drivers/gpu/drm/nouveau/nouveau_bo.h (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa) */
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__

#include <drm/drm_gem.h>

struct nouveau_channel;
struct nouveau_fence;
struct nvkm_vma;

struct nouveau_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
	u32 valid_domains;
	struct ttm_place placements[3];
	struct ttm_place busy_placements[3];
	bool force_coherent;
	struct ttm_bo_kmap_obj kmap;
	struct list_head head;

	/* protected by ttm_bo_reserve() */
	struct drm_file *reserved_by;
	struct list_head entry;
	int pbbo_index;
	bool validate_mapped;

	struct list_head vma_list;
	unsigned page_shift;

	struct nouveau_cli *cli;

	u32 tile_mode;
	u32 tile_flags;
	struct nouveau_drm_tile *tile;

	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
	 * gem reference to it! For debugging, use gem.filp != NULL to test
	 * whether it is valid. */
	struct drm_gem_object gem;

	/* protected by the ttm reservation lock */
	int pin_refcnt;

	struct ttm_bo_kmap_obj dma_buf_vmap;
};

static inline struct nouveau_bo *
nouveau_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct nouveau_bo, bo);
}

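/*
 * Upcast from the embedded TTM object, typically inside a ttm_bo_driver
 * callback.  A minimal sketch ("bo" is whatever ttm_buffer_object the
 * caller already holds; it is illustrative, not part of this header):
 *
 *	struct nouveau_bo *nvbo = nouveau_bo(bo);
 *	u32 tile_mode = nvbo->tile_mode;
 */
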
static inline int
nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *prev;

	if (!pnvbo)
		return -EINVAL;
	prev = *pnvbo;

	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
	if (prev) {
		struct ttm_buffer_object *bo = &prev->bo;

		ttm_bo_unref(&bo);
	}

	return 0;
}

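/*
 * nouveau_bo_ref() stores a new reference (or NULL) in *pnvbo and drops
 * whatever reference was there before.  A minimal sketch of taking and
 * later releasing a reference (the "mine" variable is illustrative):
 *
 *	struct nouveau_bo *mine = NULL;
 *
 *	nouveau_bo_ref(nvbo, &mine);
 *	...
 *	nouveau_bo_ref(NULL, &mine);
 *
 * The first call makes "mine" hold its own reference on nvbo; the second
 * drops that reference and resets the pointer to NULL.
 */
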
extern struct ttm_bo_driver nouveau_bo_driver;

void nouveau_bo_move_init(struct nouveau_drm *);
int  nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
		    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
		    struct reservation_object *robj,
		    struct nouveau_bo **);
int  nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int  nouveau_bo_unpin(struct nouveau_bo *);
int  nouveau_bo_map(struct nouveau_bo *);
void nouveau_bo_unmap(struct nouveau_bo *);
void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
			 bool no_wait_gpu);
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);

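/*
 * A minimal allocation/mapping sketch built only from the declarations
 * above; "cli" stands for an existing struct nouveau_cli, and the error
 * handling and the TTM_PL_FLAG_VRAM placement are illustrative
 * assumptions, not requirements of this header:
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(cli, 0x1000, 0, TTM_PL_FLAG_VRAM, 0, 0,
 *			     NULL, NULL, &nvbo);
 *	if (ret)
 *		return ret;
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
 *	if (ret == 0) {
 *		ret = nouveau_bo_map(nvbo);
 *		if (ret == 0) {
 *			nouveau_bo_wr32(nvbo, 0, 0xcafebabe);
 *			nouveau_bo_unmap(nvbo);
 *		}
 *		nouveau_bo_unpin(nvbo);
 *	}
 *	nouveau_bo_ref(NULL, &nvbo);
 */
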
struct nvkm_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);

int  nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
			struct nvkm_vma *);
void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);

/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
{
	bool is_iomem;
	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
						&nvbo->kmap, &is_iomem);
	WARN_ON_ONCE(ioptr && !is_iomem);
	return ioptr;
}

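/*
 * A short usage sketch for the helper above, assuming nouveau_bo_map()
 * has already succeeded on nvbo (the offset is illustrative):
 *
 *	void __iomem *ioptr = nvbo_kmap_obj_iovirtual(nvbo);
 *
 *	if (ioptr)
 *		iowrite32(0, ioptr + 0x10);
 */
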
#endif