/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#ifndef __PANFROST_GEM_H__
#define __PANFROST_GEM_H__

#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

struct panfrost_mmu;
11  
12  struct panfrost_gem_object {
13  	struct drm_gem_shmem_object base;
14  	struct sg_table *sgts;
15  
16  	/*
17  	 * Use a list for now. If searching a mapping ever becomes the
18  	 * bottleneck, we should consider using an RB-tree, or even better,
19  	 * let the core store drm_gem_object_mapping entries (where we
20  	 * could place driver specific data) instead of drm_gem_object ones
21  	 * in its drm_file->object_idr table.
22  	 *
23  	 * struct drm_gem_object_mapping {
24  	 *	struct drm_gem_object *obj;
25  	 *	void *driver_priv;
26  	 * };
27  	 */
28  	struct {
29  		struct list_head list;
30  		struct mutex lock;
31  	} mappings;
32  
33  	/*
34  	 * Count the number of jobs referencing this BO so we don't let the
35  	 * shrinker reclaim this object prematurely.
36  	 */
37  	atomic_t gpu_usecount;
38  
39  	/*
40  	 * Object chunk size currently mapped onto physical memory
41  	 */
42  	size_t heap_rss_size;
43  
44  	bool noexec		:1;
45  	bool is_heap		:1;
46  };
47  
48  struct panfrost_gem_mapping {
49  	struct list_head node;
50  	struct kref refcount;
51  	struct panfrost_gem_object *obj;
52  	struct drm_mm_node mmnode;
53  	struct panfrost_mmu *mmu;
54  	bool active		:1;
55  };
56  
57  static inline
to_panfrost_bo(struct drm_gem_object * obj)58  struct  panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
59  {
60  	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
61  }
62  
63  static inline struct panfrost_gem_mapping *
drm_mm_node_to_panfrost_mapping(struct drm_mm_node * node)64  drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
65  {
66  	return container_of(node, struct panfrost_gem_mapping, mmnode);
67  }
68  
69  struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
70  
71  struct drm_gem_object *
72  panfrost_gem_prime_import_sg_table(struct drm_device *dev,
73  				   struct dma_buf_attachment *attach,
74  				   struct sg_table *sgt);
75  
76  struct panfrost_gem_object *
77  panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags);
78  
79  int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
80  void panfrost_gem_close(struct drm_gem_object *obj,
81  			struct drm_file *file_priv);
82  
83  struct panfrost_gem_mapping *
84  panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
85  			 struct panfrost_file_priv *priv);
86  void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
87  void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
88  
89  int panfrost_gem_shrinker_init(struct drm_device *dev);
90  void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
91  
#endif /* __PANFROST_GEM_H__ */