/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
 */

#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-fence.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_NVJPG1 = 0x7,
	HOST1X_CLASS_NVENC = 0x21,
	HOST1X_CLASS_NVENC1 = 0x22,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVJPG = 0xC0,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
	HOST1X_CLASS_OFA = 0xF8,
};

struct host1x;
struct host1x_client;
struct iommu_group;

u64 host1x_get_dma_mask(struct host1x *host1x);

/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Note that entries are not periodically evicted from this cache and instead need to be
 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
 * released when the last reference to a buffer object represented by a mapping in this
 * cache is dropped.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};

static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
{
	INIT_LIST_HEAD(&cache->mappings);
	mutex_init(&cache->lock);
}

static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if not empty? */
	mutex_destroy(&cache->lock);
}

/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};

/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex protecting this structure against concurrent accesses
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};
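
/*
 * Illustrative sketch (not part of this header): drivers typically embed
 * struct host1x_client in their own device structure and recover it in
 * the ops callbacks via container_of(). The my_vic_* names below are
 * hypothetical.
 *
 *	struct my_vic {
 *		struct host1x_client client;
 *		void __iomem *regs;
 *	};
 *
 *	static int my_vic_init(struct host1x_client *client)
 *	{
 *		struct my_vic *vic = container_of(client, struct my_vic, client);
 *
 *		client->channel = host1x_channel_request(client);
 *		if (!client->channel)
 *			return -ENOMEM;
 *
 *		return my_vic_hw_init(vic);
 *	}
 *
 *	static const struct host1x_client_ops my_vic_client_ops = {
 *		.init = my_vic_init,
 *	};
 */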

/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};

static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}

struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};

static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	INIT_LIST_HEAD(&bo->mappings);
	spin_lock_init(&bo->lock);
	bo->ops = ops;
}
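
/*
 * Illustrative sketch (not part of this header): a minimal buffer object
 * embedding struct host1x_bo. The my_bo_* names are hypothetical; a
 * complete implementation also provides .pin/.unpin/.mmap/.munmap (see
 * struct host1x_bo_ops above).
 *
 *	struct my_bo {
 *		struct kref ref;
 *		struct host1x_bo base;
 *	};
 *
 *	static struct host1x_bo *my_bo_get(struct host1x_bo *bo)
 *	{
 *		struct my_bo *obj = container_of(bo, struct my_bo, base);
 *
 *		kref_get(&obj->ref);
 *		return bo;
 *	}
 *
 *	static void my_bo_put(struct host1x_bo *bo)
 *	{
 *		struct my_bo *obj = container_of(bo, struct my_bo, base);
 *
 *		kref_put(&obj->ref, my_bo_release);
 *	}
 *
 *	static const struct host1x_bo_ops my_bo_ops = {
 *		.get = my_bo_get,
 *		.put = my_bo_put,
 *	};
 *
 * with host1x_bo_init(&obj->base, &my_bo_ops) called at creation time.
 */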

static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);
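
/*
 * Illustrative sketch (not part of this header): pinning a buffer for
 * DMA to a device and releasing the mapping again. Passing a non-NULL
 * cache keeps mappings around for reuse (see struct host1x_bo_cache
 * above); this example assumes host1x_bo_pin() reports errors via
 * ERR_PTR().
 *
 *	struct host1x_bo_mapping *map;
 *
 *	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, NULL);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 * The DMA address is then available in map->phys and the mapping is
 * released with host1x_bo_unpin(map) once the hardware is done with it.
 */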

static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}

/*
 * host1x syncpoints
 */

#define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)
#define HOST1X_SYNCPT_HAS_BASE		(1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);
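
/*
 * Illustrative sketch (not part of this header): requesting a
 * client-managed syncpoint, reserving one increment, triggering it from
 * the CPU and waiting for the threshold to be reached. The 100 ms
 * timeout is arbitrary.
 *
 *	struct host1x_syncpt *sp;
 *	u32 thresh, value;
 *	int err;
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	thresh = host1x_syncpt_incr_max(sp, 1);
 *
 *	err = host1x_syncpt_incr(sp);
 *	if (!err)
 *		err = host1x_syncpt_wait(sp, thresh, msecs_to_jiffies(100),
 *					 &value);
 *
 *	host1x_syncpt_put(sp);
 */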

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				      bool timeout);
void host1x_fence_cancel(struct dma_fence *fence);
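
/*
 * Illustrative sketch (not part of this header): wrapping a syncpoint
 * threshold in a DMA fence so it can be handed to code that only
 * understands struct dma_fence. sp and thresh are assumed to come from
 * the syncpoint example above, and host1x_fence_create() is assumed to
 * report errors via ERR_PTR().
 *
 *	struct dma_fence *fence;
 *
 *	fence = host1x_fence_create(sp, thresh, true);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */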

/*
 * host1x channel
 */

struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
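
/*
 * Illustrative sketch (not part of this header): a client usually
 * requests a channel once at initialization time and releases it on
 * teardown; individual jobs are then queued via host1x_job_submit()
 * (see the job example further down).
 *
 *	channel = host1x_channel_request(client);
 *	if (!channel)
 *		return -EBUSY;
 *
 * and later, on teardown:
 *
 *	host1x_channel_stop(channel);
 *	host1x_channel_put(channel);
 */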

/*
 * host1x job
 */

#define HOST1X_RELOC_READ	(1 << 0)
#define HOST1X_RELOC_WRITE	(1 << 1)

struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};
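
/*
 * Illustrative sketch (not part of this header): a relocation asks the
 * job pinning code to patch the word at cmdbuf.offset with the pinned
 * address of target.bo plus target.offset, shifted right by shift.
 * cmdbuf and fb below are placeholder buffer objects.
 *
 *	struct host1x_reloc reloc = {
 *		.cmdbuf = { .bo = cmdbuf, .offset = 16 },
 *		.target = { .bo = fb, .offset = 0 },
 *		.shift = 0,
 *		.flags = HOST1X_RELOC_READ,
 *	};
 */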

struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel to which the job is submitted */
	struct host1x_channel *channel;

	/* Client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Syncpoint ID, number of increments and end value for this submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion fence for job tracking */
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether the host1x-side firewall should be run for this job or not */
	bool enable_firewall;

	/* Options for configuring engine data stream ID */
	/* Context device to use for job */
	struct host1x_memory_context *memory_context;
	/* Stream ID to use if context isolation is disabled (!memory_context) */
	u32 engine_fallback_streamid;
	/* Engine offset to program stream ID to */
	u32 engine_streamid_offset;
};

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
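
/*
 * Illustrative sketch (not part of this header): a typical job life
 * cycle, with error unwinding simplified. channel, cmdbuf, num_words,
 * sp and dev are placeholders.
 *
 *	struct host1x_job *job;
 *	int err;
 *
 *	job = host1x_job_alloc(channel, 1, 0, false);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	job->syncpt = host1x_syncpt_get(sp);
 *	job->syncpt_incrs = 1;
 *
 *	host1x_job_add_gather(job, cmdbuf, num_words, 0);
 *
 *	err = host1x_job_pin(job, dev);
 *	if (!err) {
 *		err = host1x_job_submit(job);
 *		if (err)
 *			host1x_job_unpin(job);
 *	}
 *
 *	host1x_job_put(job);
 */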

/*
 * subdevice probe infrastructure
 */

struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
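
/*
 * Illustrative sketch (not part of this header): a logical host1x
 * driver binds once all subdevices matched by its OF table have been
 * probed. The my_* names are hypothetical.
 *
 *	static const struct of_device_id my_subdevs[] = {
 *		{ .compatible = "nvidia,tegra124-vic" },
 *		{ }
 *	};
 *
 *	static struct host1x_driver my_driver = {
 *		.driver = {
 *			.name = "my-driver",
 *		},
 *		.subdevs = my_subdevs,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 * registered from module init via host1x_driver_register(&my_driver)
 * and torn down via host1x_driver_unregister(&my_driver).
 */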

struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}

int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})
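
/*
 * Illustrative sketch (not part of this header): initializing and
 * registering in two steps, as described in the note above. my is a
 * placeholder driver structure embedding the client.
 *
 *	host1x_client_init(&my->client);
 *	my->client.ops = &my_client_ops;
 *	my->client.dev = dev;
 *
 *	err = __host1x_client_register(&my->client);
 *	if (err < 0)
 *		host1x_client_exit(&my->client);
 */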

void host1x_client_unregister(struct host1x_client *client);

int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);

struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
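
/*
 * Illustrative sketch (not part of this header): a DSI/CSI driver
 * acquires a MIPI device and runs pad calibration around link bring-up.
 * Error handling for the calibration calls is omitted, and
 * tegra_mipi_request() is assumed to report errors via ERR_PTR().
 *
 *	struct tegra_mipi_device *mipi;
 *
 *	mipi = tegra_mipi_request(dev, dev->of_node);
 *	if (IS_ERR(mipi))
 *		return PTR_ERR(mipi);
 *
 *	tegra_mipi_enable(mipi);
 *	tegra_mipi_start_calibration(mipi);
 *	tegra_mipi_finish_calibration(mipi);
 *	tegra_mipi_disable(mipi);
 *	tegra_mipi_free(mipi);
 */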

/* host1x memory contexts */

struct host1x_memory_context {
	struct host1x *host;

	refcount_t ref;
	struct pid *owner;

	struct device_dma_parameters dma_parms;
	struct device dev;
	u64 dma_mask;
	u32 stream_id;
};

#ifdef CONFIG_IOMMU_API
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							  struct device *dev,
							  struct pid *pid);
void host1x_memory_context_get(struct host1x_memory_context *cd);
void host1x_memory_context_put(struct host1x_memory_context *cd);
#else
static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
									struct device *dev,
									struct pid *pid)
{
	return NULL;
}

static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
{
}

static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
{
}
#endif
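
/*
 * Illustrative sketch (not part of this header): allocating a memory
 * context for the current process and attaching it to a job so the
 * engine's DMA is confined to that context's stream ID. Errors are
 * assumed to be reported via ERR_PTR(); note that the !CONFIG_IOMMU_API
 * stub above returns NULL instead.
 *
 *	struct host1x_memory_context *ctx;
 *
 *	ctx = host1x_memory_context_alloc(host, client->dev,
 *					  get_task_pid(current, PIDTYPE_TGID));
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	job->memory_context = ctx;
 *
 * with the reference dropped via host1x_memory_context_put(ctx) once
 * the job has completed.
 */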

#endif