/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_MMU_H__
#define __MSM_MMU_H__

#include <linux/iommu.h>

struct msm_mmu_prealloc;
struct msm_mmu;
struct msm_gpu;

struct msm_mmu_funcs {
	/* Detach the MMU from its device. */
	void (*detach)(struct msm_mmu *mmu);
	/* Accumulate worst-case page count needed to map [iova, iova+len). */
	void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
			       uint64_t iova, size_t len);
	/* Allocate the pages counted by prealloc_count(). */
	int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
	/* Release preallocated pages that were not consumed. */
	void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
	/* Map a scatterlist subrange at iova with the given protection. */
	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
			size_t off, size_t len, int prot);
	/* Unmap len bytes starting at iova. */
	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
	/* Free the msm_mmu and any backend state. */
	void (*destroy)(struct msm_mmu *mmu);
	/* Enable/disable stall-on-fault behavior. */
	void (*set_stall)(struct msm_mmu *mmu, bool enable);
};
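
/*
 * A hedged sketch (not code from this driver) of how the prealloc ops are
 * intended to sequence around a pagetable update, per the msm_mmu_prealloc
 * documentation below; error handling is elided and the flow is illustrative:
 *
 *	struct msm_mmu_prealloc p = {};
 *
 *	mmu->funcs->prealloc_count(mmu, &p, iova, len);      (worst-case count)
 *	ret = mmu->funcs->prealloc_allocate(mmu, &p);        (reserve pages)
 *
 *	mmu->prealloc = &p;                                  (under mmu_lock)
 *	ret = mmu->funcs->map(mmu, iova, sgt, 0, len, prot);
 *	mmu->prealloc = NULL;
 *
 *	mmu->funcs->prealloc_cleanup(mmu, &p);               (return leftovers)
 */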

enum msm_mmu_type {
	MSM_MMU_GPUMMU,
	MSM_MMU_IOMMU,
	MSM_MMU_IOMMU_PAGETABLE,
};

/**
 * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
 */
struct msm_mmu_prealloc {
	/** @count: Number of pages reserved. */
	uint32_t count;
	/** @ptr: Index of first unused page in @pages */
	uint32_t ptr;
	/**
	 * @pages: Array of pages preallocated for MMU table updates.
	 *
	 * After a VM operation, there might be free pages remaining in this
	 * array (since the amount allocated is a worst-case).  These are
	 * returned to the pt_cache at mmu->prealloc_cleanup().
	 */
	void **pages;
};

struct msm_mmu {
	/** @funcs: Backend implementation of the MMU ops. */
	const struct msm_mmu_funcs *funcs;
	/** @dev: Device this MMU is attached to. */
	struct device *dev;
	/** @handler: Fault handler callback, see msm_mmu_set_fault_handler(). */
	int (*handler)(void *arg, unsigned long iova, int flags, void *data);
	/** @arg: Opaque pointer passed back to @handler. */
	void *arg;
	/** @type: Which backend implements this MMU. */
	enum msm_mmu_type type;

	/**
	 * @prealloc: pre-allocated pages for pgtable
	 *
	 * Set while a VM_BIND job is running, serialized under
	 * msm_gem_vm::mmu_lock.
	 */
	struct msm_mmu_prealloc *prealloc;
};

static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
		const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
{
	mmu->dev = dev;
	mmu->funcs = funcs;
	mmu->type = type;
}
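
/*
 * Hedged sketch of a backend constructor (names below are illustrative, not
 * from this driver): implementations embed struct msm_mmu and chain up via
 * msm_mmu_init():
 *
 *	struct my_mmu {
 *		struct msm_mmu base;
 *		...                              (backend-private state)
 *	};
 *
 *	static const struct msm_mmu_funcs my_funcs = {
 *		.map = my_map,
 *		.unmap = my_unmap,
 *		.destroy = my_destroy,
 *	};
 *
 *	struct msm_mmu *my_mmu_new(struct device *dev)
 *	{
 *		struct my_mmu *m = kzalloc(sizeof(*m), GFP_KERNEL);
 *
 *		if (!m)
 *			return ERR_PTR(-ENOMEM);
 *
 *		msm_mmu_init(&m->base, dev, &my_funcs, MSM_MMU_IOMMU);
 *
 *		return &m->base;
 *	}
 */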

struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks);

static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
		int (*handler)(void *arg, unsigned long iova, int flags, void *data))
{
	mmu->arg = arg;
	mmu->handler = handler;
}
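
/*
 * Hedged usage sketch (handler name and arg type are illustrative): the
 * registered callback is invoked on an MMU fault with the faulting iova,
 * fault flags, implementation-specific @data, and the opaque @arg that was
 * registered here:
 *
 *	static int my_fault_handler(void *arg, unsigned long iova, int flags,
 *				    void *data)
 *	{
 *		struct my_ctx *ctx = arg;
 *
 *		...                              (log/recover as appropriate)
 *		return -EFAULT;
 *	}
 *
 *	msm_mmu_set_fault_handler(mmu, ctx, my_fault_handler);
 */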

struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);

int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
			       int *asid);
int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]);
struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
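
/*
 * Hedged sketch of per-process pagetable setup (variable names illustrative):
 * a child pagetable is created from a parent MMU, and its TTBR/ASID are
 * queried so they can be programmed into the hardware:
 *
 *	struct msm_mmu *pt = msm_iommu_pagetable_create(parent_mmu, true);
 *	phys_addr_t ttbr;
 *	int asid;
 *
 *	if (!IS_ERR(pt))
 *		msm_iommu_pagetable_params(pt, &ttbr, &asid);
 *	...                              (hand ttbr/asid to the GPU)
 */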

#endif /* __MSM_MMU_H__ */