/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_api.h>
#include <dev/drm2/ttm/ttm_memory.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/drm_global.h>
#include <sys/rwlock.h>
#include <sys/tree.h>

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This will be called back from ttm_tt_destroy,
	 * so don't call ttm_tt_destroy from the callback (infinite loop).
	 */
	void (*destroy) (struct ttm_tt *ttm);
};

/* Flags describing the backing pages, stored in ttm_tt::page_flags. */
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

/* CPU caching state of the pages backing a ttm_tt. */
enum ttm_caching_state {
	tt_uncached,
	tt_wc,		/* write-combined */
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @page_flags: TTM_PAGE_FLAG_XX flags for the backing pages.
 * @num_pages: Number of pages in the page array.
 * @sg: Scatter/gather table, for SG objects imported via dma-buf.
 * @glob: Pointer to the struct ttm_bo_global.
 * @swap_storage: Pointer to the VM object used for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct vm_page *dummy_read_page;
	struct vm_page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg; /* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct vm_object *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory, extended with the pages' DMA addresses for backends that
 * need them.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};

/* Traits of a memory type, stored in ttm_mem_type_manager::flags. */
#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details if
	 * applicable are given by @placement. If successful,
	 * @mem::mm_node should be set to a non-null value, and
	 * @mem::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
	 * If the memory region cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request to be fulfilled,
	 * the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};

/**
 * struct ttm_mem_type_manager
 *
 * @bdev: Pointer to the struct ttm_bo_device this manager belongs to.
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */



struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct sx io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device:
	 * @size: Size of the data needed backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct vm_page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing page
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	 void(*evict_flags) (struct ttm_buffer_object *bo,
				struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver_member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 * FreeBSD: use devfs_get_cdevpriv etc.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these. I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj);
	int (*sync_obj_wait) (void *sync_obj,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* hook to notify driver about a driver move so it
	 * can do tiling things */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is not used anymore. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 *
 * @ref: The drm_global reference bookkeeping for the bo global object.
 * @mem_glob: Pointer to the struct ttm_mem_global used for accounting.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 * @bo_count: Atomic count of buffer objects (internal bookkeeping).
 */

struct ttm_bo_global {
	u_int kobj_ref;

	/**
	 * Constant after init.
	 */

	struct ttm_mem_global *mem_glob;
	struct vm_page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct sx device_list_mutex;
	struct mtx lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};

507*592ffb21SWarner Losh 
/* Number of memory-type managers per device; see ttm_bo_device::man. */
#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @glob: Pointer to the buffer object driver global data.
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @vm_lock: rwlock protecting the device address space (addr_space_rb /
 * addr_space_mm).
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
 * @addr_space_mm: Range manager for the device address space.
 * @ddestroy: List of buffer objects pending delayed destruction,
 * protected by the global lru lock.
 * @val_seq: Current validation sequence.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 * @need_dma32: Allocations for this device should be restricted to
 * DMA32-capable memory.
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct rwlock vm_lock;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	struct mtx fence_lock;
	/*
	 * Protected by the vm lock.
	 */
	RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Protected by the global:lru lock.
	 */
	struct list_head ddestroy;
	uint32_t val_seq;

	/*
	 * Protected by load / firstopen / lastclose /unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct timeout_task wq;

	bool need_dma32;
};

568*592ffb21SWarner Losh /**
569*592ffb21SWarner Losh  * ttm_flag_masked
570*592ffb21SWarner Losh  *
571*592ffb21SWarner Losh  * @old: Pointer to the result and original value.
572*592ffb21SWarner Losh  * @new: New value of bits.
573*592ffb21SWarner Losh  * @mask: Mask of bits to change.
574*592ffb21SWarner Losh  *
575*592ffb21SWarner Losh  * Convenience function to change a number of bits identified by a mask.
576*592ffb21SWarner Losh  */
577*592ffb21SWarner Losh 
static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	/*
	 * Merge: keep the bits of *old outside @mask, take the bits of
	 * @new inside @mask, store the result back and return it.
	 */
	*old = (*old & ~mask) | (new & mask);
	return *old;
}
584*592ffb21SWarner Losh 
585*592ffb21SWarner Losh /**
586*592ffb21SWarner Losh  * ttm_tt_init
587*592ffb21SWarner Losh  *
588*592ffb21SWarner Losh  * @ttm: The struct ttm_tt.
589*592ffb21SWarner Losh  * @bdev: pointer to a struct ttm_bo_device:
590*592ffb21SWarner Losh  * @size: Size of the data needed backing.
591*592ffb21SWarner Losh  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
592*592ffb21SWarner Losh  * @dummy_read_page: See struct ttm_bo_device.
593*592ffb21SWarner Losh  *
594*592ffb21SWarner Losh  * Create a struct ttm_tt to back data with system memory pages.
595*592ffb21SWarner Losh  * No pages are actually allocated.
596*592ffb21SWarner Losh  * Returns:
597*592ffb21SWarner Losh  * NULL: Out of memory.
598*592ffb21SWarner Losh  */
599*592ffb21SWarner Losh extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
600*592ffb21SWarner Losh 			unsigned long size, uint32_t page_flags,
601*592ffb21SWarner Losh 			struct vm_page *dummy_read_page);
602*592ffb21SWarner Losh extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
603*592ffb21SWarner Losh 			   unsigned long size, uint32_t page_flags,
604*592ffb21SWarner Losh 			   struct vm_page *dummy_read_page);
605*592ffb21SWarner Losh 
606*592ffb21SWarner Losh /**
607*592ffb21SWarner Losh  * ttm_tt_fini
608*592ffb21SWarner Losh  *
609*592ffb21SWarner Losh  * @ttm: the ttm_tt structure.
610*592ffb21SWarner Losh  *
611*592ffb21SWarner Losh  * Free memory of ttm_tt structure
612*592ffb21SWarner Losh  */
613*592ffb21SWarner Losh extern void ttm_tt_fini(struct ttm_tt *ttm);
614*592ffb21SWarner Losh extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
615*592ffb21SWarner Losh 
616*592ffb21SWarner Losh /**
 * ttm_tt_bind:
618*592ffb21SWarner Losh  *
619*592ffb21SWarner Losh  * @ttm: The struct ttm_tt containing backing pages.
620*592ffb21SWarner Losh  * @bo_mem: The struct ttm_mem_reg identifying the binding location.
621*592ffb21SWarner Losh  *
622*592ffb21SWarner Losh  * Bind the pages of @ttm to an aperture location identified by @bo_mem
623*592ffb21SWarner Losh  */
624*592ffb21SWarner Losh extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
625*592ffb21SWarner Losh 
626*592ffb21SWarner Losh /**
 * ttm_tt_destroy:
628*592ffb21SWarner Losh  *
629*592ffb21SWarner Losh  * @ttm: The struct ttm_tt.
630*592ffb21SWarner Losh  *
631*592ffb21SWarner Losh  * Unbind, unpopulate and destroy common struct ttm_tt.
632*592ffb21SWarner Losh  */
633*592ffb21SWarner Losh extern void ttm_tt_destroy(struct ttm_tt *ttm);
634*592ffb21SWarner Losh 
635*592ffb21SWarner Losh /**
 * ttm_tt_unbind:
637*592ffb21SWarner Losh  *
638*592ffb21SWarner Losh  * @ttm: The struct ttm_tt.
639*592ffb21SWarner Losh  *
640*592ffb21SWarner Losh  * Unbind a struct ttm_tt.
641*592ffb21SWarner Losh  */
642*592ffb21SWarner Losh extern void ttm_tt_unbind(struct ttm_tt *ttm);
643*592ffb21SWarner Losh 
644*592ffb21SWarner Losh /**
645*592ffb21SWarner Losh  * ttm_tt_swapin:
646*592ffb21SWarner Losh  *
647*592ffb21SWarner Losh  * @ttm: The struct ttm_tt.
648*592ffb21SWarner Losh  *
 * Swap in a previously swapped-out ttm_tt.
650*592ffb21SWarner Losh  */
651*592ffb21SWarner Losh extern int ttm_tt_swapin(struct ttm_tt *ttm);
652*592ffb21SWarner Losh 
653*592ffb21SWarner Losh /**
654*592ffb21SWarner Losh  * ttm_tt_cache_flush:
655*592ffb21SWarner Losh  *
656*592ffb21SWarner Losh  * @pages: An array of pointers to struct page:s to flush.
657*592ffb21SWarner Losh  * @num_pages: Number of pages to flush.
658*592ffb21SWarner Losh  *
659*592ffb21SWarner Losh  * Flush the data of the indicated pages from the cpu caches.
660*592ffb21SWarner Losh  * This is used when changing caching attributes of the pages from
661*592ffb21SWarner Losh  * cache-coherent.
662*592ffb21SWarner Losh  */
663*592ffb21SWarner Losh extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);
664*592ffb21SWarner Losh 
665*592ffb21SWarner Losh /**
666*592ffb21SWarner Losh  * ttm_tt_set_placement_caching:
667*592ffb21SWarner Losh  *
668*592ffb21SWarner Losh  * @ttm A struct ttm_tt the backing pages of which will change caching policy.
669*592ffb21SWarner Losh  * @placement: Flag indicating the desired caching policy.
670*592ffb21SWarner Losh  *
671*592ffb21SWarner Losh  * This function will change caching policy of any default kernel mappings of
672*592ffb21SWarner Losh  * the pages backing @ttm. If changing from cached to uncached or
673*592ffb21SWarner Losh  * write-combined,
674*592ffb21SWarner Losh  * all CPU caches will first be flushed to make sure the data of the pages
675*592ffb21SWarner Losh  * hit RAM. This function may be very costly as it involves global TLB
676*592ffb21SWarner Losh  * and cache flushes and potential page splitting / combining.
677*592ffb21SWarner Losh  */
678*592ffb21SWarner Losh extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
679*592ffb21SWarner Losh extern int ttm_tt_swapout(struct ttm_tt *ttm,
680*592ffb21SWarner Losh 			  struct vm_object *persistent_swap_storage);
681*592ffb21SWarner Losh 
682*592ffb21SWarner Losh /*
683*592ffb21SWarner Losh  * ttm_bo.c
684*592ffb21SWarner Losh  */
685*592ffb21SWarner Losh 
686*592ffb21SWarner Losh /**
687*592ffb21SWarner Losh  * ttm_mem_reg_is_pci
688*592ffb21SWarner Losh  *
689*592ffb21SWarner Losh  * @bdev: Pointer to a struct ttm_bo_device.
690*592ffb21SWarner Losh  * @mem: A valid struct ttm_mem_reg.
691*592ffb21SWarner Losh  *
692*592ffb21SWarner Losh  * Returns true if the memory described by @mem is PCI memory,
693*592ffb21SWarner Losh  * false otherwise.
694*592ffb21SWarner Losh  */
695*592ffb21SWarner Losh extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
696*592ffb21SWarner Losh 				   struct ttm_mem_reg *mem);
697*592ffb21SWarner Losh 
698*592ffb21SWarner Losh /**
699*592ffb21SWarner Losh  * ttm_bo_mem_space
700*592ffb21SWarner Losh  *
701*592ffb21SWarner Losh  * @bo: Pointer to a struct ttm_buffer_object. the data of which
702*592ffb21SWarner Losh  * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible while waiting for space.
706*592ffb21SWarner Losh  * @no_wait_gpu: Return immediately if the GPU is busy.
707*592ffb21SWarner Losh  *
708*592ffb21SWarner Losh  * Allocate memory space for the buffer object pointed to by @bo, using
709*592ffb21SWarner Losh  * the placement flags in @mem, potentially evicting other idle buffer objects.
710*592ffb21SWarner Losh  * This function may sleep while waiting for space to become available.
711*592ffb21SWarner Losh  * Returns:
712*592ffb21SWarner Losh  * -EBUSY: No space available (only if no_wait == 1).
713*592ffb21SWarner Losh  * -ENOMEM: Could not allocate memory for the buffer object, either due to
714*592ffb21SWarner Losh  * fragmentation or concurrent allocators.
715*592ffb21SWarner Losh  * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
716*592ffb21SWarner Losh  */
717*592ffb21SWarner Losh extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
718*592ffb21SWarner Losh 				struct ttm_placement *placement,
719*592ffb21SWarner Losh 				struct ttm_mem_reg *mem,
720*592ffb21SWarner Losh 				bool interruptible,
721*592ffb21SWarner Losh 				bool no_wait_gpu);
722*592ffb21SWarner Losh 
723*592ffb21SWarner Losh extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
724*592ffb21SWarner Losh 			   struct ttm_mem_reg *mem);
725*592ffb21SWarner Losh extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
726*592ffb21SWarner Losh 				  struct ttm_mem_reg *mem);
727*592ffb21SWarner Losh 
728*592ffb21SWarner Losh extern void ttm_bo_global_release(struct drm_global_reference *ref);
729*592ffb21SWarner Losh extern int ttm_bo_global_init(struct drm_global_reference *ref);
730*592ffb21SWarner Losh 
731*592ffb21SWarner Losh extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
732*592ffb21SWarner Losh 
733*592ffb21SWarner Losh /**
734*592ffb21SWarner Losh  * ttm_bo_device_init
735*592ffb21SWarner Losh  *
736*592ffb21SWarner Losh  * @bdev: A pointer to a struct ttm_bo_device to initialize.
737*592ffb21SWarner Losh  * @glob: A pointer to an initialized struct ttm_bo_global.
738*592ffb21SWarner Losh  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
739*592ffb21SWarner Losh  * @file_page_offset: Offset into the device address space that is available
740*592ffb21SWarner Losh  * for buffer data. This ensures compatibility with other users of the
741*592ffb21SWarner Losh  * address space.
742*592ffb21SWarner Losh  *
743*592ffb21SWarner Losh  * Initializes a struct ttm_bo_device:
744*592ffb21SWarner Losh  * Returns:
745*592ffb21SWarner Losh  * !0: Failure.
746*592ffb21SWarner Losh  */
747*592ffb21SWarner Losh extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
748*592ffb21SWarner Losh 			      struct ttm_bo_global *glob,
749*592ffb21SWarner Losh 			      struct ttm_bo_driver *driver,
750*592ffb21SWarner Losh 			      uint64_t file_page_offset, bool need_dma32);
751*592ffb21SWarner Losh 
752*592ffb21SWarner Losh /**
753*592ffb21SWarner Losh  * ttm_bo_unmap_virtual
754*592ffb21SWarner Losh  *
755*592ffb21SWarner Losh  * @bo: tear down the virtual mappings for this BO
756*592ffb21SWarner Losh  */
757*592ffb21SWarner Losh extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
758*592ffb21SWarner Losh 
759*592ffb21SWarner Losh /**
 * ttm_bo_unmap_virtual_locked
761*592ffb21SWarner Losh  *
762*592ffb21SWarner Losh  * @bo: tear down the virtual mappings for this BO
763*592ffb21SWarner Losh  *
764*592ffb21SWarner Losh  * The caller must take ttm_mem_io_lock before calling this function.
765*592ffb21SWarner Losh  */
766*592ffb21SWarner Losh extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
767*592ffb21SWarner Losh 
768*592ffb21SWarner Losh extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
769*592ffb21SWarner Losh extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
770*592ffb21SWarner Losh extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
771*592ffb21SWarner Losh 			   bool interruptible);
772*592ffb21SWarner Losh extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
773*592ffb21SWarner Losh 
774*592ffb21SWarner Losh 
775*592ffb21SWarner Losh /**
776*592ffb21SWarner Losh  * ttm_bo_reserve:
777*592ffb21SWarner Losh  *
778*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
779*592ffb21SWarner Losh  * @interruptible: Sleep interruptible if waiting.
780*592ffb21SWarner Losh  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
781*592ffb21SWarner Losh  * @use_sequence: If @bo is already reserved, Only sleep waiting for
782*592ffb21SWarner Losh  * it to become unreserved if @sequence < (@bo)->sequence.
783*592ffb21SWarner Losh  *
784*592ffb21SWarner Losh  * Locks a buffer object for validation. (Or prevents other processes from
785*592ffb21SWarner Losh  * locking it for validation) and removes it from lru lists, while taking
786*592ffb21SWarner Losh  * a number of measures to prevent deadlocks.
787*592ffb21SWarner Losh  *
788*592ffb21SWarner Losh  * Deadlocks may occur when two processes try to reserve multiple buffers in
789*592ffb21SWarner Losh  * different order, either by will or as a result of a buffer being evicted
790*592ffb21SWarner Losh  * to make room for a buffer already reserved. (Buffers are reserved before
791*592ffb21SWarner Losh  * they are evicted). The following algorithm prevents such deadlocks from
792*592ffb21SWarner Losh  * occurring:
793*592ffb21SWarner Losh  * Processes attempting to reserve multiple buffers other than for eviction,
794*592ffb21SWarner Losh  * (typically execbuf), should first obtain a unique 32-bit
795*592ffb21SWarner Losh  * validation sequence number,
796*592ffb21SWarner Losh  * and call this function with @use_sequence == 1 and @sequence == the unique
797*592ffb21SWarner Losh  * sequence number. If upon call of this function, the buffer object is already
798*592ffb21SWarner Losh  * reserved, the validation sequence is checked against the validation
799*592ffb21SWarner Losh  * sequence of the process currently reserving the buffer,
800*592ffb21SWarner Losh  * and if the current validation sequence is greater than that of the process
801*592ffb21SWarner Losh  * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
802*592ffb21SWarner Losh  * waiting for the buffer to become unreserved, after which it retries
803*592ffb21SWarner Losh  * reserving.
804*592ffb21SWarner Losh  * The caller should, when receiving an -EAGAIN error
805*592ffb21SWarner Losh  * release all its buffer reservations, wait for @bo to become unreserved, and
806*592ffb21SWarner Losh  * then rerun the validation with the same validation sequence. This procedure
807*592ffb21SWarner Losh  * will always guarantee that the process with the lowest validation sequence
808*592ffb21SWarner Losh  * will eventually succeed, preventing both deadlocks and starvation.
809*592ffb21SWarner Losh  *
810*592ffb21SWarner Losh  * Returns:
811*592ffb21SWarner Losh  * -EAGAIN: The reservation may cause a deadlock.
812*592ffb21SWarner Losh  * Release all buffer reservations, wait for @bo to become unreserved and
813*592ffb21SWarner Losh  * try again. (only if use_sequence == 1).
814*592ffb21SWarner Losh  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
815*592ffb21SWarner Losh  * a signal. Release all buffer reservations and return to user-space.
816*592ffb21SWarner Losh  * -EBUSY: The function needed to sleep, but @no_wait was true
817*592ffb21SWarner Losh  * -EDEADLK: Bo already reserved using @sequence. This error code will only
818*592ffb21SWarner Losh  * be returned if @use_sequence is set to true.
819*592ffb21SWarner Losh  */
820*592ffb21SWarner Losh extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
821*592ffb21SWarner Losh 			  bool interruptible,
822*592ffb21SWarner Losh 			  bool no_wait, bool use_sequence, uint32_t sequence);
823*592ffb21SWarner Losh 
824*592ffb21SWarner Losh /**
825*592ffb21SWarner Losh  * ttm_bo_reserve_slowpath_nolru:
826*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
827*592ffb21SWarner Losh  * @interruptible: Sleep interruptible if waiting.
828*592ffb21SWarner Losh  * @sequence: Set (@bo)->sequence to this value after lock
829*592ffb21SWarner Losh  *
830*592ffb21SWarner Losh  * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
831*592ffb21SWarner Losh  * from all our other reservations. Because there are no other reservations
832*592ffb21SWarner Losh  * held by us, this function cannot deadlock any more.
833*592ffb21SWarner Losh  *
834*592ffb21SWarner Losh  * Will not remove reserved buffers from the lru lists.
835*592ffb21SWarner Losh  * Otherwise identical to ttm_bo_reserve_slowpath.
836*592ffb21SWarner Losh  */
837*592ffb21SWarner Losh extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
838*592ffb21SWarner Losh 					 bool interruptible,
839*592ffb21SWarner Losh 					 uint32_t sequence);
840*592ffb21SWarner Losh 
841*592ffb21SWarner Losh 
842*592ffb21SWarner Losh /**
843*592ffb21SWarner Losh  * ttm_bo_reserve_slowpath:
844*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
845*592ffb21SWarner Losh  * @interruptible: Sleep interruptible if waiting.
846*592ffb21SWarner Losh  * @sequence: Set (@bo)->sequence to this value after lock
847*592ffb21SWarner Losh  *
848*592ffb21SWarner Losh  * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
849*592ffb21SWarner Losh  * from all our other reservations. Because there are no other reservations
850*592ffb21SWarner Losh  * held by us, this function cannot deadlock any more.
851*592ffb21SWarner Losh  */
852*592ffb21SWarner Losh extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
853*592ffb21SWarner Losh 				   bool interruptible, uint32_t sequence);
854*592ffb21SWarner Losh 
855*592ffb21SWarner Losh /**
856*592ffb21SWarner Losh  * ttm_bo_reserve_nolru:
857*592ffb21SWarner Losh  *
858*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
859*592ffb21SWarner Losh  * @interruptible: Sleep interruptible if waiting.
860*592ffb21SWarner Losh  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
861*592ffb21SWarner Losh  * @use_sequence: If @bo is already reserved, Only sleep waiting for
862*592ffb21SWarner Losh  * it to become unreserved if @sequence < (@bo)->sequence.
863*592ffb21SWarner Losh  *
864*592ffb21SWarner Losh  * Will not remove reserved buffers from the lru lists.
865*592ffb21SWarner Losh  * Otherwise identical to ttm_bo_reserve.
866*592ffb21SWarner Losh  *
867*592ffb21SWarner Losh  * Returns:
868*592ffb21SWarner Losh  * -EAGAIN: The reservation may cause a deadlock.
869*592ffb21SWarner Losh  * Release all buffer reservations, wait for @bo to become unreserved and
870*592ffb21SWarner Losh  * try again. (only if use_sequence == 1).
871*592ffb21SWarner Losh  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
872*592ffb21SWarner Losh  * a signal. Release all buffer reservations and return to user-space.
873*592ffb21SWarner Losh  * -EBUSY: The function needed to sleep, but @no_wait was true
874*592ffb21SWarner Losh  * -EDEADLK: Bo already reserved using @sequence. This error code will only
875*592ffb21SWarner Losh  * be returned if @use_sequence is set to true.
876*592ffb21SWarner Losh  */
877*592ffb21SWarner Losh extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
878*592ffb21SWarner Losh 				 bool interruptible,
879*592ffb21SWarner Losh 				 bool no_wait, bool use_sequence,
880*592ffb21SWarner Losh 				 uint32_t sequence);
881*592ffb21SWarner Losh 
882*592ffb21SWarner Losh /**
883*592ffb21SWarner Losh  * ttm_bo_unreserve
884*592ffb21SWarner Losh  *
885*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
886*592ffb21SWarner Losh  *
887*592ffb21SWarner Losh  * Unreserve a previous reservation of @bo.
888*592ffb21SWarner Losh  */
889*592ffb21SWarner Losh extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
890*592ffb21SWarner Losh 
891*592ffb21SWarner Losh /**
892*592ffb21SWarner Losh  * ttm_bo_unreserve_locked
893*592ffb21SWarner Losh  *
894*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
895*592ffb21SWarner Losh  *
896*592ffb21SWarner Losh  * Unreserve a previous reservation of @bo.
897*592ffb21SWarner Losh  * Needs to be called with struct ttm_bo_global::lru_lock held.
898*592ffb21SWarner Losh  */
899*592ffb21SWarner Losh extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
900*592ffb21SWarner Losh 
901*592ffb21SWarner Losh /*
902*592ffb21SWarner Losh  * ttm_bo_util.c
903*592ffb21SWarner Losh  */
904*592ffb21SWarner Losh 
905*592ffb21SWarner Losh /**
906*592ffb21SWarner Losh  * ttm_bo_move_ttm
907*592ffb21SWarner Losh  *
908*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
909*592ffb21SWarner Losh  * @evict: 1: This is an eviction. Don't try to pipeline.
910*592ffb21SWarner Losh  * @no_wait_gpu: Return immediately if the GPU is busy.
911*592ffb21SWarner Losh  * @new_mem: struct ttm_mem_reg indicating where to move.
912*592ffb21SWarner Losh  *
913*592ffb21SWarner Losh  * Optimized move function for a buffer object with both old and
914*592ffb21SWarner Losh  * new placement backed by a TTM. The function will, if successful,
915*592ffb21SWarner Losh  * free any old aperture space, and set (@new_mem)->mm_node to NULL,
916*592ffb21SWarner Losh  * and update the (@bo)->mem placement flags. If unsuccessful, the old
917*592ffb21SWarner Losh  * data remains untouched, and it's up to the caller to free the
918*592ffb21SWarner Losh  * memory space indicated by @new_mem.
919*592ffb21SWarner Losh  * Returns:
920*592ffb21SWarner Losh  * !0: Failure.
921*592ffb21SWarner Losh  */
922*592ffb21SWarner Losh 
923*592ffb21SWarner Losh extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
924*592ffb21SWarner Losh 			   bool evict, bool no_wait_gpu,
925*592ffb21SWarner Losh 			   struct ttm_mem_reg *new_mem);
926*592ffb21SWarner Losh 
927*592ffb21SWarner Losh /**
928*592ffb21SWarner Losh  * ttm_bo_move_memcpy
929*592ffb21SWarner Losh  *
930*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
931*592ffb21SWarner Losh  * @evict: 1: This is an eviction. Don't try to pipeline.
932*592ffb21SWarner Losh  * @no_wait_gpu: Return immediately if the GPU is busy.
933*592ffb21SWarner Losh  * @new_mem: struct ttm_mem_reg indicating where to move.
934*592ffb21SWarner Losh  *
935*592ffb21SWarner Losh  * Fallback move function for a mappable buffer object in mappable memory.
936*592ffb21SWarner Losh  * The function will, if successful,
937*592ffb21SWarner Losh  * free any old aperture space, and set (@new_mem)->mm_node to NULL,
938*592ffb21SWarner Losh  * and update the (@bo)->mem placement flags. If unsuccessful, the old
939*592ffb21SWarner Losh  * data remains untouched, and it's up to the caller to free the
940*592ffb21SWarner Losh  * memory space indicated by @new_mem.
941*592ffb21SWarner Losh  * Returns:
942*592ffb21SWarner Losh  * !0: Failure.
943*592ffb21SWarner Losh  */
944*592ffb21SWarner Losh 
945*592ffb21SWarner Losh extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
946*592ffb21SWarner Losh 			      bool evict, bool no_wait_gpu,
947*592ffb21SWarner Losh 			      struct ttm_mem_reg *new_mem);
948*592ffb21SWarner Losh 
949*592ffb21SWarner Losh /**
950*592ffb21SWarner Losh  * ttm_bo_free_old_node
951*592ffb21SWarner Losh  *
952*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
953*592ffb21SWarner Losh  *
954*592ffb21SWarner Losh  * Utility function to free an old placement after a successful move.
955*592ffb21SWarner Losh  */
956*592ffb21SWarner Losh extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
957*592ffb21SWarner Losh 
958*592ffb21SWarner Losh /**
959*592ffb21SWarner Losh  * ttm_bo_move_accel_cleanup.
960*592ffb21SWarner Losh  *
961*592ffb21SWarner Losh  * @bo: A pointer to a struct ttm_buffer_object.
962*592ffb21SWarner Losh  * @sync_obj: A sync object that signals when moving is complete.
963*592ffb21SWarner Losh  * @evict: This is an evict move. Don't return until the buffer is idle.
964*592ffb21SWarner Losh  * @no_wait_gpu: Return immediately if the GPU is busy.
965*592ffb21SWarner Losh  * @new_mem: struct ttm_mem_reg indicating where to move.
966*592ffb21SWarner Losh  *
967*592ffb21SWarner Losh  * Accelerated move function to be called when an accelerated move
968*592ffb21SWarner Losh  * has been scheduled. The function will create a new temporary buffer object
969*592ffb21SWarner Losh  * representing the old placement, and put the sync object on both buffer
970*592ffb21SWarner Losh  * objects. After that the newly created buffer object is unref'd to be
971*592ffb21SWarner Losh  * destroyed when the move is complete. This will help pipeline
972*592ffb21SWarner Losh  * buffer moves.
973*592ffb21SWarner Losh  */
974*592ffb21SWarner Losh 
975*592ffb21SWarner Losh extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
976*592ffb21SWarner Losh 				     void *sync_obj,
977*592ffb21SWarner Losh 				     bool evict, bool no_wait_gpu,
978*592ffb21SWarner Losh 				     struct ttm_mem_reg *new_mem);
979*592ffb21SWarner Losh /**
980*592ffb21SWarner Losh  * ttm_io_prot
981*592ffb21SWarner Losh  *
 * @caching_flags: Caching flags indicating the desired caching model.
984*592ffb21SWarner Losh  *
985*592ffb21SWarner Losh  * Utility function that returns the pgprot_t that should be used for
986*592ffb21SWarner Losh  * setting up a PTE with the caching model indicated by @c_state.
987*592ffb21SWarner Losh  */
988*592ffb21SWarner Losh extern vm_memattr_t ttm_io_prot(uint32_t caching_flags);
989*592ffb21SWarner Losh 
990*592ffb21SWarner Losh extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
991*592ffb21SWarner Losh 
992*592ffb21SWarner Losh #if __OS_HAS_AGP
993*592ffb21SWarner Losh #define TTM_HAS_AGP
994*592ffb21SWarner Losh 
995*592ffb21SWarner Losh /**
996*592ffb21SWarner Losh  * ttm_agp_tt_create
997*592ffb21SWarner Losh  *
998*592ffb21SWarner Losh  * @bdev: Pointer to a struct ttm_bo_device.
999*592ffb21SWarner Losh  * @bridge: The agp bridge this device is sitting on.
1000*592ffb21SWarner Losh  * @size: Size of the data needed backing.
1001*592ffb21SWarner Losh  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
1002*592ffb21SWarner Losh  * @dummy_read_page: See struct ttm_bo_device.
1003*592ffb21SWarner Losh  *
1004*592ffb21SWarner Losh  *
1005*592ffb21SWarner Losh  * Create a TTM backend that uses the indicated AGP bridge as an aperture
1006*592ffb21SWarner Losh  * for TT memory. This function uses the linux agpgart interface to
1007*592ffb21SWarner Losh  * bind and unbind memory backing a ttm_tt.
1008*592ffb21SWarner Losh  */
1009*592ffb21SWarner Losh extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
1010*592ffb21SWarner Losh 					device_t bridge,
1011*592ffb21SWarner Losh 					unsigned long size, uint32_t page_flags,
1012*592ffb21SWarner Losh 					struct vm_page *dummy_read_page);
1013*592ffb21SWarner Losh int ttm_agp_tt_populate(struct ttm_tt *ttm);
1014*592ffb21SWarner Losh void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
1015*592ffb21SWarner Losh #endif
1016*592ffb21SWarner Losh 
1017*592ffb21SWarner Losh int	ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
1018*592ffb21SWarner Losh 	    struct ttm_buffer_object *b);
1019*592ffb21SWarner Losh 
1020*592ffb21SWarner Losh RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
1021*592ffb21SWarner Losh     ttm_bo_cmp_rb_tree_items);
1022*592ffb21SWarner Losh 
1023*592ffb21SWarner Losh #endif
1024