/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>

MALLOC_DEFINE(M_TTM_PD, "ttm_pd", "TTM Page Directories");

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = malloc(ttm->num_pages * sizeof(void *),
	    M_TTM_PD, M_WAITOK | M_ZERO);
}

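/*
 * Allocates storage for both the page pointers and the per-page DMA
 * addresses of a DMA-capable ttm.
 */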
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = malloc(ttm->ttm.num_pages * sizeof(void *),
	    M_TTM_PD, M_WAITOK | M_ZERO);
	ttm->dma_address = malloc(ttm->ttm.num_pages *
	    sizeof(*ttm->dma_address), M_TTM_PD, M_WAITOK);
}

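/*
 * Set the kernel-map caching attribute of a single page.  On i386/amd64
 * this updates the page's memory attribute with pmap_page_set_memattr();
 * other architectures leave the page untouched.
 */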
#if defined(__i386__) || defined(__amd64__)
static inline int ttm_tt_set_page_caching(vm_page_t p,
				enum ttm_caching_state c_old,
				enum ttm_caching_state c_new)
{

	/* XXXKIB our VM does not need this. */
#if 0
	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */
		pmap_page_set_memattr(p, VM_MEMATTR_WRITE_BACK);
	}
#endif

	if (c_new == tt_wc)
		pmap_page_set_memattr(p, VM_MEMATTR_WRITE_COMBINING);
	else if (c_new == tt_uncached)
		pmap_page_set_memattr(p, VM_MEMATTR_UNCACHEABLE);

	return (0);
}
#else
static inline int ttm_tt_set_page_caching(vm_page_t p,
				enum ttm_caching_state c_old,
				enum ttm_caching_state c_new)
{
	return 0;
}
#endif

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	vm_page_t cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (cur_page != NULL) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

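/*
 * Translate TTM placement caching flags (write-combined, uncached or
 * cached) into the corresponding ttm_caching_state and apply it.
 */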
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}

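/*
 * Tear down a ttm: unbind it if bound, unpopulate its pages, drop any
 * non-persistent swap storage and invoke the backend destroy callback.
 */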
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		vm_object_deallocate(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

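/*
 * Initialize a ttm for a buffer of the given size and allocate its page
 * directory.  Returns 0 on success or -ENOMEM if the allocation failed.
 */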
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		vm_page_t dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printf("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}

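/*
 * Release the page directory allocated by ttm_tt_init().
 */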
void ttm_tt_fini(struct ttm_tt *ttm)
{
	free(ttm->pages, M_TTM_PD);
	ttm->pages = NULL;
}

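/*
 * Initialize a DMA-capable ttm: same as ttm_tt_init(), but also allocates
 * the per-page DMA address array and initializes the pages list.
 */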
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    vm_page_t dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		printf("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}

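/*
 * Release the page directory and DMA address array allocated by
 * ttm_dma_tt_init().
 */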
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	free(ttm->pages, M_TTM_PD);
	ttm->pages = NULL;
	free(ttm_dma->dma_address, M_TTM_PD);
	ttm_dma->dma_address = NULL;
}

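/*
 * Unbind a bound ttm from its backend and mark it unbound.
 */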
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret __diagused;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		MPASS(ret == 0);
		ttm->state = tt_unbound;
	}
}

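/*
 * Populate the ttm if needed and bind it to the given memory region via
 * the backend bind callback.  Returns 0 on success or a negative errno.
 */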
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}

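/*
 * Copy the contents of the ttm's swap storage object back into its pages,
 * then drop the swap object (unless it is persistent) and clear the
 * swapped flag.
 */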
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i, ret, rv;

	obj = ttm->swap_storage;

	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		rv = vm_page_grab_valid_unlocked(&from_page, obj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
		if (rv != VM_PAGER_OK) {
			ret = -EIO;
			goto err_ret;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			vm_page_sunbusy(from_page);
			ret = -ENOMEM;
			goto err_ret;
		}
		pmap_copy_page(from_page, to_page);
		vm_page_sunbusy(from_page);
	}
	vm_object_pip_wakeup(obj);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		vm_object_deallocate(obj);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
	return (0);

err_ret:
	vm_object_pip_wakeup(obj);
	return (ret);
}

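/*
 * Copy the ttm's pages into a swap-backed VM object (a newly allocated one,
 * or the given persistent storage), then unpopulate the ttm and mark it
 * swapped out.
 */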
int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i;

	MPASS(ttm->state == tt_unbound || ttm->state == tt_unpopulated);
	MPASS(ttm->caching_state == tt_cached);

	if (persistent_swap_storage == NULL) {
		obj = vm_pager_allocate(OBJT_SWAP, NULL,
		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0,
		    curthread->td_ucred);
		if (obj == NULL) {
			printf("[TTM] Failed allocating swap storage\n");
			return (-ENOMEM);
		}
	} else
		obj = persistent_swap_storage;

	VM_OBJECT_WLOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
		pmap_copy_page(from_page, to_page);
		vm_page_valid(to_page);
		vm_page_dirty(to_page);
		vm_page_xunbusy(to_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_WUNLOCK(obj);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = obj;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage != NULL)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
	return (0);
}