/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *          Keith Packard.
 */

#include <sys/cdefs.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <dev/drm2/ttm/ttm_placement.h>

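/*
 * Private backend state for an AGP-bound TTM object: the embedded
 * struct ttm_tt, the byte offset into the AGP aperture, the page array
 * handed to the bridge on bind, and the agp(4) bridge device itself.
 */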
struct ttm_agp_backend {
        struct ttm_tt ttm;
        vm_offset_t offset;
        vm_page_t *pages;
        device_t bridge;
};

MALLOC_DEFINE(M_TTM_AGP, "ttm_agp", "TTM AGP Backend");

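/*
 * Bind callback: gather the object's pages (substituting the dummy read
 * page for any missing entry) and map them into the AGP aperture at the
 * offset described by the buffer object's mm_node.
 */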
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
        struct drm_mm_node *node = bo_mem->mm_node;
        int ret;
        unsigned i;

        for (i = 0; i < ttm->num_pages; i++) {
                vm_page_t page = ttm->pages[i];

                if (!page)
                        page = ttm->dummy_read_page;

                agp_be->pages[i] = page;
        }

        agp_be->offset = node->start * PAGE_SIZE;
        ret = -agp_bind_pages(agp_be->bridge, agp_be->pages,
            ttm->num_pages << PAGE_SHIFT, agp_be->offset);
        if (ret)
                printf("[TTM] AGP Bind memory failed\n");

        return ret;
}

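/*
 * Unbind callback: remove the previously bound range from the AGP
 * aperture.  The result is negated to match TTM's negative-errno
 * convention.
 */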
static int ttm_agp_unbind(struct ttm_tt *ttm)
{
        struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

        return -agp_unbind_pages(agp_be->bridge, ttm->num_pages << PAGE_SHIFT,
            agp_be->offset);
}

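/*
 * Destroy callback: release the generic ttm_tt state, then free the
 * page array and the backend structure.
 */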
static void ttm_agp_destroy(struct ttm_tt *ttm)
{
        struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

        ttm_tt_fini(ttm);
        free(agp_be->pages, M_TTM_AGP);
        free(agp_be, M_TTM_AGP);
}

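/* Backend function table installed on every AGP-backed ttm_tt. */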
static struct ttm_backend_func ttm_agp_func = {
        .bind = ttm_agp_bind,
        .unbind = ttm_agp_unbind,
        .destroy = ttm_agp_destroy,
};

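/*
 * Allocate and initialize an AGP-backed ttm_tt for the given bridge
 * device.  Returns NULL if ttm_tt_init() fails.
 */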
struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
    device_t bridge,
    unsigned long size, uint32_t page_flags,
    vm_page_t dummy_read_page)
{
        struct ttm_agp_backend *agp_be;

        agp_be = malloc(sizeof(*agp_be), M_TTM_AGP, M_WAITOK | M_ZERO);

        agp_be->bridge = bridge;
        agp_be->ttm.func = &ttm_agp_func;

        if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
                free(agp_be, M_TTM_AGP);
                return NULL;
        }

        agp_be->offset = 0;
        agp_be->pages = malloc(agp_be->ttm.num_pages * sizeof(*agp_be->pages),
            M_TTM_AGP, M_WAITOK);

        return &agp_be->ttm;
}

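/* Populate the ttm through the generic page pool unless already populated. */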
int ttm_agp_tt_populate(struct ttm_tt *ttm)
{
        if (ttm->state != tt_unpopulated)
                return 0;

        return ttm_pool_populate(ttm);
}

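/* Return the ttm's pages to the generic page pool. */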
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}

#endif