xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c (revision e7d65181045898a6da80add0392765e253b0023c)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"

extern const u8 gf100_pte_storage_type_map[256];

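/* Validate the storage (memory) type encoded in bits 15:8 of the tile
 * flags by checking that it maps to a usable entry in the GF100 PTE
 * storage type table; 0xff marks an invalid/unsupported type.
 */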
bool
gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
	return likely(gf100_pte_storage_type_map[memtype] != 0xff);
}

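/* FB subdev interrupt handler: report the PFFB and PBFB sources flagged
 * in register 0x000100.
 */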
static void
gf100_fb_intr(struct nvkm_subdev *subdev)
{
	struct gf100_fb *fb = (void *)subdev;
	u32 intr = nv_rd32(fb, 0x000100);
	if (intr & 0x08000000)
		nv_debug(fb, "PFFB intr\n");
	if (intr & 0x00002000)
		nv_debug(fb, "PBFB intr\n");
}

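/* Initialise the FB: program the (shifted) bus address of the r100c10
 * page into register 0x100c10, when one was allocated by the constructor,
 * and select 128KiB large pages.
 */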
int
gf100_fb_init(struct nvkm_object *object)
{
	struct gf100_fb *fb = (void *)object;
	int ret;

	ret = nvkm_fb_init(&fb->base);
	if (ret)
		return ret;

	if (fb->r100c10_page)
		nv_wr32(fb, 0x100c10, fb->r100c10 >> 8);

	nv_mask(fb, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
	return 0;
}

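/* Destructor: release the DMA mapping and free the r100c10 page before
 * tearing down the base FB object.
 */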
void
gf100_fb_dtor(struct nvkm_object *object)
{
	struct nvkm_device *device = nv_device(object);
	struct gf100_fb *fb = (void *)object;

	if (fb->r100c10_page) {
		dma_unmap_page(nv_device_base(device), fb->r100c10, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fb->r100c10_page);
	}

	nvkm_fb_destroy(&fb->base);
}

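/* Constructor: create the base FB object, allocate and DMA-map a zeroed
 * page for register 0x100c10, and hook up the FB interrupt handler.
 */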
int
gf100_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct gf100_fb *fb;
	int ret;

	ret = nvkm_fb_create(parent, engine, oclass, &fb);
	*pobject = nv_object(fb);
	if (ret)
		return ret;

	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (fb->r100c10_page) {
		fb->r100c10 = dma_map_page(nv_device_base(device),
					   fb->r100c10_page, 0, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
		if (dma_mapping_error(nv_device_base(device), fb->r100c10))
			return -EFAULT;
	}

	nv_subdev(fb)->intr = gf100_fb_intr;
	return 0;
}

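/* Object class for the GF100 FB subdev: wires up the constructor,
 * destructor and init hooks above, the generic fini, the GF100 memtype
 * check and the GF100 RAM implementation.
 */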
struct nvkm_oclass *
gf100_fb_oclass = &(struct nvkm_fb_impl) {
	.base.handle = NV_SUBDEV(FB, 0xc0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fb_ctor,
		.dtor = gf100_fb_dtor,
		.init = gf100_fb_init,
		.fini = _nvkm_fb_fini,
	},
	.memtype = gf100_fb_memtype_valid,
	.ram = &gf100_ram_oclass,
}.base;