/*
 * Copyright 2012 Red Hat Inc.
 * Parts based on xf86-video-ast
 * Copyright (c) 2005 ASPEED Technology Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */

#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>

#include "ast_drv.h"

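/* Unpin and release all cursor BOs. */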
static void ast_cursor_fini(struct ast_private *ast)
{
	size_t i;
	struct drm_gem_vram_object *gbo;

	for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
		gbo = ast->cursor.gbo[i];
		drm_gem_vram_unpin(gbo);
		drm_gem_vram_put(gbo);
	}
}

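/* Managed-release action: tears down the cursor BOs when the device goes away. */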
static void ast_cursor_release(struct drm_device *dev, void *ptr)
{
	struct ast_private *ast = to_ast_private(dev);

	ast_cursor_fini(ast);
}

/*
 * Allocate cursor BOs and pin them at the end of VRAM.
 */
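/*
 * Rough usage sketch, derived from the helpers in this file: call
 * ast_cursor_init() once during driver setup; for each cursor update,
 * blit the new image with ast_cursor_blit() and make it visible with
 * ast_cursor_page_flip() and ast_cursor_show(); call ast_cursor_hide()
 * to disable the hardware cursor again.
 */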
int ast_cursor_init(struct ast_private *ast)
{
	struct drm_device *dev = &ast->base;
	size_t size, i;
	struct drm_gem_vram_object *gbo;
	int ret;

	size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
		gbo = drm_gem_vram_create(dev, size, 0);
		if (IS_ERR(gbo)) {
			ret = PTR_ERR(gbo);
			goto err_drm_gem_vram_put;
		}
		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
					    DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
		if (ret) {
			drm_gem_vram_put(gbo);
			goto err_drm_gem_vram_put;
		}
		ast->cursor.gbo[i] = gbo;
	}

	return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);

err_drm_gem_vram_put:
	while (i) {
		--i;
		gbo = ast->cursor.gbo[i];
		drm_gem_vram_unpin(gbo);
		drm_gem_vram_put(gbo);
	}
	return ret;
}

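/*
 * Convert a 32-bit ARGB8888 source image to the hardware's ARGB4444 cursor
 * format. The converted image is written so that it ends at the bottom-right
 * corner of the AST_MAX_HWC_WIDTH x AST_MAX_HWC_HEIGHT cursor area. A
 * checksum of the written pixel data, the image size and a zero hotspot are
 * stored in the signature block that follows the image at dst + AST_HWC_SIZE.
 */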
static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width,
				int height)
{
	union {
		u32 ul;
		u8 b[4];
	} srcdata32[2], data32;
	union {
		u16 us;
		u8 b[2];
	} data16;
	u32 csum = 0;
	s32 alpha_dst_delta, last_alpha_dst_delta;
	u8 __iomem *dstxor;
	const u8 *srcxor;
	int i, j;
	u32 per_pixel_copy, two_pixel_copy;

	alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
	last_alpha_dst_delta = alpha_dst_delta - (width << 1);

	srcxor = src;
	dstxor = (u8 *)dst + last_alpha_dst_delta +
		(AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
	per_pixel_copy = width & 1;
	two_pixel_copy = width >> 1;

	for (j = 0; j < height; j++) {
		for (i = 0; i < two_pixel_copy; i++) {
			srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
			srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
			data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
			data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
			data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
			data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);

			writel(data32.ul, dstxor);
			csum += data32.ul;

			dstxor += 4;
			srcxor += 8;
		}

		for (i = 0; i < per_pixel_copy; i++) {
			srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
			data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
			data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
			writew(data16.us, dstxor);
			csum += (u32)data16.us;

			dstxor += 2;
			srcxor += 4;
		}
		dstxor += last_alpha_dst_delta;
	}

	/* write checksum + signature */
	dst += AST_HWC_SIZE;
	writel(csum, dst);
	writel(width, dst + AST_HWC_SIGNATURE_SizeX);
	writel(height, dst + AST_HWC_SIGNATURE_SizeY);
	writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
	writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}

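/*
 * Copy the cursor image from the framebuffer's GEM VRAM object into the
 * next cursor BO, converting it to the hardware format on the way. Returns
 * 0 on success, or a negative errno code otherwise.
 */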
int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
{
	struct drm_device *dev = &ast->base;
	struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index];
	struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]);
	struct dma_buf_map src_map, dst_map;
	void __iomem *dst;
	void *src;
	int ret;

	if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) ||
	    drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT))
		return -EINVAL;

	ret = drm_gem_vram_vmap(src_gbo, &src_map);
	if (ret)
		return ret;
	src = src_map.vaddr; /* TODO: Use mapping abstraction properly */

	ret = drm_gem_vram_vmap(dst_gbo, &dst_map);
	if (ret)
		goto err_drm_gem_vram_vunmap;
	dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */

	/* do data transfer to cursor BO */
	update_cursor_image(dst, src, fb->width, fb->height);

	drm_gem_vram_vunmap(dst_gbo, &dst_map);
	drm_gem_vram_vunmap(src_gbo, &src_map);

	return 0;

err_drm_gem_vram_vunmap:
	drm_gem_vram_vunmap(src_gbo, &src_map);
	return ret;
}

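/*
 * Program the VRAM offset of the active cursor BO into CRTC registers
 * 0xc8-0xca. The hardware takes the address in units of 8 bytes, hence
 * the shift by 3 before the offset is split across the three registers.
 */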
static void ast_cursor_set_base(struct ast_private *ast, u64 address)
{
	u8 addr0 = (address >> 3) & 0xff;
	u8 addr1 = (address >> 11) & 0xff;
	u8 addr2 = (address >> 19) & 0xff;

	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}

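/*
 * Point the hardware at the most recently blitted cursor BO and advance
 * next_index, so the cursor BOs are used in a double-buffered fashion.
 */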
void ast_cursor_page_flip(struct ast_private *ast)
{
	struct drm_device *dev = &ast->base;
	struct drm_gem_vram_object *gbo;
	s64 off;

	gbo = ast->cursor.gbo[ast->cursor.next_index];

	off = drm_gem_vram_offset(gbo);
	if (drm_WARN_ON_ONCE(dev, off < 0))
		return; /* Bug: we didn't pin the cursor HW BO to VRAM. */

	ast_cursor_set_base(ast, off);

	++ast->cursor.next_index;
	ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
}

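/*
 * Program the cursor's on-screen position and the offset of the visible
 * area within the cursor image via CRTC registers 0xc2-0xc7.
 */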
static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y,
				    u8 x_offset, u8 y_offset)
{
	u8 x0 = (x & 0x00ff);
	u8 x1 = (x & 0x0f00) >> 8;
	u8 y0 = (y & 0x00ff);
	u8 y1 = (y & 0x0700) >> 8;

	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0);
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1);
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0);
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
}

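/*
 * Store the new position in the cursor BO's signature block, map negative
 * coordinates to an intra-image offset at the screen edge, program the
 * location and finally enable the ARGB4444 hardware cursor.
 */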
void ast_cursor_show(struct ast_private *ast, int x, int y,
		     unsigned int offset_x, unsigned int offset_y)
{
	struct drm_device *dev = &ast->base;
	struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index];
	struct dma_buf_map map;
	u8 x_offset, y_offset;
	u8 __iomem *dst;
	u8 __iomem *sig;
	u8 jreg;
	int ret;

	ret = drm_gem_vram_vmap(gbo, &map);
	if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret))
		return;
	dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */

	sig = dst + AST_HWC_SIZE;
	writel(x, sig + AST_HWC_SIGNATURE_X);
	writel(y, sig + AST_HWC_SIGNATURE_Y);

	drm_gem_vram_vunmap(gbo, &map);

	if (x < 0) {
		x_offset = (-x) + offset_x;
		x = 0;
	} else {
		x_offset = offset_x;
	}
	if (y < 0) {
		y_offset = (-y) + offset_y;
		y = 0;
	} else {
		y_offset = offset_y;
	}

	ast_cursor_set_location(ast, x, y, x_offset, y_offset);

	/* dummy write to fire HWC */
	jreg = 0x02 |
	       0x01; /* enable ARGB4444 cursor */
	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
}

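/* Disable the hardware cursor by clearing the enable bits in CRTC 0xcb. */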
void ast_cursor_hide(struct ast_private *ast)
{
	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
}
287