/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/gfp.h>
#include <linux/slab.h>

#include "qxl_drv.h"
#include "qxl_object.h"

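/*
 * Allocate one image data chunk: a qxl_drm_chunk wrapper plus a
 * reserved BO of chunk_size bytes, queued on the image's chunk list.
 * The chunk is released again by qxl_image_free_objects().
 */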
static int
qxl_allocate_chunk(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *image,
		   unsigned int chunk_size)
{
	struct qxl_drm_chunk *chunk;
	int ret;

	chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
	if (!chunk)
		return -ENOMEM;

	ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
	if (ret) {
		kfree(chunk);
		return ret;
	}

	list_add_tail(&chunk->head, &image->chunk_list);
	return 0;
}

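/*
 * Allocate the objects backing an image: the qxl_drm_image wrapper, a
 * BO for the qxl_image header, and a single data chunk sized for
 * height lines of stride bytes.  On failure all partial allocations
 * are rolled back.
 */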
int
qxl_image_alloc_objects(struct qxl_device *qdev,
			struct qxl_release *release,
			struct qxl_drm_image **image_ptr,
			int height, int stride)
{
	struct qxl_drm_image *image;
	int ret;

	image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
	if (!image)
		return -ENOMEM;

	INIT_LIST_HEAD(&image->chunk_list);

	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
	if (ret) {
		kfree(image);
		return ret;
	}

	ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
	if (ret) {
		qxl_bo_unref(&image->bo);
		kfree(image);
		return ret;
	}
	*image_ptr = image;
	return 0;
}

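/*
 * Undo qxl_image_alloc_objects(): drop every chunk BO on the list,
 * drop the image header BO, and free the wrapper structures.
 */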
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
	struct qxl_drm_chunk *chunk, *tmp;

	list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
		qxl_bo_unref(&chunk->bo);
		kfree(chunk);
	}

	qxl_bo_unref(&dimage->bo);
	kfree(dimage);
}

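/*
 * Copy the pixel data into the image's (currently single) data chunk
 * and fill in the qxl_image/SPICE bitmap descriptor that points at it.
 * All BO accesses go through per-page atomic mappings.
 */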
static int
qxl_image_init_helper(struct qxl_device *qdev,
		      struct qxl_release *release,
		      struct qxl_drm_image *dimage,
		      const uint8_t *data,
		      int width, int height,
		      int depth, unsigned int hash,
		      int stride)
{
	struct qxl_drm_chunk *drv_chunk;
	struct qxl_image *image;
	struct qxl_data_chunk *chunk;
	int i;
	int chunk_stride;
	int linesize = width * depth / 8;
	struct qxl_bo *chunk_bo, *image_bo;
	void *ptr;
	/* Chunk */
	/* FIXME: Check integer overflow */
	/* TODO: variable number of chunks */

	drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);

	chunk_bo = drv_chunk->bo;
	chunk_stride = stride; /* TODO: should use linesize, but it renders
				  wrong (check the bitmaps are sent correctly
				  first) */

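	/* Write the chunk header; with a single chunk there are no links. */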
	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
	chunk = ptr;
	chunk->data_size = height * chunk_stride;
	chunk->prev_chunk = 0;
	chunk->next_chunk = 0;
	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

	{
		void *k_data, *i_data;
		int remain;
		int page;
		int size;

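		/*
		 * Fast path: source lines are contiguous (stride ==
		 * linesize), so the data can be copied page by page.
		 */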
		if (stride == linesize && chunk_stride == stride) {
			remain = linesize * height;
			page = 0;
			i_data = (void *)data;

			while (remain > 0) {
				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);

				if (page == 0) {
					chunk = ptr;
					k_data = chunk->data;
					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
				} else {
					k_data = ptr;
					size = PAGE_SIZE;
				}
				size = min(size, remain);

				memcpy(k_data, i_data, size);

				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
				i_data += size;
				remain -= size;
				page++;
			}
		} else {
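			/*
			 * Slow path: copy line by line, splitting each
			 * line wherever it crosses a page boundary in
			 * the chunk BO.
			 */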
			unsigned int page_base, page_offset, out_offset;

			for (i = 0 ; i < height ; ++i) {
				i_data = (void *)data + i * stride;
				remain = linesize;
				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;

				while (remain > 0) {
					page_base = out_offset & PAGE_MASK;
					page_offset = offset_in_page(out_offset);
					size = min((int)(PAGE_SIZE - page_offset), remain);

					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
					k_data = ptr + page_offset;
					memcpy(k_data, i_data, size);
					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
					remain -= size;
					i_data += size;
					out_offset += size;
				}
			}
		}
	}
	qxl_bo_vunmap_locked(chunk_bo);

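	/* Fill in the image descriptor pointing at the chunk data. */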
	image_bo = dimage->bo;
	ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
	image = ptr;

	image->descriptor.id = 0;
	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;

	image->descriptor.flags = 0;
	image->descriptor.width = width;
	image->descriptor.height = height;

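	/* Translate the bit depth to the matching SPICE bitmap format. */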
	switch (depth) {
	case 1:
		/* TODO: BE? check by arch? */
		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
		break;
	case 24:
		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
		break;
	case 32:
		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
		break;
	default:
		DRM_ERROR("unsupported image bit depth\n");
		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
		return -EINVAL;
	}
	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
	image->u.bitmap.x = width;
	image->u.bitmap.y = height;
	image->u.bitmap.stride = chunk_stride;
	image->u.bitmap.palette = 0;
	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);

	qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);

	return 0;
}

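/*
 * Public entry point: offset data to the (x, y) origin, then delegate
 * to qxl_image_init_helper() with a hash of 0 (the helper ignores it).
 */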
int qxl_image_init(struct qxl_device *qdev,
		     struct qxl_release *release,
		     struct qxl_drm_image *dimage,
		     const uint8_t *data,
		     int x, int y, int width, int height,
		     int depth, int stride)
{
	data += y * stride + x * (depth / 8);
	return qxl_image_init_helper(qdev, release, dimage, data,
				       width, height, depth, 0, stride);
}