/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/gfp.h>
#include <linux/slab.h>

#include <drm/drm_print.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int
qxl_allocate_chunk(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *image,
		   unsigned int chunk_size)
{
	struct qxl_drm_chunk *chunk;
	int ret;

	chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
	if (!chunk)
		return -ENOMEM;

	ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
	if (ret) {
		kfree(chunk);
		return ret;
	}

	list_add_tail(&chunk->head, &image->chunk_list);
	return 0;
}

int
qxl_image_alloc_objects(struct qxl_device *qdev,
			struct qxl_release *release,
			struct qxl_drm_image **image_ptr,
			int height, int stride)
{
	struct qxl_drm_image *image;
	int ret;

	image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
	if (!image)
		return -ENOMEM;

	INIT_LIST_HEAD(&image->chunk_list);

	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
	if (ret) {
		kfree(image);
		return ret;
	}

	ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
	if (ret) {
		qxl_bo_unref(&image->bo);
		kfree(image);
		return ret;
	}
	*image_ptr = image;
	return 0;
}

void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
	struct qxl_drm_chunk *chunk, *tmp;

	list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
		qxl_bo_unref(&chunk->bo);
		kfree(chunk);
	}

	qxl_bo_unref(&dimage->bo);
	kfree(dimage);
}

static int
qxl_image_init_helper(struct qxl_device *qdev,
		      struct qxl_release *release,
		      struct qxl_drm_image *dimage,
		      const uint8_t *data,
		      int width, int height,
		      int depth, unsigned int hash,
		      int stride)
{
	struct qxl_drm_chunk *drv_chunk;
	struct qxl_image *image;
	struct qxl_data_chunk *chunk;
	int i;
	int chunk_stride;
	int linesize = width * depth / 8;
	struct qxl_bo *chunk_bo, *image_bo;
	void *ptr;
	/* Chunk */
	/* FIXME: Check integer overflow */
	/* TODO: variable number of chunks */

	drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);

	chunk_bo = drv_chunk->bo;
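	/*
	 * Fill the single data chunk: the struct qxl_data_chunk header sits
	 * at the start of the first page of chunk_bo and the pixel data
	 * follows it, continuing across the remaining pages.  Pages are
	 * mapped one at a time with qxl_bo_kmap_atomic_page() below; the
	 * fast path copies the whole bitmap in one run when the source
	 * stride equals the line size, otherwise it copies line by line.
	 */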
	chunk_stride = stride; /* TODO: should use linesize, but it renders
				  wrong (check the bitmaps are sent correctly
				  first) */

	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
	chunk = ptr;
	chunk->data_size = height * chunk_stride;
	chunk->prev_chunk = 0;
	chunk->next_chunk = 0;
	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

	{
		void *k_data, *i_data;
		int remain;
		int page;
		int size;

		if (stride == linesize && chunk_stride == stride) {
			remain = linesize * height;
			page = 0;
			i_data = (void *)data;

			while (remain > 0) {
				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);

				if (page == 0) {
					chunk = ptr;
					k_data = chunk->data;
					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
				} else {
					k_data = ptr;
					size = PAGE_SIZE;
				}
				size = min(size, remain);

				memcpy(k_data, i_data, size);

				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
				i_data += size;
				remain -= size;
				page++;
			}
		} else {
			unsigned int page_base, page_offset, out_offset;

			for (i = 0 ; i < height ; ++i) {
				i_data = (void *)data + i * stride;
				remain = linesize;
				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;

				while (remain > 0) {
					page_base = out_offset & PAGE_MASK;
					page_offset = offset_in_page(out_offset);
					size = min((int)(PAGE_SIZE - page_offset), remain);

					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
					k_data = ptr + page_offset;
					memcpy(k_data, i_data, size);
					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
					remain -= size;
					i_data += size;
					out_offset += size;
				}
			}
		}
	}
	qxl_bo_vunmap_locked(chunk_bo);

	image_bo = dimage->bo;
	ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
	image = ptr;

	image->descriptor.id = 0;
	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;

	image->descriptor.flags = 0;
	image->descriptor.width = width;
	image->descriptor.height = height;

	switch (depth) {
	case 1:
		/* TODO: BE? check by arch? */
		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
		break;
	case 24:
		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
		break;
	case 32:
		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
		break;
	default:
		DRM_ERROR("unsupported image bit depth\n");
		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
		return -EINVAL;
	}
	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
	image->u.bitmap.x = width;
	image->u.bitmap.y = height;
	image->u.bitmap.stride = chunk_stride;
	image->u.bitmap.palette = 0;
	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);

	qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);

	return 0;
}

int qxl_image_init(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *dimage,
		   const uint8_t *data,
		   int x, int y, int width, int height,
		   int depth, int stride)
{
	data += y * stride + x * (depth / 8);
	return qxl_image_init_helper(qdev, release, dimage, data,
				     width, height, depth, 0, stride);
}