xref: /linux/drivers/gpu/drm/qxl/qxl_kms.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <drm/drm_crtc_helper.h>
#include <linux/io-mapping.h>

int qxl_log_level;

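/*
 * Sanity-check the ROM BAR contents: verify the QXL magic, log the
 * version and configuration advertised by the host, and record the
 * primary surface area size as qdev->vram_size.
 */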
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
	return true;
}

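/*
 * Program one memory slot into the device: publish its physical address
 * range through the RAM header and ask the host to register it via
 * qxl_io_memslot_add().
 */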
static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
			  struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
}

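/*
 * Record a memory slot's physical range, program it into the hardware and
 * precompute its high_bits: the slot index and slot generation packed into
 * the top (slot_id_bits + slot_gen_bits) bits of a 64-bit value, which the
 * rest of the driver ORs with an offset to form device addresses.
 */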
static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
	unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;

	setup_hw_slot(qdev, slot_index, slot);

	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}

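/*
 * Re-program the main and surface memory slots into the hardware.  A device
 * reset drops all memslots, so this is used (e.g. on resume) to restore the
 * two slots created in qxl_device_init().
 */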
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
	setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
}

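/* Work handler for qdev->gc_work: reap finished releases in process context. */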
static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);

	qxl_garbage_collect(qdev);
}

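/*
 * qxl_device_init() - one-time setup of a QXL PCI device.
 *
 * Maps the ROM, VRAM and surface BARs, creates the command, cursor and
 * release rings, resets the device and registers the two memory slots.
 * The caller is expected to own the struct qxl_device allocation and to
 * follow up with modeset init and drm_dev_register().  Roughly (an
 * illustrative sketch of the probe path in qxl_drv.c, not verbatim):
 *
 *	qdev = kzalloc(sizeof(*qdev), GFP_KERNEL);
 *	ret = qxl_device_init(qdev, &qxl_driver, pdev);
 *	if (ret)
 *		goto free_dev;
 *	ret = qxl_modeset_init(qdev);
 *	...
 *	ret = drm_dev_register(&qdev->ddev, ent->driver_data);
 */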
int qxl_device_init(struct qxl_device *qdev,
		    struct drm_driver *drv,
		    struct pci_dev *pdev)
{
	int r, sb;

	r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
	if (r)
		return r;

	qdev->ddev.pdev = pdev;
	pci_set_drvdata(pdev, &qdev->ddev);
	qdev->ddev.dev_private = qdev;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	qxl_gem_init(qdev);

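	/*
	 * BAR layout as consumed below: BAR 0 is device RAM (primary surface
	 * plus the RAM header), BAR 1 is 32-bit surface RAM, BAR 2 is the
	 * ROM, BAR 3 the I/O ports and BAR 4, when present, the preferred
	 * 64-bit surface RAM.
	 */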
	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		 (unsigned long long)qdev->vram_base,
		 (unsigned long long)pci_resource_end(pdev, 0),
		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		 (int)pci_resource_len(pdev, 0) / 1024,
		 (unsigned long long)qdev->surfaceram_base,
		 (unsigned long long)pci_resource_end(pdev, sb),
		 (int)qdev->surfaceram_size / 1024 / 1024,
		 (int)qdev->surfaceram_size / 1024,
		 (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		return -ENOMEM;
	}

	if (!qxl_check_device(qdev))
		return -EINVAL;

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		return r;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));
	if (!qdev->ram_header)
		return -ENOMEM;

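	/*
	 * Three rings live in the RAM header: the command and cursor rings
	 * carry struct qxl_command entries to the host and are kicked via a
	 * notify I/O port, while the release ring brings back 64-bit ids of
	 * resources the host has finished with and needs no notify port.
	 */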
	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);

	qdev->cursor_ring = qxl_ring_create(
				&(qdev->ram_header->cursor_ring_hdr),
				sizeof(struct qxl_command),
				QXL_CURSOR_RING_SIZE,
				qdev->io_base + QXL_IO_NOTIFY_CMD,
				false,
				&qdev->cursor_event);

	qdev->release_ring = qxl_ring_create(
				&(qdev->ram_header->release_ring_hdr),
				sizeof(uint64_t),
				QXL_RELEASE_RING_SIZE, 0, true,
				NULL);

	/* TODO - slot initialization should happen on reset. where is our
	 * reset handler? */
	qdev->n_mem_slots = qdev->rom->slots_end;
	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
	qdev->slot_id_bits = qdev->rom->slot_id_bits;
	qdev->va_slot_mask =
		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);

	qdev->mem_slots =
		kmalloc_array(qdev->n_mem_slots, sizeof(struct qxl_memslot),
			      GFP_KERNEL);
	if (!qdev->mem_slots)
		return -ENOMEM;

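	/*
	 * The IDRs hand out the small integer ids used for releases and for
	 * surfaces; the spinlocks guard allocation and lookup.
	 */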
	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r)
		return r;

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	qdev->main_mem_slot = setup_slot(qdev, 0,
		(unsigned long)qdev->vram_base,
		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
		(unsigned long)qdev->surfaceram_base,
		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
	DRM_INFO("main mem slot %d [%lx,%x]\n",
		 qdev->main_mem_slot,
		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
		 qdev->surfaces_mem_slot,
		 (unsigned long)qdev->surfaceram_base,
		 (unsigned long)qdev->surfaceram_size);

	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	return 0;
}

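/*
 * Undo qxl_device_init() in roughly reverse order: drop any pending release
 * BOs, flush outstanding GC work, free the rings, tear down GEM and the BO
 * (TTM) layer, and unmap the RAM header, the BAR mappings and the ROM.
 */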
void qxl_device_fini(struct qxl_device *qdev)
{
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	flush_work(&qdev->gc_work);
	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_gem_fini(qdev);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
}