xref: /linux/drivers/gpu/drm/radeon/r600_cs.c (revision cc4589ebfae6f8dbb5cf880a0a67eedab3416492)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/kernel.h>
29 #include "drmP.h"
30 #include "radeon.h"
31 #include "r600d.h"
32 #include "r600_reg_safe.h"
33 
34 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
35 					struct radeon_cs_reloc **cs_reloc);
36 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
37 					struct radeon_cs_reloc **cs_reloc);
38 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
39 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
40 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
41 
42 
43 struct r600_cs_track {
44 	/* configuration we mirror so that we use the same code between kms/ums */
45 	u32			group_size;
46 	u32			nbanks;
47 	u32			npipes;
48 	/* value we track */
49 	u32			sq_config;
50 	u32			nsamples;
51 	u32			cb_color_base_last[8];
52 	struct radeon_bo	*cb_color_bo[8];
53 	u32			cb_color_bo_offset[8];
54 	struct radeon_bo	*cb_color_frag_bo[8];
55 	struct radeon_bo	*cb_color_tile_bo[8];
56 	u32			cb_color_info[8];
57 	u32			cb_color_size_idx[8];
58 	u32			cb_target_mask;
59 	u32			cb_shader_mask;
60 	u32			cb_color_size[8];
61 	u32			vgt_strmout_en;
62 	u32			vgt_strmout_buffer_en;
63 	u32			db_depth_control;
64 	u32			db_depth_info;
65 	u32			db_depth_size_idx;
66 	u32			db_depth_view;
67 	u32			db_depth_size;
68 	u32			db_offset;
69 	struct radeon_bo	*db_bo;
70 };
71 
72 static inline int r600_bpe_from_format(u32 *bpe, u32 format)
73 {
74 	switch (format) {
75 	case V_038004_COLOR_8:
76 	case V_038004_COLOR_4_4:
77 	case V_038004_COLOR_3_3_2:
78 	case V_038004_FMT_1:
79 		*bpe = 1;
80 		break;
81 	case V_038004_COLOR_16:
82 	case V_038004_COLOR_16_FLOAT:
83 	case V_038004_COLOR_8_8:
84 	case V_038004_COLOR_5_6_5:
85 	case V_038004_COLOR_6_5_5:
86 	case V_038004_COLOR_1_5_5_5:
87 	case V_038004_COLOR_4_4_4_4:
88 	case V_038004_COLOR_5_5_5_1:
89 		*bpe = 2;
90 		break;
91 	case V_038004_FMT_8_8_8:
92 		*bpe = 3;
93 		break;
94 	case V_038004_COLOR_32:
95 	case V_038004_COLOR_32_FLOAT:
96 	case V_038004_COLOR_16_16:
97 	case V_038004_COLOR_16_16_FLOAT:
98 	case V_038004_COLOR_8_24:
99 	case V_038004_COLOR_8_24_FLOAT:
100 	case V_038004_COLOR_24_8:
101 	case V_038004_COLOR_24_8_FLOAT:
102 	case V_038004_COLOR_10_11_11:
103 	case V_038004_COLOR_10_11_11_FLOAT:
104 	case V_038004_COLOR_11_11_10:
105 	case V_038004_COLOR_11_11_10_FLOAT:
106 	case V_038004_COLOR_2_10_10_10:
107 	case V_038004_COLOR_8_8_8_8:
108 	case V_038004_COLOR_10_10_10_2:
109 	case V_038004_FMT_5_9_9_9_SHAREDEXP:
110 	case V_038004_FMT_32_AS_8:
111 	case V_038004_FMT_32_AS_8_8:
112 		*bpe = 4;
113 		break;
114 	case V_038004_COLOR_X24_8_32_FLOAT:
115 	case V_038004_COLOR_32_32:
116 	case V_038004_COLOR_32_32_FLOAT:
117 	case V_038004_COLOR_16_16_16_16:
118 	case V_038004_COLOR_16_16_16_16_FLOAT:
119 		*bpe = 8;
120 		break;
121 	case V_038004_FMT_16_16_16:
122 	case V_038004_FMT_16_16_16_FLOAT:
123 		*bpe = 6;
124 		break;
125 	case V_038004_FMT_32_32_32:
126 	case V_038004_FMT_32_32_32_FLOAT:
127 		*bpe = 12;
128 		break;
129 	case V_038004_COLOR_32_32_32_32:
130 	case V_038004_COLOR_32_32_32_32_FLOAT:
131 		*bpe = 16;
132 		break;
133 	case V_038004_FMT_GB_GR:
134 	case V_038004_FMT_BG_RG:
135 	case V_038004_COLOR_INVALID:
136 		*bpe = 16;
137 		return -EINVAL;
138 	}
139 	return 0;
140 }
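
/* Usage sketch (illustrative, not part of the original file): callers
 * pass in the FORMAT field extracted from a register or resource word,
 * e.g. for a 32-bit RGBA color buffer:
 *
 *	u32 bpe;
 *
 *	if (r600_bpe_from_format(&bpe, V_038004_COLOR_8_8_8_8))
 *		return -EINVAL;
 *	// bpe == 4, i.e. four bytes per element
 */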
141 
142 static void r600_cs_track_init(struct r600_cs_track *track)
143 {
144 	int i;
145 
146 	/* assume DX9 mode */
147 	track->sq_config = DX9_CONSTS;
148 	for (i = 0; i < 8; i++) {
149 		track->cb_color_base_last[i] = 0;
150 		track->cb_color_size[i] = 0;
151 		track->cb_color_size_idx[i] = 0;
152 		track->cb_color_info[i] = 0;
153 		track->cb_color_bo[i] = NULL;
154 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
155 	}
156 	track->cb_target_mask = 0xFFFFFFFF;
157 	track->cb_shader_mask = 0xFFFFFFFF;
158 	track->db_bo = NULL;
159 	/* assume the biggest format and that htile is enabled */
160 	track->db_depth_info = 7 | (1 << 25);
161 	track->db_depth_view = 0xFFFFC000;
162 	track->db_depth_size = 0xFFFFFFFF;
163 	track->db_depth_size_idx = 0;
164 	track->db_depth_control = 0xFFFFFFFF;
165 }
166 
167 static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
168 {
169 	struct r600_cs_track *track = p->track;
170 	u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
171 	volatile u32 *ib = p->ib->ptr;
172 
173 	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
174 		dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
175 		return -EINVAL;
176 	}
177 	size = radeon_bo_size(track->cb_color_bo[i]);
178 	if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
179 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
180 			 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
181 			i, track->cb_color_info[i]);
182 		return -EINVAL;
183 	}
184 	/* pitch is the number of 8x8 tiles per row */
185 	pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
186 	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
187 	height = size / (pitch * 8 * bpe);
188 	if (height > 8192)
189 		height = 8192;
190 	if (height > 7)
191 		height &= ~0x7;
192 	switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
193 	case V_0280A0_ARRAY_LINEAR_GENERAL:
194 		/* technically height & 0x7 */
195 		break;
196 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
197 		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
198 		if (!IS_ALIGNED(pitch, pitch_align)) {
199 			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
200 				 __func__, __LINE__, pitch);
201 			return -EINVAL;
202 		}
203 		if (!IS_ALIGNED(height, 8)) {
204 			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
205 				 __func__, __LINE__, height);
206 			return -EINVAL;
207 		}
208 		break;
209 	case V_0280A0_ARRAY_1D_TILED_THIN1:
210 		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
211 		if (!IS_ALIGNED(pitch, pitch_align)) {
212 			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
213 				 __func__, __LINE__, pitch);
214 			return -EINVAL;
215 		}
216 		if (!IS_ALIGNED(height, 8)) {
217 			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
218 				 __func__, __LINE__, height);
219 			return -EINVAL;
220 		}
221 		break;
222 	case V_0280A0_ARRAY_2D_TILED_THIN1:
223 		pitch_align = max((u32)track->nbanks,
224 				  (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
225 		if (!IS_ALIGNED(pitch, pitch_align)) {
226 			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
227 				__func__, __LINE__, pitch);
228 			return -EINVAL;
229 		}
230 		if (!IS_ALIGNED((height / 8), track->nbanks)) {
231 			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
232 				 __func__, __LINE__, height);
233 			return -EINVAL;
234 		}
235 		break;
236 	default:
237 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
238 			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
239 			track->cb_color_info[i]);
240 		return -EINVAL;
241 	}
242 	/* check offset */
243 	tmp = height * pitch * 8 * bpe;
244 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
245 		dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
246 		return -EINVAL;
247 	}
248 	if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
249 		dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
250 		return -EINVAL;
251 	}
252 	/* limit max tile */
253 	tmp = (height * pitch * 8) >> 6;
254 	if (tmp < slice_tile_max)
255 		slice_tile_max = tmp;
256 	tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
257 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
258 	ib[track->cb_color_size_idx[i]] = tmp;
259 	return 0;
260 }
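
/* Worked example (illustrative, not part of the original file): for a
 * 1024x768 linear-aligned surface with bpe = 4, userspace programs
 * CB_COLOR0_SIZE with PITCH_TILE_MAX = 1024/8 - 1 = 127 and
 * SLICE_TILE_MAX = (1024*768)/64 - 1 = 12287. The checker above then
 * derives pitch = 128 (in units of 8 pixels), height = bo size /
 * (128 * 8 * 4), clamps SLICE_TILE_MAX against what the bo can actually
 * hold, and rewrites the size dword in the IB so the GPU can never be
 * told to render past the end of the buffer object.
 */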
261 
262 static int r600_cs_track_check(struct radeon_cs_parser *p)
263 {
264 	struct r600_cs_track *track = p->track;
265 	u32 tmp;
266 	int r, i;
267 	volatile u32 *ib = p->ib->ptr;
268 
269 	/* on legacy kernels we don't perform advanced checks */
270 	if (p->rdev == NULL)
271 		return 0;
272 	/* we don't support the stream out buffer yet */
273 	if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
274 		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
275 		return -EINVAL;
276 	}
277 	/* check that we have a cb for each enabled target, we don't check
278 	 * shader_mask because mesa doesn't always seem to set it :(
279 	 */
280 	tmp = track->cb_target_mask;
281 	for (i = 0; i < 8; i++) {
282 		if ((tmp >> (i * 4)) & 0xF) {
283 			/* at least one component is enabled */
284 			if (track->cb_color_bo[i] == NULL) {
285 				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
286 					__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
287 				return -EINVAL;
288 			}
289 			/* perform rewrite of CB_COLOR[0-7]_SIZE */
290 			r = r600_cs_track_validate_cb(p, i);
291 			if (r)
292 				return r;
293 		}
294 	}
295 	/* Check depth buffer */
296 	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
297 		G_028800_Z_ENABLE(track->db_depth_control)) {
298 		u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
299 		if (track->db_bo == NULL) {
300 			dev_warn(p->dev, "z/stencil with no depth buffer\n");
301 			return -EINVAL;
302 		}
303 		if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
304 			dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
305 			return -EINVAL;
306 		}
307 		switch (G_028010_FORMAT(track->db_depth_info)) {
308 		case V_028010_DEPTH_16:
309 			bpe = 2;
310 			break;
311 		case V_028010_DEPTH_X8_24:
312 		case V_028010_DEPTH_8_24:
313 		case V_028010_DEPTH_X8_24_FLOAT:
314 		case V_028010_DEPTH_8_24_FLOAT:
315 		case V_028010_DEPTH_32_FLOAT:
316 			bpe = 4;
317 			break;
318 		case V_028010_DEPTH_X24_8_32_FLOAT:
319 			bpe = 8;
320 			break;
321 		default:
322 			dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
323 			return -EINVAL;
324 		}
325 		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
326 			if (!track->db_depth_size_idx) {
327 				dev_warn(p->dev, "z/stencil buffer size not set\n");
328 				return -EINVAL;
329 			}
330 			printk_once(KERN_WARNING "You have old & broken userspace please consider updating mesa\n");
331 			tmp = radeon_bo_size(track->db_bo) - track->db_offset;
332 			tmp = (tmp / bpe) >> 6;
333 			if (!tmp) {
334 				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
335 						track->db_depth_size, bpe, track->db_offset,
336 						radeon_bo_size(track->db_bo));
337 				return -EINVAL;
338 			}
339 			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
340 		} else {
341 			size = radeon_bo_size(track->db_bo);
342 			pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
343 			height = size / (pitch * 8 * bpe);
344 			height &= ~0x7;
345 			if (!height)
346 				height = 8;
347 
348 			switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
349 			case V_028010_ARRAY_1D_TILED_THIN1:
350 				pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
351 				if (!IS_ALIGNED(pitch, pitch_align)) {
352 					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
353 						 __func__, __LINE__, pitch);
354 					return -EINVAL;
355 				}
356 				if (!IS_ALIGNED(height, 8)) {
357 					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
358 						 __func__, __LINE__, height);
359 					return -EINVAL;
360 				}
361 				break;
362 			case V_028010_ARRAY_2D_TILED_THIN1:
363 				pitch_align = max((u32)track->nbanks,
364 						  (u32)(((track->group_size / 8) / bpe) * track->nbanks));
365 				if (!IS_ALIGNED(pitch, pitch_align)) {
366 					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
367 						 __func__, __LINE__, pitch);
368 					return -EINVAL;
369 				}
370 				if ((height / 8) & (track->nbanks - 1)) {
371 					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
372 						 __func__, __LINE__, height);
373 					return -EINVAL;
374 				}
375 				break;
376 			default:
377 				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
378 					 G_028010_ARRAY_MODE(track->db_depth_info),
379 					 track->db_depth_info);
380 				return -EINVAL;
381 			}
382 			if (!IS_ALIGNED(track->db_offset, track->group_size)) {
383 				dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
384 				return -EINVAL;
385 			}
386 			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
387 			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
388 			tmp = ntiles * bpe * 64 * nviews;
389 			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
390 				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
391 						track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
392 						radeon_bo_size(track->db_bo));
393 				return -EINVAL;
394 			}
395 		}
396 	}
397 	return 0;
398 }
399 
400 /**
401  * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
402  * @p:		parser structure holding parsing context.
403  * @pkt:	where to store packet information
404  * @idx:	index of the packet header dword in the ib
405  * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
406  * packet is bigger than the remaining ib size or if the packet type is unknown.
407  **/
408 int r600_cs_packet_parse(struct radeon_cs_parser *p,
409 			struct radeon_cs_packet *pkt,
410 			unsigned idx)
411 {
412 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
413 	uint32_t header;
414 
415 	if (idx >= ib_chunk->length_dw) {
416 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
417 			  idx, ib_chunk->length_dw);
418 		return -EINVAL;
419 	}
420 	header = radeon_get_ib_value(p, idx);
421 	pkt->idx = idx;
422 	pkt->type = CP_PACKET_GET_TYPE(header);
423 	pkt->count = CP_PACKET_GET_COUNT(header);
424 	pkt->one_reg_wr = 0;
425 	switch (pkt->type) {
426 	case PACKET_TYPE0:
427 		pkt->reg = CP_PACKET0_GET_REG(header);
428 		break;
429 	case PACKET_TYPE3:
430 		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
431 		break;
432 	case PACKET_TYPE2:
433 		pkt->count = -1;
434 		break;
435 	default:
436 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
437 		return -EINVAL;
438 	}
439 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
440 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
441 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
442 		return -EINVAL;
443 	}
444 	return 0;
445 }
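
/* Header layout sketch (assumed from the CP_PACKET*_GET_* macros used
 * above; not part of the original file): bits [31:30] hold the packet
 * type, [29:16] the count field and, for type-3 packets, [15:8] the
 * opcode. A packet carries count + 1 payload dwords after the header,
 * so e.g. a SET_CONTEXT_REG packet writing two registers has count = 2
 * (one offset dword plus two value dwords) and occupies count + 2 = 4
 * dwords of the ib, which is exactly what the bounds check above
 * verifies against length_dw.
 */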
446 
447 /**
448  * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
449  * @p:		parser structure holding parsing context.
450  * @cs_reloc:	where to store the relocation information
451  *
452  * Check that the next packet is a relocation packet3 (a NOP carrying an
453  * index into the relocation chunk) and return the matching relocation
454  * entry through @cs_reloc.
455  *
456  * Returns 0 on success, -EINVAL otherwise.
457  **/
458 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
459 					struct radeon_cs_reloc **cs_reloc)
460 {
461 	struct radeon_cs_chunk *relocs_chunk;
462 	struct radeon_cs_packet p3reloc;
463 	unsigned idx;
464 	int r;
465 
466 	if (p->chunk_relocs_idx == -1) {
467 		DRM_ERROR("No relocation chunk !\n");
468 		return -EINVAL;
469 	}
470 	*cs_reloc = NULL;
471 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
472 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
473 	if (r) {
474 		return r;
475 	}
476 	p->idx += p3reloc.count + 2;
477 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
478 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
479 			  p3reloc.idx);
480 		return -EINVAL;
481 	}
482 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
483 	if (idx >= relocs_chunk->length_dw) {
484 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
485 			  idx, relocs_chunk->length_dw);
486 		return -EINVAL;
487 	}
488 	/* FIXME: we assume reloc size is 4 dwords */
489 	*cs_reloc = p->relocs_ptr[(idx / 4)];
490 	return 0;
491 }
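
/* Relocation layout sketch (reconstructed from the parsing above; an
 * assumption, not part of the original file): a relocation is encoded
 * as a PACKET3_NOP whose payload dword indexes the relocation chunk:
 *
 *	dw0: PACKET3 NOP header
 *	dw1: reloc index (a multiple of 4, see the 4-dword FIXME above)
 *
 * The kms path above resolves the index through p->relocs_ptr, while
 * the legacy path below instead reads the 64-bit GPU offset straight
 * out of the chunk's kdata (dwords idx + 0 and idx + 3).
 */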
492 
493 /**
494  * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
495  * @p:		parser structure holding parsing context.
496  * @cs_reloc:	where to store the relocation information
497  *
498  * Check that the next packet is a relocation packet3 (a NOP carrying an
499  * index into the relocation chunk) and compute the GPU offset directly
500  * from the relocation data in the chunk's kdata (legacy/UMS path, no
501  * memory manager involved).
502  * Returns 0 on success, -EINVAL otherwise.
503  **/
504 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
505 					struct radeon_cs_reloc **cs_reloc)
506 {
507 	struct radeon_cs_chunk *relocs_chunk;
508 	struct radeon_cs_packet p3reloc;
509 	unsigned idx;
510 	int r;
511 
512 	if (p->chunk_relocs_idx == -1) {
513 		DRM_ERROR("No relocation chunk !\n");
514 		return -EINVAL;
515 	}
516 	*cs_reloc = NULL;
517 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
518 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
519 	if (r) {
520 		return r;
521 	}
522 	p->idx += p3reloc.count + 2;
523 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
524 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
525 			  p3reloc.idx);
526 		return -EINVAL;
527 	}
528 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
529 	if (idx >= relocs_chunk->length_dw) {
530 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
531 			  idx, relocs_chunk->length_dw);
532 		return -EINVAL;
533 	}
534 	*cs_reloc = p->relocs;
535 	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
536 	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
537 	return 0;
538 }
539 
540 /**
541  * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
542  * @p:	parser structure holding parsing context.
543  *
544  * Check whether the next packet is a relocation packet3 (NOP).
545  * Returns 1 if it is, 0 otherwise.
546  **/
547 static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
548 {
549 	struct radeon_cs_packet p3reloc;
550 	int r;
551 
552 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
553 	if (r) {
554 		return 0;
555 	}
556 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
557 		return 0;
558 	}
559 	return 1;
560 }
561 
562 /**
563  * r600_cs_packet_parse_vline() - parse userspace VLINE packet
564  * @p:	parser structure holding parsing context.
565  *
566  * Userspace sends a special sequence for VLINE waits:
567  * PACKET0 - VLINE_START_END + value
568  * PACKET3 - WAIT_REG_MEM poll vline status reg
569  * RELOC (P3) - crtc_id in reloc.
570  *
571  * This function parses this sequence and relocates the VLINE START END
572  * and WAIT_REG_MEM packets to the correct crtc.
573  * It also detects a switched-off crtc and nops out the
574  * wait in that case.
575  */
576 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
577 {
578 	struct drm_mode_object *obj;
579 	struct drm_crtc *crtc;
580 	struct radeon_crtc *radeon_crtc;
581 	struct radeon_cs_packet p3reloc, wait_reg_mem;
582 	int crtc_id;
583 	int r;
584 	uint32_t header, h_idx, reg, wait_reg_mem_info;
585 	volatile uint32_t *ib;
586 
587 	ib = p->ib->ptr;
588 
589 	/* parse the WAIT_REG_MEM */
590 	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
591 	if (r)
592 		return r;
593 
594 	/* check it's a WAIT_REG_MEM */
595 	if (wait_reg_mem.type != PACKET_TYPE3 ||
596 	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
597 		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
598 		r = -EINVAL;
599 		return r;
600 	}
601 
602 	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
603 	/* bit 4 is reg (0) or mem (1) */
604 	if (wait_reg_mem_info & 0x10) {
605 		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
606 		r = -EINVAL;
607 		return r;
608 	}
609 	/* waiting for value to be equal */
610 	if ((wait_reg_mem_info & 0x7) != 0x3) {
611 		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
612 		r = -EINVAL;
613 		return r;
614 	}
615 	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
616 		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
617 		r = -EINVAL;
618 		return r;
619 	}
620 
621 	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
622 		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
623 		r = -EINVAL;
624 		return r;
625 	}
626 
627 	/* jump over the NOP */
628 	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
629 	if (r)
630 		return r;
631 
632 	h_idx = p->idx - 2;
633 	p->idx += wait_reg_mem.count + 2;
634 	p->idx += p3reloc.count + 2;
635 
636 	header = radeon_get_ib_value(p, h_idx);
637 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
638 	reg = CP_PACKET0_GET_REG(header);
639 
640 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
641 	if (!obj) {
642 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
643 		r = -EINVAL;
644 		goto out;
645 	}
646 	crtc = obj_to_crtc(obj);
647 	radeon_crtc = to_radeon_crtc(crtc);
648 	crtc_id = radeon_crtc->crtc_id;
649 
650 	if (!crtc->enabled) {
651 		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
652 		ib[h_idx + 2] = PACKET2(0);
653 		ib[h_idx + 3] = PACKET2(0);
654 		ib[h_idx + 4] = PACKET2(0);
655 		ib[h_idx + 5] = PACKET2(0);
656 		ib[h_idx + 6] = PACKET2(0);
657 		ib[h_idx + 7] = PACKET2(0);
658 		ib[h_idx + 8] = PACKET2(0);
659 	} else if (crtc_id == 1) {
660 		switch (reg) {
661 		case AVIVO_D1MODE_VLINE_START_END:
662 			header &= ~R600_CP_PACKET0_REG_MASK;
663 			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
664 			break;
665 		default:
666 			DRM_ERROR("unknown crtc reloc\n");
667 			r = -EINVAL;
668 			goto out;
669 		}
670 		ib[h_idx] = header;
671 		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
672 	}
673 out:
674 	return r;
675 }
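
/* IB layout this expects (reconstructed from the offsets used above;
 * illustrative, not part of the original file):
 *
 *	h_idx + 0:  PACKET0 header for VLINE_START_END
 *	h_idx + 1:  start/end value
 *	h_idx + 2:  PACKET3 WAIT_REG_MEM header (count = 5)
 *	h_idx + 3:  wait info (function/engine bits)
 *	h_idx + 4:  poll register address lo (VLINE_STATUS >> 2)
 *	h_idx + 5:  poll register address hi
 *	h_idx + 6:  reference value
 *	h_idx + 7:  mask (VLINE_STAT)
 *	h_idx + 8:  poll interval
 *	h_idx + 9:  PACKET3 NOP header, h_idx + 10: crtc_id reloc
 *
 * which is why a disabled crtc is handled by overwriting dwords
 * h_idx + 2 .. h_idx + 8 (the whole WAIT_REG_MEM) with PACKET2(0).
 */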
676 
677 static int r600_packet0_check(struct radeon_cs_parser *p,
678 				struct radeon_cs_packet *pkt,
679 				unsigned idx, unsigned reg)
680 {
681 	int r;
682 
683 	switch (reg) {
684 	case AVIVO_D1MODE_VLINE_START_END:
685 		r = r600_cs_packet_parse_vline(p);
686 		if (r) {
687 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
688 					idx, reg);
689 			return r;
690 		}
691 		break;
692 	default:
693 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
694 		       reg, idx);
695 		return -EINVAL;
696 	}
697 	return 0;
698 }
699 
700 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
701 				struct radeon_cs_packet *pkt)
702 {
703 	unsigned reg, i;
704 	unsigned idx;
705 	int r;
706 
707 	idx = pkt->idx + 1;
708 	reg = pkt->reg;
709 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
710 		r = r600_packet0_check(p, pkt, idx, reg);
711 		if (r) {
712 			return r;
713 		}
714 	}
715 	return 0;
716 }
717 
718 /**
719  * r600_cs_check_reg() - check if register is authorized or not
720  * @p: parser structure holding parsing context
721  * @reg: register we are testing
722  * @idx: index into the cs buffer
723  *
724  * This function will test against r600_reg_safe_bm and return 0
725  * if the register is safe. If the register is not flagged as safe, this
726  * function will test it against a list of registers needing special handling.
727  */
728 static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
729 {
730 	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
731 	struct radeon_cs_reloc *reloc;
732 	u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
733 	u32 m, i, tmp, *ib;
734 	int r;
735 
736 	i = (reg >> 7);
737 	if (i >= last_reg) {
738 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
739 		return -EINVAL;
740 	}
741 	m = 1 << ((reg >> 2) & 31);
742 	if (!(r600_reg_safe_bm[i] & m))
743 		return 0;
744 	ib = p->ib->ptr;
745 	switch (reg) {
746 	/* force the following regs to 0 in an attempt to disable the out buffer;
747 	 * we need to better understand how it works before we can perform
748 	 * proper security checks on it (Jerome)
749 	 */
750 	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
751 	case R_008C44_SQ_ESGS_RING_SIZE:
752 	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
753 	case R_008C54_SQ_ESTMP_RING_SIZE:
754 	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
755 	case R_008C74_SQ_FBUF_RING_SIZE:
756 	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
757 	case R_008C5C_SQ_GSTMP_RING_SIZE:
758 	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
759 	case R_008C4C_SQ_GSVS_RING_SIZE:
760 	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
761 	case R_008C6C_SQ_PSTMP_RING_SIZE:
762 	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
763 	case R_008C7C_SQ_REDUC_RING_SIZE:
764 	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
765 	case R_008C64_SQ_VSTMP_RING_SIZE:
766 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
767 		/* get value to populate the IB, don't remove */
768 		tmp = radeon_get_ib_value(p, idx);
769 		ib[idx] = 0;
770 		break;
771 	case SQ_CONFIG:
772 		track->sq_config = radeon_get_ib_value(p, idx);
773 		break;
774 	case R_028800_DB_DEPTH_CONTROL:
775 		track->db_depth_control = radeon_get_ib_value(p, idx);
776 		break;
777 	case R_028010_DB_DEPTH_INFO:
778 		if (r600_cs_packet_next_is_pkt3_nop(p)) {
779 			r = r600_cs_packet_next_reloc(p, &reloc);
780 			if (r) {
781 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
782 					 "0x%04X\n", reg);
783 				return -EINVAL;
784 			}
785 			track->db_depth_info = radeon_get_ib_value(p, idx);
786 			ib[idx] &= C_028010_ARRAY_MODE;
787 			track->db_depth_info &= C_028010_ARRAY_MODE;
788 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
789 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
790 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
791 			} else {
792 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
793 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
794 			}
795 		} else
796 			track->db_depth_info = radeon_get_ib_value(p, idx);
797 		break;
798 	case R_028004_DB_DEPTH_VIEW:
799 		track->db_depth_view = radeon_get_ib_value(p, idx);
800 		break;
801 	case R_028000_DB_DEPTH_SIZE:
802 		track->db_depth_size = radeon_get_ib_value(p, idx);
803 		track->db_depth_size_idx = idx;
804 		break;
805 	case R_028AB0_VGT_STRMOUT_EN:
806 		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
807 		break;
808 	case R_028B20_VGT_STRMOUT_BUFFER_EN:
809 		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
810 		break;
811 	case R_028238_CB_TARGET_MASK:
812 		track->cb_target_mask = radeon_get_ib_value(p, idx);
813 		break;
814 	case R_02823C_CB_SHADER_MASK:
815 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
816 		break;
817 	case R_028C04_PA_SC_AA_CONFIG:
818 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
819 		track->nsamples = 1 << tmp;
820 		break;
821 	case R_0280A0_CB_COLOR0_INFO:
822 	case R_0280A4_CB_COLOR1_INFO:
823 	case R_0280A8_CB_COLOR2_INFO:
824 	case R_0280AC_CB_COLOR3_INFO:
825 	case R_0280B0_CB_COLOR4_INFO:
826 	case R_0280B4_CB_COLOR5_INFO:
827 	case R_0280B8_CB_COLOR6_INFO:
828 	case R_0280BC_CB_COLOR7_INFO:
829 		if (r600_cs_packet_next_is_pkt3_nop(p)) {
830 			r = r600_cs_packet_next_reloc(p, &reloc);
831 			if (r) {
832 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
833 				return -EINVAL;
834 			}
835 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
836 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
837 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
838 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
839 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
840 			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
841 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
842 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
843 			}
844 		} else {
845 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
846 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
847 		}
848 		break;
849 	case R_028060_CB_COLOR0_SIZE:
850 	case R_028064_CB_COLOR1_SIZE:
851 	case R_028068_CB_COLOR2_SIZE:
852 	case R_02806C_CB_COLOR3_SIZE:
853 	case R_028070_CB_COLOR4_SIZE:
854 	case R_028074_CB_COLOR5_SIZE:
855 	case R_028078_CB_COLOR6_SIZE:
856 	case R_02807C_CB_COLOR7_SIZE:
857 		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
858 		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
859 		track->cb_color_size_idx[tmp] = idx;
860 		break;
861 		/* These registers were added late; there is userspace
862 		 * which does provide relocations for them but sets a
863 		 * 0 offset. In order to avoid breaking old userspace
864 		 * we detect this and set the address to point to the last
865 		 * CB_COLOR[0-7]_BASE, note that if userspace doesn't set
866 		 * CB_COLOR[0-7]_BASE before these registers we will report an
867 		 * error. Old userspace always sets CB_COLOR[0-7]_BASE
868 		 * before any of these.
869 		 */
870 	case R_0280E0_CB_COLOR0_FRAG:
871 	case R_0280E4_CB_COLOR1_FRAG:
872 	case R_0280E8_CB_COLOR2_FRAG:
873 	case R_0280EC_CB_COLOR3_FRAG:
874 	case R_0280F0_CB_COLOR4_FRAG:
875 	case R_0280F4_CB_COLOR5_FRAG:
876 	case R_0280F8_CB_COLOR6_FRAG:
877 	case R_0280FC_CB_COLOR7_FRAG:
878 		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
879 		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
880 			if (!track->cb_color_base_last[tmp]) {
881 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
882 				return -EINVAL;
883 			}
884 			ib[idx] = track->cb_color_base_last[tmp];
885 			printk_once(KERN_WARNING "You have old & broken userspace "
886 					"please consider updating mesa & xf86-video-ati\n");
887 			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
888 		} else {
889 			r = r600_cs_packet_next_reloc(p, &reloc);
890 			if (r) {
891 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
892 				return -EINVAL;
893 			}
894 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
895 			track->cb_color_frag_bo[tmp] = reloc->robj;
896 		}
897 		break;
898 	case R_0280C0_CB_COLOR0_TILE:
899 	case R_0280C4_CB_COLOR1_TILE:
900 	case R_0280C8_CB_COLOR2_TILE:
901 	case R_0280CC_CB_COLOR3_TILE:
902 	case R_0280D0_CB_COLOR4_TILE:
903 	case R_0280D4_CB_COLOR5_TILE:
904 	case R_0280D8_CB_COLOR6_TILE:
905 	case R_0280DC_CB_COLOR7_TILE:
906 		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
907 		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
908 			if (!track->cb_color_base_last[tmp]) {
909 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
910 				return -EINVAL;
911 			}
912 			ib[idx] = track->cb_color_base_last[tmp];
913 			printk_once(KERN_WARNING "You have old & broken userspace "
914 					"please consider updating mesa & xf86-video-ati\n");
915 			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
916 		} else {
917 			r = r600_cs_packet_next_reloc(p, &reloc);
918 			if (r) {
919 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
920 				return -EINVAL;
921 			}
922 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
923 			track->cb_color_tile_bo[tmp] = reloc->robj;
924 		}
925 		break;
926 	case CB_COLOR0_BASE:
927 	case CB_COLOR1_BASE:
928 	case CB_COLOR2_BASE:
929 	case CB_COLOR3_BASE:
930 	case CB_COLOR4_BASE:
931 	case CB_COLOR5_BASE:
932 	case CB_COLOR6_BASE:
933 	case CB_COLOR7_BASE:
934 		r = r600_cs_packet_next_reloc(p, &reloc);
935 		if (r) {
936 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
937 					"0x%04X\n", reg);
938 			return -EINVAL;
939 		}
940 		tmp = (reg - CB_COLOR0_BASE) / 4;
941 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
942 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
943 		track->cb_color_base_last[tmp] = ib[idx];
944 		track->cb_color_bo[tmp] = reloc->robj;
945 		break;
946 	case DB_DEPTH_BASE:
947 		r = r600_cs_packet_next_reloc(p, &reloc);
948 		if (r) {
949 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
950 					"0x%04X\n", reg);
951 			return -EINVAL;
952 		}
953 		track->db_offset = radeon_get_ib_value(p, idx);
954 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
955 		track->db_bo = reloc->robj;
956 		break;
957 	case DB_HTILE_DATA_BASE:
958 	case SQ_PGM_START_FS:
959 	case SQ_PGM_START_ES:
960 	case SQ_PGM_START_VS:
961 	case SQ_PGM_START_GS:
962 	case SQ_PGM_START_PS:
963 	case SQ_ALU_CONST_CACHE_GS_0:
964 	case SQ_ALU_CONST_CACHE_GS_1:
965 	case SQ_ALU_CONST_CACHE_GS_2:
966 	case SQ_ALU_CONST_CACHE_GS_3:
967 	case SQ_ALU_CONST_CACHE_GS_4:
968 	case SQ_ALU_CONST_CACHE_GS_5:
969 	case SQ_ALU_CONST_CACHE_GS_6:
970 	case SQ_ALU_CONST_CACHE_GS_7:
971 	case SQ_ALU_CONST_CACHE_GS_8:
972 	case SQ_ALU_CONST_CACHE_GS_9:
973 	case SQ_ALU_CONST_CACHE_GS_10:
974 	case SQ_ALU_CONST_CACHE_GS_11:
975 	case SQ_ALU_CONST_CACHE_GS_12:
976 	case SQ_ALU_CONST_CACHE_GS_13:
977 	case SQ_ALU_CONST_CACHE_GS_14:
978 	case SQ_ALU_CONST_CACHE_GS_15:
979 	case SQ_ALU_CONST_CACHE_PS_0:
980 	case SQ_ALU_CONST_CACHE_PS_1:
981 	case SQ_ALU_CONST_CACHE_PS_2:
982 	case SQ_ALU_CONST_CACHE_PS_3:
983 	case SQ_ALU_CONST_CACHE_PS_4:
984 	case SQ_ALU_CONST_CACHE_PS_5:
985 	case SQ_ALU_CONST_CACHE_PS_6:
986 	case SQ_ALU_CONST_CACHE_PS_7:
987 	case SQ_ALU_CONST_CACHE_PS_8:
988 	case SQ_ALU_CONST_CACHE_PS_9:
989 	case SQ_ALU_CONST_CACHE_PS_10:
990 	case SQ_ALU_CONST_CACHE_PS_11:
991 	case SQ_ALU_CONST_CACHE_PS_12:
992 	case SQ_ALU_CONST_CACHE_PS_13:
993 	case SQ_ALU_CONST_CACHE_PS_14:
994 	case SQ_ALU_CONST_CACHE_PS_15:
995 	case SQ_ALU_CONST_CACHE_VS_0:
996 	case SQ_ALU_CONST_CACHE_VS_1:
997 	case SQ_ALU_CONST_CACHE_VS_2:
998 	case SQ_ALU_CONST_CACHE_VS_3:
999 	case SQ_ALU_CONST_CACHE_VS_4:
1000 	case SQ_ALU_CONST_CACHE_VS_5:
1001 	case SQ_ALU_CONST_CACHE_VS_6:
1002 	case SQ_ALU_CONST_CACHE_VS_7:
1003 	case SQ_ALU_CONST_CACHE_VS_8:
1004 	case SQ_ALU_CONST_CACHE_VS_9:
1005 	case SQ_ALU_CONST_CACHE_VS_10:
1006 	case SQ_ALU_CONST_CACHE_VS_11:
1007 	case SQ_ALU_CONST_CACHE_VS_12:
1008 	case SQ_ALU_CONST_CACHE_VS_13:
1009 	case SQ_ALU_CONST_CACHE_VS_14:
1010 	case SQ_ALU_CONST_CACHE_VS_15:
1011 		r = r600_cs_packet_next_reloc(p, &reloc);
1012 		if (r) {
1013 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1014 					"0x%04X\n", reg);
1015 			return -EINVAL;
1016 		}
1017 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1018 		break;
1019 	default:
1020 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1021 		return -EINVAL;
1022 	}
1023 	return 0;
1024 }
1025 
1026 static inline unsigned minify(unsigned size, unsigned levels)
1027 {
1028 	size = size >> levels;
1029 	if (size < 1)
1030 		size = 1;
1031 	return size;
1032 }
1033 
1034 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
1035 			      unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
1036 			      unsigned pitch_align,
1037 			      unsigned *l0_size, unsigned *mipmap_size)
1038 {
1039 	unsigned offset, i, level, face;
1040 	unsigned width, height, depth, rowstride, size;
1041 
1042 	w0 = minify(w0, 0);
1043 	h0 = minify(h0, 0);
1044 	d0 = minify(d0, 0);
1045 	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
1046 		width = minify(w0, i);
1047 		height = minify(h0, i);
1048 		depth = minify(d0, i);
1049 		for(face = 0; face < nfaces; face++) {
1050 			rowstride = ALIGN((width * bpe), pitch_align);
1051 			size = height * rowstride * depth;
1052 			offset += size;
1053 			offset = (offset + 0x1f) & ~0x1f;
1054 		}
1055 	}
1056 	*l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
1057 	*mipmap_size = offset;
1058 	if (!blevel)
1059 		*mipmap_size -= *l0_size;
1060 	if (!nlevels)
1061 		*mipmap_size = *l0_size;
1062 }
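
/* Worked example (illustrative, not part of the original file): a 2D
 * 256x256 texture with one face, bpe = 4 and pitch_align = 256 bytes.
 * Level 0 takes ALIGN(256*4, 256) * 256 = 262144 bytes and level 1
 * (128x128) takes ALIGN(128*4, 256) * 128 = 65536 bytes, each level's
 * running offset rounded up to a 32-byte boundary. With blevel == 0 the
 * level-0 size is subtracted at the end, so *mipmap_size covers only
 * levels 1..nlevels-1 while *l0_size covers the base image.
 */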
1063 
1064 /**
1065  * r600_check_texture_resource() - check if a texture resource is valid
1066  * @p: parser structure holding parsing context
1067  * @idx: index into the cs buffer
1068  * @texture: texture's bo structure
1069  * @mipmap: mipmap's bo structure
1070  *
1071  * This function will check that the resource has valid fields and that
1072  * the texture and mipmap bo objects are big enough to cover this resource.
1073  */
1074 static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
1075 					      struct radeon_bo *texture,
1076 					      struct radeon_bo *mipmap,
1077 					      u32 tiling_flags)
1078 {
1079 	struct r600_cs_track *track = p->track;
1080 	u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
1081 	u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;
1082 
1083 	/* on legacy kernels we don't perform advanced checks */
1084 	if (p->rdev == NULL)
1085 		return 0;
1086 
1087 	word0 = radeon_get_ib_value(p, idx + 0);
1088 	if (tiling_flags & RADEON_TILING_MACRO)
1089 		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1090 	else if (tiling_flags & RADEON_TILING_MICRO)
1091 		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1092 	word1 = radeon_get_ib_value(p, idx + 1);
1093 	w0 = G_038000_TEX_WIDTH(word0) + 1;
1094 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
1095 	d0 = G_038004_TEX_DEPTH(word1);
1096 	nfaces = 1;
1097 	switch (G_038000_DIM(word0)) {
1098 	case V_038000_SQ_TEX_DIM_1D:
1099 	case V_038000_SQ_TEX_DIM_2D:
1100 	case V_038000_SQ_TEX_DIM_3D:
1101 		break;
1102 	case V_038000_SQ_TEX_DIM_CUBEMAP:
1103 		nfaces = 6;
1104 		break;
1105 	case V_038000_SQ_TEX_DIM_1D_ARRAY:
1106 	case V_038000_SQ_TEX_DIM_2D_ARRAY:
1107 	case V_038000_SQ_TEX_DIM_2D_MSAA:
1108 	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1109 	default:
1110 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1111 		return -EINVAL;
1112 	}
1113 	if (r600_bpe_from_format(&bpe,  G_038004_DATA_FORMAT(word1))) {
1114 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1115 			 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
1116 		return -EINVAL;
1117 	}
1118 
1119 	pitch = G_038000_PITCH(word0) + 1;
1120 	switch (G_038000_TILE_MODE(word0)) {
1121 	case V_038000_ARRAY_LINEAR_GENERAL:
1122 		pitch_align = 1;
1123 		/* XXX check height align */
1124 		break;
1125 	case V_038000_ARRAY_LINEAR_ALIGNED:
1126 		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
1127 		if (!IS_ALIGNED(pitch, pitch_align)) {
1128 			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
1129 				 __func__, __LINE__, pitch);
1130 			return -EINVAL;
1131 		}
1132 		/* XXX check height align */
1133 		break;
1134 	case V_038000_ARRAY_1D_TILED_THIN1:
1135 		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
1136 		if (!IS_ALIGNED(pitch, pitch_align)) {
1137 			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
1138 				 __func__, __LINE__, pitch);
1139 			return -EINVAL;
1140 		}
1141 		/* XXX check height align */
1142 		break;
1143 	case V_038000_ARRAY_2D_TILED_THIN1:
1144 		pitch_align = max((u32)track->nbanks,
1145 				  (u32)(((track->group_size / 8) / bpe) * track->nbanks));
1146 		if (!IS_ALIGNED(pitch, pitch_align)) {
1147 			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
1148 				__func__, __LINE__, pitch);
1149 			return -EINVAL;
1150 		}
1151 		/* XXX check height align */
1152 		break;
1153 	default:
1154 		dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
1155 			 G_038000_TILE_MODE(word0), word0);
1156 		return -EINVAL;
1157 	}
1158 	/* XXX check offset align */
1159 
1160 	word0 = radeon_get_ib_value(p, idx + 4);
1161 	word1 = radeon_get_ib_value(p, idx + 5);
1162 	blevel = G_038010_BASE_LEVEL(word0);
1163 	nlevels = G_038014_LAST_LEVEL(word1);
1164 	r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
1165 			  (pitch_align * bpe),
1166 			  &l0_size, &mipmap_size);
1167 	/* the ib value at this index is the offset into the texture bo */
1168 	word0 = radeon_get_ib_value(p, idx + 2);
1169 	if ((l0_size + word0) > radeon_bo_size(texture)) {
1170 		dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
1171 			w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
1172 		return -EINVAL;
1173 	}
1174 	/* the ib value at this index is the offset into the mipmap bo */
1175 	word0 = radeon_get_ib_value(p, idx + 3);
1176 	if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
1177 		dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1178 			w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(mipmap));
1179 		return -EINVAL;
1180 	}
1181 	return 0;
1182 }
1183 
1184 static int r600_packet3_check(struct radeon_cs_parser *p,
1185 				struct radeon_cs_packet *pkt)
1186 {
1187 	struct radeon_cs_reloc *reloc;
1188 	struct r600_cs_track *track;
1189 	volatile u32 *ib;
1190 	unsigned idx;
1191 	unsigned i;
1192 	unsigned start_reg, end_reg, reg;
1193 	int r;
1194 	u32 idx_value;
1195 
1196 	track = (struct r600_cs_track *)p->track;
1197 	ib = p->ib->ptr;
1198 	idx = pkt->idx + 1;
1199 	idx_value = radeon_get_ib_value(p, idx);
1200 
1201 	switch (pkt->opcode) {
1202 	case PACKET3_START_3D_CMDBUF:
1203 		if (p->family >= CHIP_RV770 || pkt->count) {
1204 			DRM_ERROR("bad START_3D\n");
1205 			return -EINVAL;
1206 		}
1207 		break;
1208 	case PACKET3_CONTEXT_CONTROL:
1209 		if (pkt->count != 1) {
1210 			DRM_ERROR("bad CONTEXT_CONTROL\n");
1211 			return -EINVAL;
1212 		}
1213 		break;
1214 	case PACKET3_INDEX_TYPE:
1215 	case PACKET3_NUM_INSTANCES:
1216 		if (pkt->count) {
1217 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1218 			return -EINVAL;
1219 		}
1220 		break;
1221 	case PACKET3_DRAW_INDEX:
1222 		if (pkt->count != 3) {
1223 			DRM_ERROR("bad DRAW_INDEX\n");
1224 			return -EINVAL;
1225 		}
1226 		r = r600_cs_packet_next_reloc(p, &reloc);
1227 		if (r) {
1228 			DRM_ERROR("bad DRAW_INDEX\n");
1229 			return -EINVAL;
1230 		}
1231 		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1232 		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1233 		r = r600_cs_track_check(p);
1234 		if (r) {
1235 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1236 			return r;
1237 		}
1238 		break;
1239 	case PACKET3_DRAW_INDEX_AUTO:
1240 		if (pkt->count != 1) {
1241 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1242 			return -EINVAL;
1243 		}
1244 		r = r600_cs_track_check(p);
1245 		if (r) {
1246 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1247 			return r;
1248 		}
1249 		break;
1250 	case PACKET3_DRAW_INDEX_IMMD_BE:
1251 	case PACKET3_DRAW_INDEX_IMMD:
1252 		if (pkt->count < 2) {
1253 			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1254 			return -EINVAL;
1255 		}
1256 		r = r600_cs_track_check(p);
1257 		if (r) {
1258 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1259 			return r;
1260 		}
1261 		break;
1262 	case PACKET3_WAIT_REG_MEM:
1263 		if (pkt->count != 5) {
1264 			DRM_ERROR("bad WAIT_REG_MEM\n");
1265 			return -EINVAL;
1266 		}
1267 		/* bit 4 is reg (0) or mem (1) */
1268 		if (idx_value & 0x10) {
1269 			r = r600_cs_packet_next_reloc(p, &reloc);
1270 			if (r) {
1271 				DRM_ERROR("bad WAIT_REG_MEM\n");
1272 				return -EINVAL;
1273 			}
1274 			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1275 			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1276 		}
1277 		break;
1278 	case PACKET3_SURFACE_SYNC:
1279 		if (pkt->count != 3) {
1280 			DRM_ERROR("bad SURFACE_SYNC\n");
1281 			return -EINVAL;
1282 		}
1283 		/* 0xffffffff/0x0 is flush all cache flag */
1284 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1285 		    radeon_get_ib_value(p, idx + 2) != 0) {
1286 			r = r600_cs_packet_next_reloc(p, &reloc);
1287 			if (r) {
1288 				DRM_ERROR("bad SURFACE_SYNC\n");
1289 				return -EINVAL;
1290 			}
1291 			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1292 		}
1293 		break;
1294 	case PACKET3_EVENT_WRITE:
1295 		if (pkt->count != 2 && pkt->count != 0) {
1296 			DRM_ERROR("bad EVENT_WRITE\n");
1297 			return -EINVAL;
1298 		}
1299 		if (pkt->count) {
1300 			r = r600_cs_packet_next_reloc(p, &reloc);
1301 			if (r) {
1302 				DRM_ERROR("bad EVENT_WRITE\n");
1303 				return -EINVAL;
1304 			}
1305 			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1306 			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1307 		}
1308 		break;
1309 	case PACKET3_EVENT_WRITE_EOP:
1310 		if (pkt->count != 4) {
1311 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
1312 			return -EINVAL;
1313 		}
1314 		r = r600_cs_packet_next_reloc(p, &reloc);
1315 		if (r) {
1316 			DRM_ERROR("bad EVENT_WRITE\n");
1317 			return -EINVAL;
1318 		}
1319 		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1320 		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1321 		break;
1322 	case PACKET3_SET_CONFIG_REG:
1323 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
1324 		end_reg = 4 * pkt->count + start_reg - 4;
1325 		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
1326 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1327 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1328 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1329 			return -EINVAL;
1330 		}
1331 		for (i = 0; i < pkt->count; i++) {
1332 			reg = start_reg + (4 * i);
1333 			r = r600_cs_check_reg(p, reg, idx+1+i);
1334 			if (r)
1335 				return r;
1336 		}
1337 		break;
1338 	case PACKET3_SET_CONTEXT_REG:
1339 		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
1340 		end_reg = 4 * pkt->count + start_reg - 4;
1341 		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
1342 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1343 		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1344 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1345 			return -EINVAL;
1346 		}
1347 		for (i = 0; i < pkt->count; i++) {
1348 			reg = start_reg + (4 * i);
1349 			r = r600_cs_check_reg(p, reg, idx+1+i);
1350 			if (r)
1351 				return r;
1352 		}
1353 		break;
1354 	case PACKET3_SET_RESOURCE:
1355 		if (pkt->count % 7) {
1356 			DRM_ERROR("bad SET_RESOURCE\n");
1357 			return -EINVAL;
1358 		}
1359 		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
1360 		end_reg = 4 * pkt->count + start_reg - 4;
1361 		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
1362 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
1363 		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
1364 			DRM_ERROR("bad SET_RESOURCE\n");
1365 			return -EINVAL;
1366 		}
1367 		for (i = 0; i < (pkt->count / 7); i++) {
1368 			struct radeon_bo *texture, *mipmap;
1369 			u32 size, offset;
1370 
1371 			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
1372 			case SQ_TEX_VTX_VALID_TEXTURE:
1373 				/* tex base */
1374 				r = r600_cs_packet_next_reloc(p, &reloc);
1375 				if (r) {
1376 					DRM_ERROR("bad SET_RESOURCE\n");
1377 					return -EINVAL;
1378 				}
1379 				ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1380 				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1381 					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1382 				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1383 					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1384 				texture = reloc->robj;
1385 				/* tex mip base */
1386 				r = r600_cs_packet_next_reloc(p, &reloc);
1387 				if (r) {
1388 					DRM_ERROR("bad SET_RESOURCE\n");
1389 					return -EINVAL;
1390 				}
1391 				ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1392 				mipmap = reloc->robj;
1393 				r = r600_check_texture_resource(p,  idx+(i*7)+1,
1394 								texture, mipmap, reloc->lobj.tiling_flags);
1395 				if (r)
1396 					return r;
1397 				break;
1398 			case SQ_TEX_VTX_VALID_BUFFER:
1399 				/* vtx base */
1400 				r = r600_cs_packet_next_reloc(p, &reloc);
1401 				if (r) {
1402 					DRM_ERROR("bad SET_RESOURCE\n");
1403 					return -EINVAL;
1404 				}
1405 				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1406 				size = radeon_get_ib_value(p, idx+1+(i*7)+1);
1407 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1408 					/* force size to size of the buffer */
1409 					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
1410 					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
1411 				}
1412 				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
1413 				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1414 				break;
1415 			case SQ_TEX_VTX_INVALID_TEXTURE:
1416 			case SQ_TEX_VTX_INVALID_BUFFER:
1417 			default:
1418 				DRM_ERROR("bad SET_RESOURCE\n");
1419 				return -EINVAL;
1420 			}
1421 		}
1422 		break;
1423 	case PACKET3_SET_ALU_CONST:
1424 		if (track->sq_config & DX9_CONSTS) {
1425 			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
1426 			end_reg = 4 * pkt->count + start_reg - 4;
1427 			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
1428 			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
1429 			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
1430 				DRM_ERROR("bad SET_ALU_CONST\n");
1431 				return -EINVAL;
1432 			}
1433 		}
1434 		break;
1435 	case PACKET3_SET_BOOL_CONST:
1436 		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
1437 		end_reg = 4 * pkt->count + start_reg - 4;
1438 		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
1439 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
1440 		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
1441 			DRM_ERROR("bad SET_BOOL_CONST\n");
1442 			return -EINVAL;
1443 		}
1444 		break;
1445 	case PACKET3_SET_LOOP_CONST:
1446 		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
1447 		end_reg = 4 * pkt->count + start_reg - 4;
1448 		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
1449 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
1450 		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
1451 			DRM_ERROR("bad SET_LOOP_CONST\n");
1452 			return -EINVAL;
1453 		}
1454 		break;
1455 	case PACKET3_SET_CTL_CONST:
1456 		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
1457 		end_reg = 4 * pkt->count + start_reg - 4;
1458 		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
1459 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
1460 		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
1461 			DRM_ERROR("bad SET_CTL_CONST\n");
1462 			return -EINVAL;
1463 		}
1464 		break;
1465 	case PACKET3_SET_SAMPLER:
1466 		if (pkt->count % 3) {
1467 			DRM_ERROR("bad SET_SAMPLER\n");
1468 			return -EINVAL;
1469 		}
1470 		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
1471 		end_reg = 4 * pkt->count + start_reg - 4;
1472 		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
1473 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
1474 		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
1475 			DRM_ERROR("bad SET_SAMPLER\n");
1476 			return -EINVAL;
1477 		}
1478 		break;
1479 	case PACKET3_SURFACE_BASE_UPDATE:
1480 		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
1481 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
1482 			return -EINVAL;
1483 		}
1484 		if (pkt->count) {
1485 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
1486 			return -EINVAL;
1487 		}
1488 		break;
1489 	case PACKET3_NOP:
1490 		break;
1491 	default:
1492 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1493 		return -EINVAL;
1494 	}
1495 	return 0;
1496 }
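
/* Range-check example (illustrative, not part of the original file):
 * for the SET_*_REG packets the first payload dword is a register
 * offset in dwords, so
 *
 *	start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
 *	end_reg   = start_reg + 4 * pkt->count - 4;
 *
 * e.g. a packet with count = 3 carries one offset dword plus three
 * values, writing registers start_reg, start_reg + 4 and start_reg + 8,
 * and both ends must fall inside the SET_CONTEXT_REG window for the
 * packet to be accepted.
 */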
1497 
1498 int r600_cs_parse(struct radeon_cs_parser *p)
1499 {
1500 	struct radeon_cs_packet pkt;
1501 	struct r600_cs_track *track;
1502 	int r;
1503 
1504 	if (p->track == NULL) {
1505 		/* initialize tracker, we are in kms */
1506 		track = kzalloc(sizeof(*track), GFP_KERNEL);
1507 		if (track == NULL)
1508 			return -ENOMEM;
1509 		r600_cs_track_init(track);
1510 		if (p->rdev->family < CHIP_RV770) {
1511 			track->npipes = p->rdev->config.r600.tiling_npipes;
1512 			track->nbanks = p->rdev->config.r600.tiling_nbanks;
1513 			track->group_size = p->rdev->config.r600.tiling_group_size;
1514 		} else if (p->rdev->family <= CHIP_RV740) {
1515 			track->npipes = p->rdev->config.rv770.tiling_npipes;
1516 			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
1517 			track->group_size = p->rdev->config.rv770.tiling_group_size;
1518 		}
1519 		p->track = track;
1520 	}
1521 	do {
1522 		r = r600_cs_packet_parse(p, &pkt, p->idx);
1523 		if (r) {
1524 			kfree(p->track);
1525 			p->track = NULL;
1526 			return r;
1527 		}
1528 		p->idx += pkt.count + 2;
1529 		switch (pkt.type) {
1530 		case PACKET_TYPE0:
1531 			r = r600_cs_parse_packet0(p, &pkt);
1532 			break;
1533 		case PACKET_TYPE2:
1534 			break;
1535 		case PACKET_TYPE3:
1536 			r = r600_packet3_check(p, &pkt);
1537 			break;
1538 		default:
1539 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1540 			kfree(p->track);
1541 			p->track = NULL;
1542 			return -EINVAL;
1543 		}
1544 		if (r) {
1545 			kfree(p->track);
1546 			p->track = NULL;
1547 			return r;
1548 		}
1549 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1550 #if 0
1551 	for (r = 0; r < p->ib->length_dw; r++) {
1552 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib->ptr[r]);
1553 		mdelay(1);
1554 	}
1555 #endif
1556 	kfree(p->track);
1557 	p->track = NULL;
1558 	return 0;
1559 }
1560 
1561 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
1562 {
1563 	if (p->chunk_relocs_idx == -1) {
1564 		return 0;
1565 	}
1566 	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
1567 	if (p->relocs == NULL) {
1568 		return -ENOMEM;
1569 	}
1570 	return 0;
1571 }
1572 
1573 /**
1574  * r600_cs_parser_fini() - clean parser states
1575  * @parser:	parser structure holding parsing context.
1576  * @error:	error number
1577  *
1578  * If error is set, invalidate the buffers; otherwise just free the memory
1579  * used by the parsing context.
1580  **/
1581 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
1582 {
1583 	unsigned i;
1584 
1585 	kfree(parser->relocs);
1586 	for (i = 0; i < parser->nchunks; i++) {
1587 		kfree(parser->chunks[i].kdata);
1588 		kfree(parser->chunks[i].kpage[0]);
1589 		kfree(parser->chunks[i].kpage[1]);
1590 	}
1591 	kfree(parser->chunks);
1592 	kfree(parser->chunks_array);
1593 }
1594 
1595 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
1596 			unsigned family, u32 *ib, int *l)
1597 {
1598 	struct radeon_cs_parser parser;
1599 	struct radeon_cs_chunk *ib_chunk;
1600 	struct radeon_ib fake_ib;
1601 	struct r600_cs_track *track;
1602 	int r;
1603 
1604 	/* initialize tracker */
1605 	track = kzalloc(sizeof(*track), GFP_KERNEL);
1606 	if (track == NULL)
1607 		return -ENOMEM;
1608 	r600_cs_track_init(track);
1609 	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
1610 	/* initialize parser */
1611 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
1612 	parser.filp = filp;
1613 	parser.dev = &dev->pdev->dev;
1614 	parser.rdev = NULL;
1615 	parser.family = family;
1616 	parser.ib = &fake_ib;
1617 	parser.track = track;
1618 	fake_ib.ptr = ib;
1619 	r = radeon_cs_parser_init(&parser, data);
1620 	if (r) {
1621 		DRM_ERROR("Failed to initialize parser !\n");
1622 		r600_cs_parser_fini(&parser, r);
1623 		return r;
1624 	}
1625 	r = r600_cs_parser_relocs_legacy(&parser);
1626 	if (r) {
1627 		DRM_ERROR("Failed to parse relocation !\n");
1628 		r600_cs_parser_fini(&parser, r);
1629 		return r;
1630 	}
1631 	/* Copy the packet into the IB; the parser will read from the
1632 	 * input memory (cached) and write to the IB (which can be
1633 	 * uncached). */
1634 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
1635 	parser.ib->length_dw = ib_chunk->length_dw;
1636 	*l = parser.ib->length_dw;
1637 	r = r600_cs_parse(&parser);
1638 	if (r) {
1639 		DRM_ERROR("Invalid command stream !\n");
1640 		r600_cs_parser_fini(&parser, r);
1641 		return r;
1642 	}
1643 	r = radeon_cs_finish_pages(&parser);
1644 	if (r) {
1645 		DRM_ERROR("Invalid command stream !\n");
1646 		r600_cs_parser_fini(&parser, r);
1647 		return r;
1648 	}
1649 	r600_cs_parser_fini(&parser, r);
1650 	return r;
1651 }
1652 
1653 void r600_cs_legacy_init(void)
1654 {
1655 	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
1656 }
1657