xref: /linux/drivers/gpu/drm/gma500/gma_display.c (revision ca853314e78b0a65c20b6a889a23c31f918d4aa2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright © 2006-2011 Intel Corporation
4  *
5  * Authors:
6  *	Eric Anholt <eric@anholt.net>
7  *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
8  */
9 
10 #include <linux/delay.h>
11 #include <linux/highmem.h>
12 
13 #include <drm/drm_crtc.h>
14 #include <drm/drm_fourcc.h>
15 #include <drm/drm_vblank.h>
16 
17 #include "framebuffer.h"
18 #include "gma_display.h"
19 #include "psb_drv.h"
20 #include "psb_intel_drv.h"
21 #include "psb_intel_reg.h"
22 
23 /*
24  * Returns whether any output on the specified pipe is of the specified type
25  */
26 bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
27 {
28 	struct drm_device *dev = crtc->dev;
29 	struct drm_mode_config *mode_config = &dev->mode_config;
30 	struct drm_connector *l_entry;
31 
32 	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
33 		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
34 			struct gma_encoder *gma_encoder =
35 						gma_attached_encoder(l_entry);
36 			if (gma_encoder->type == type)
37 				return true;
38 		}
39 	}
40 
41 	return false;
42 }
43 
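/*
 * Crude vblank wait: simply delay for roughly one frame instead of waiting
 * for a real vblank interrupt.
 */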
44 void gma_wait_for_vblank(struct drm_device *dev)
45 {
46 	/* Wait for 20 ms, i.e. one cycle at 50 Hz. */
47 	mdelay(20);
48 }
49 
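/*
 * Program the primary plane for the framebuffer currently bound to the CRTC:
 * pin it into the GTT, set the stride and pixel format, write the new
 * base/offset registers, and finally unpin the previously displayed
 * framebuffer.
 */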
50 int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
51 		      struct drm_framebuffer *old_fb)
52 {
53 	struct drm_device *dev = crtc->dev;
54 	struct drm_psb_private *dev_priv = dev->dev_private;
55 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
56 	struct drm_framebuffer *fb = crtc->primary->fb;
57 	struct gtt_range *gtt;
58 	int pipe = gma_crtc->pipe;
59 	const struct psb_offset *map = &dev_priv->regmap[pipe];
60 	unsigned long start, offset;
61 	u32 dspcntr;
62 	int ret = 0;
63 
64 	if (!gma_power_begin(dev, true))
65 		return 0;
66 
67 	/* no fb bound */
68 	if (!fb) {
69 		dev_err(dev->dev, "No FB bound\n");
70 		goto gma_pipe_cleaner;
71 	}
72 
73 	gtt = to_gtt_range(fb->obj[0]);
74 
75 	/* We are displaying this buffer, make sure it is actually loaded
76 	   into the GTT */
77 	ret = psb_gtt_pin(gtt);
78 	if (ret < 0)
79 		goto gma_pipe_set_base_exit;
80 	start = gtt->offset;
81 	offset = y * fb->pitches[0] + x * fb->format->cpp[0];
82 
83 	REG_WRITE(map->stride, fb->pitches[0]);
84 
85 	dspcntr = REG_READ(map->cntr);
86 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
87 
88 	switch (fb->format->cpp[0] * 8) {
89 	case 8:
90 		dspcntr |= DISPPLANE_8BPP;
91 		break;
92 	case 16:
93 		if (fb->format->depth == 15)
94 			dspcntr |= DISPPLANE_15_16BPP;
95 		else
96 			dspcntr |= DISPPLANE_16BPP;
97 		break;
98 	case 24:
99 	case 32:
100 		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
101 		break;
102 	default:
103 		dev_err(dev->dev, "Unknown color depth\n");
104 		ret = -EINVAL;
105 		goto gma_pipe_set_base_exit;
106 	}
107 	REG_WRITE(map->cntr, dspcntr);
108 
109 	dev_dbg(dev->dev,
110 		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
111 
112 	/* FIXME: Investigate whether this really is the base for psb and why
113 		  the linear offset is named base for the other chips. map->surf
114 		  should be the base and map->linoff the offset for all chips */
115 	if (IS_PSB(dev)) {
116 		REG_WRITE(map->base, offset + start);
117 		REG_READ(map->base);
118 	} else {
119 		REG_WRITE(map->base, offset);
120 		REG_READ(map->base);
121 		REG_WRITE(map->surf, start);
122 		REG_READ(map->surf);
123 	}
124 
125 gma_pipe_cleaner:
126 	/* If there was a previous display we can now unpin it */
127 	if (old_fb)
128 		psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
129 
130 gma_pipe_set_base_exit:
131 	gma_power_end(dev);
132 	return ret;
133 }
134 
135 /* Loads the palette/gamma unit for the CRTC with the ramp prepared in crtc->gamma_store */
136 void gma_crtc_load_lut(struct drm_crtc *crtc)
137 {
138 	struct drm_device *dev = crtc->dev;
139 	struct drm_psb_private *dev_priv = dev->dev_private;
140 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
141 	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
142 	int palreg = map->palette;
143 	u16 *r, *g, *b;
144 	int i;
145 
146 	/* The clocks have to be on to load the palette. */
147 	if (!crtc->enabled)
148 		return;
149 
150 	r = crtc->gamma_store;
151 	g = r + crtc->gamma_size;
152 	b = g + crtc->gamma_size;
153 
154 	if (gma_power_begin(dev, false)) {
155 		for (i = 0; i < 256; i++) {
156 			REG_WRITE(palreg + 4 * i,
157 				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
158 				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
159 				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
160 		}
161 		gma_power_end(dev);
162 	} else {
163 		for (i = 0; i < 256; i++) {
164 			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
165 			dev_priv->regs.pipe[0].palette[i] =
166 				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
167 				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
168 				((*b++ >> 8) + gma_crtc->lut_adj[i]);
169 		}
170 
171 	}
172 }
173 
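/*
 * Legacy gamma_set hook. The DRM core has already copied the new ramp into
 * crtc->gamma_store before this hook is called, so all that is left to do is
 * to load it into the hardware palette.
 */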
174 int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
175 		       u32 size,
176 		       struct drm_modeset_acquire_ctx *ctx)
177 {
178 	gma_crtc_load_lut(crtc);
179 
180 	return 0;
181 }
182 
183 /*
184  * Sets the power management mode of the pipe and plane.
185  *
186  * This code should probably grow support for turning the cursor off and back
187  * on appropriately at the same time as we're turning the pipe off/on.
188  */
189 void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
190 {
191 	struct drm_device *dev = crtc->dev;
192 	struct drm_psb_private *dev_priv = dev->dev_private;
193 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
194 	int pipe = gma_crtc->pipe;
195 	const struct psb_offset *map = &dev_priv->regmap[pipe];
196 	u32 temp;
197 
198 	/* XXX: When our outputs are all unaware of DPMS modes other than off
199 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
200 	 */
201 
202 	if (IS_CDV(dev))
203 		dev_priv->ops->disable_sr(dev);
204 
205 	switch (mode) {
206 	case DRM_MODE_DPMS_ON:
207 	case DRM_MODE_DPMS_STANDBY:
208 	case DRM_MODE_DPMS_SUSPEND:
209 		if (gma_crtc->active)
210 			break;
211 
212 		gma_crtc->active = true;
213 
214 		/* Enable the DPLL */
215 		temp = REG_READ(map->dpll);
216 		if ((temp & DPLL_VCO_ENABLE) == 0) {
217 			REG_WRITE(map->dpll, temp);
218 			REG_READ(map->dpll);
219 			/* Wait for the clocks to stabilize. */
220 			udelay(150);
221 			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
222 			REG_READ(map->dpll);
223 			/* Wait for the clocks to stabilize. */
224 			udelay(150);
225 			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
226 			REG_READ(map->dpll);
227 			/* Wait for the clocks to stabilize. */
228 			udelay(150);
229 		}
230 
231 		/* Enable the plane */
232 		temp = REG_READ(map->cntr);
233 		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
234 			REG_WRITE(map->cntr,
235 				  temp | DISPLAY_PLANE_ENABLE);
236 			/* Flush the plane changes */
237 			REG_WRITE(map->base, REG_READ(map->base));
238 		}
239 
240 		udelay(150);
241 
242 		/* Enable the pipe */
243 		temp = REG_READ(map->conf);
244 		if ((temp & PIPEACONF_ENABLE) == 0)
245 			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
246 
247 		temp = REG_READ(map->status);
248 		temp &= ~(0xFFFF);
249 		temp |= PIPE_FIFO_UNDERRUN;
250 		REG_WRITE(map->status, temp);
251 		REG_READ(map->status);
252 
253 		gma_crtc_load_lut(crtc);
254 
255 		/* Give the overlay scaler a chance to enable
256 		 * if it's on this pipe */
257 		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
258 
259 		drm_crtc_vblank_on(crtc);
260 		break;
261 	case DRM_MODE_DPMS_OFF:
262 		if (!gma_crtc->active)
263 			break;
264 
265 		gma_crtc->active = false;
266 
267 		/* Give the overlay scaler a chance to disable
268 		 * if it's on this pipe */
269 		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
270 
271 		/* Disable the VGA plane that we never use */
272 		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
273 
274 		/* Turn off vblank interrupts */
275 		drm_crtc_vblank_off(crtc);
276 
277 		/* Wait for vblank for the disable to take effect */
278 		gma_wait_for_vblank(dev);
279 
280 		/* Disable plane */
281 		temp = REG_READ(map->cntr);
282 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
283 			REG_WRITE(map->cntr,
284 				  temp & ~DISPLAY_PLANE_ENABLE);
285 			/* Flush the plane changes */
286 			REG_WRITE(map->base, REG_READ(map->base));
287 			REG_READ(map->base);
288 		}
289 
290 		/* Disable pipe */
291 		temp = REG_READ(map->conf);
292 		if ((temp & PIPEACONF_ENABLE) != 0) {
293 			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
294 			REG_READ(map->conf);
295 		}
296 
297 		/* Wait for vblank for the disable to take effect. */
298 		gma_wait_for_vblank(dev);
299 
300 		udelay(150);
301 
302 		/* Disable DPLL */
303 		temp = REG_READ(map->dpll);
304 		if ((temp & DPLL_VCO_ENABLE) != 0) {
305 			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
306 			REG_READ(map->dpll);
307 		}
308 
309 		/* Wait for the clocks to turn off. */
310 		udelay(150);
311 		break;
312 	}
313 
314 	if (IS_CDV(dev))
315 		dev_priv->ops->update_wm(dev, crtc);
316 
317 	/* Set FIFO watermarks */
318 	REG_WRITE(DSPARB, 0x3F3E);
319 }
320 
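/*
 * Legacy cursor_set hook. A zero handle disables the cursor; otherwise only
 * 64x64 ARGB cursors are accepted. The backing GEM object is pinned into the
 * GTT and, on chips where the cursor must live in stolen memory
 * (cursor_needs_phys), its contents are copied into the reserved cursor area.
 */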
321 int gma_crtc_cursor_set(struct drm_crtc *crtc,
322 			struct drm_file *file_priv,
323 			uint32_t handle,
324 			uint32_t width, uint32_t height)
325 {
326 	struct drm_device *dev = crtc->dev;
327 	struct drm_psb_private *dev_priv = dev->dev_private;
328 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
329 	int pipe = gma_crtc->pipe;
330 	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
331 	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
332 	uint32_t temp;
333 	size_t addr = 0;
334 	struct gtt_range *gt;
335 	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
336 	struct drm_gem_object *obj;
337 	void *tmp_dst, *tmp_src;
338 	int ret = 0, i, cursor_pages;
339 
340 	/* If we didn't get a handle then turn the cursor off */
341 	if (!handle) {
342 		temp = CURSOR_MODE_DISABLE;
343 		if (gma_power_begin(dev, false)) {
344 			REG_WRITE(control, temp);
345 			REG_WRITE(base, 0);
346 			gma_power_end(dev);
347 		}
348 
349 		/* Unpin the old GEM object */
350 		if (gma_crtc->cursor_obj) {
351 			gt = container_of(gma_crtc->cursor_obj,
352 					  struct gtt_range, gem);
353 			psb_gtt_unpin(gt);
354 			drm_gem_object_put(gma_crtc->cursor_obj);
355 			gma_crtc->cursor_obj = NULL;
356 		}
357 		return 0;
358 	}
359 
360 	/* Currently we only support 64x64 cursors */
361 	if (width != 64 || height != 64) {
362 		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
363 		return -EINVAL;
364 	}
365 
366 	obj = drm_gem_object_lookup(file_priv, handle);
367 	if (!obj) {
368 		ret = -ENOENT;
369 		goto unlock;
370 	}
371 
372 	if (obj->size < width * height * 4) {
373 		dev_dbg(dev->dev, "Buffer is too small\n");
374 		ret = -ENOMEM;
375 		goto unref_cursor;
376 	}
377 
378 	gt = container_of(obj, struct gtt_range, gem);
379 
380 	/* Pin the memory into the GTT */
381 	ret = psb_gtt_pin(gt);
382 	if (ret) {
383 		dev_err(dev->dev, "Cannot pin down handle 0x%x\n", handle);
384 		goto unref_cursor;
385 	}
386 
387 	if (dev_priv->ops->cursor_needs_phys) {
388 		if (cursor_gt == NULL) {
389 			dev_err(dev->dev, "No hardware cursor mem available\n");
390 			ret = -ENOMEM;
391 			goto unref_cursor;
392 		}
393 
394 		/* Prevent overflow */
395 		if (gt->npage > 4)
396 			cursor_pages = 4;
397 		else
398 			cursor_pages = gt->npage;
399 
400 		/* Copy the cursor to cursor mem */
401 		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
402 		for (i = 0; i < cursor_pages; i++) {
403 			tmp_src = kmap(gt->pages[i]);
404 			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
405 			kunmap(gt->pages[i]);
406 			tmp_dst += PAGE_SIZE;
407 		}
408 
409 		addr = gma_crtc->cursor_addr;
410 	} else {
411 		addr = gt->offset;
412 		gma_crtc->cursor_addr = addr;
413 	}
414 
415 	temp = 0;
416 	/* set the pipe for the cursor */
417 	temp |= (pipe << 28);
418 	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
419 
420 	if (gma_power_begin(dev, false)) {
421 		REG_WRITE(control, temp);
422 		REG_WRITE(base, addr);
423 		gma_power_end(dev);
424 	}
425 
426 	/* unpin the old bo */
427 	if (gma_crtc->cursor_obj) {
428 		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
429 		psb_gtt_unpin(gt);
430 		drm_gem_object_put(gma_crtc->cursor_obj);
431 	}
432 
433 	gma_crtc->cursor_obj = obj;
434 unlock:
435 	return ret;
436 
437 unref_cursor:
438 	drm_gem_object_put(obj);
439 	return ret;
440 }
441 
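/*
 * Legacy cursor_move hook. The position registers use a sign/magnitude
 * encoding: a CURSOR_POS_SIGN flag per axis plus the absolute coordinate
 * masked with CURSOR_POS_MASK, each shifted into the X or Y field.
 */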
442 int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
443 {
444 	struct drm_device *dev = crtc->dev;
445 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
446 	int pipe = gma_crtc->pipe;
447 	uint32_t temp = 0;
448 	uint32_t addr;
449 
450 	if (x < 0) {
451 		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
452 		x = -x;
453 	}
454 	if (y < 0) {
455 		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
456 		y = -y;
457 	}
458 
459 	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
460 	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
461 
462 	addr = gma_crtc->cursor_addr;
463 
464 	if (gma_power_begin(dev, false)) {
465 		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
466 		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
467 		gma_power_end(dev);
468 	}
469 	return 0;
470 }
471 
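/*
 * The CRTC helper library calls these around a full mode set: force the pipe
 * off before reprogramming and turn it back on afterwards.
 */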
472 void gma_crtc_prepare(struct drm_crtc *crtc)
473 {
474 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
475 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
476 }
477 
478 void gma_crtc_commit(struct drm_crtc *crtc)
479 {
480 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
481 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
482 }
483 
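/* Turn the pipe off and unpin the framebuffer that gma_pipe_set_base() pinned */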
484 void gma_crtc_disable(struct drm_crtc *crtc)
485 {
486 	struct gtt_range *gt;
487 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
488 
489 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
490 
491 	if (crtc->primary->fb) {
492 		gt = to_gtt_range(crtc->primary->fb->obj[0]);
493 		psb_gtt_unpin(gt);
494 	}
495 }
496 
497 void gma_crtc_destroy(struct drm_crtc *crtc)
498 {
499 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
500 
501 	kfree(gma_crtc->crtc_state);
502 	drm_crtc_cleanup(crtc);
503 	kfree(gma_crtc);
504 }
505 
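/*
 * Legacy (non-atomic) page flip: reuse the mode_set_base helper to latch the
 * new framebuffer. When an event is requested it is armed under event_lock so
 * the vblank interrupt handler can complete it.
 */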
506 int gma_crtc_page_flip(struct drm_crtc *crtc,
507 		       struct drm_framebuffer *fb,
508 		       struct drm_pending_vblank_event *event,
509 		       uint32_t page_flip_flags,
510 		       struct drm_modeset_acquire_ctx *ctx)
511 {
512 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
513 	struct drm_framebuffer *current_fb = crtc->primary->fb;
514 	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
515 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
516 	struct drm_device *dev = crtc->dev;
517 	unsigned long flags;
518 	int ret;
519 
520 	if (!crtc_funcs->mode_set_base)
521 		return -EINVAL;
522 
523 	/* Using mode_set_base requires the new fb to be set already. */
524 	crtc->primary->fb = fb;
525 
526 	if (event) {
527 		spin_lock_irqsave(&dev->event_lock, flags);
528 
529 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
530 
531 		gma_crtc->page_flip_event = event;
532 
533 		/* Call this locked if we want an event at vblank interrupt. */
534 		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
535 		if (ret) {
536 			gma_crtc->page_flip_event = NULL;
537 			drm_crtc_vblank_put(crtc);
538 		}
539 
540 		spin_unlock_irqrestore(&dev->event_lock, flags);
541 	} else {
542 		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
543 	}
544 
545 	/* Restore previous fb in case of failure. */
546 	if (ret)
547 		crtc->primary->fb = current_fb;
548 
549 	return ret;
550 }
551 
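/*
 * set_config wrapper that keeps runtime PM from suspending the device while
 * drm_crtc_helper_set_config() reprograms the hardware.
 */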
552 int gma_crtc_set_config(struct drm_mode_set *set,
553 			struct drm_modeset_acquire_ctx *ctx)
554 {
555 	struct drm_device *dev = set->crtc->dev;
556 	struct drm_psb_private *dev_priv = dev->dev_private;
557 	int ret;
558 
559 	if (!dev_priv->rpm_enabled)
560 		return drm_crtc_helper_set_config(set, ctx);
561 
562 	pm_runtime_forbid(dev->dev);
563 	ret = drm_crtc_helper_set_config(set, ctx);
564 	pm_runtime_allow(dev->dev);
565 
566 	return ret;
567 }
568 
569 /*
570  * Save the HW state of the given crtc
571  */
572 void gma_crtc_save(struct drm_crtc *crtc)
573 {
574 	struct drm_device *dev = crtc->dev;
575 	struct drm_psb_private *dev_priv = dev->dev_private;
576 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
577 	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
578 	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
579 	uint32_t palette_reg;
580 	int i;
581 
582 	if (!crtc_state) {
583 		dev_err(dev->dev, "No CRTC state found\n");
584 		return;
585 	}
586 
587 	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
588 	crtc_state->savePIPECONF = REG_READ(map->conf);
589 	crtc_state->savePIPESRC = REG_READ(map->src);
590 	crtc_state->saveFP0 = REG_READ(map->fp0);
591 	crtc_state->saveFP1 = REG_READ(map->fp1);
592 	crtc_state->saveDPLL = REG_READ(map->dpll);
593 	crtc_state->saveHTOTAL = REG_READ(map->htotal);
594 	crtc_state->saveHBLANK = REG_READ(map->hblank);
595 	crtc_state->saveHSYNC = REG_READ(map->hsync);
596 	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
597 	crtc_state->saveVBLANK = REG_READ(map->vblank);
598 	crtc_state->saveVSYNC = REG_READ(map->vsync);
599 	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
600 
601 	/* NOTE: DSPSIZE and DSPPOS only for psb */
602 	crtc_state->saveDSPSIZE = REG_READ(map->size);
603 	crtc_state->saveDSPPOS = REG_READ(map->pos);
604 
605 	crtc_state->saveDSPBASE = REG_READ(map->base);
606 
607 	palette_reg = map->palette;
608 	for (i = 0; i < 256; ++i)
609 		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
610 }
611 
612 /*
613  * Restore the HW state of the given crtc
614  */
615 void gma_crtc_restore(struct drm_crtc *crtc)
616 {
617 	struct drm_device *dev = crtc->dev;
618 	struct drm_psb_private *dev_priv = dev->dev_private;
619 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
620 	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
621 	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
622 	uint32_t palette_reg;
623 	int i;
624 
625 	if (!crtc_state) {
626 		dev_err(dev->dev, "No crtc state\n");
627 		return;
628 	}
629 
630 	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
631 		REG_WRITE(map->dpll,
632 			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
633 		REG_READ(map->dpll);
634 		udelay(150);
635 	}
636 
637 	REG_WRITE(map->fp0, crtc_state->saveFP0);
638 	REG_READ(map->fp0);
639 
640 	REG_WRITE(map->fp1, crtc_state->saveFP1);
641 	REG_READ(map->fp1);
642 
643 	REG_WRITE(map->dpll, crtc_state->saveDPLL);
644 	REG_READ(map->dpll);
645 	udelay(150);
646 
647 	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
648 	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
649 	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
650 	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
651 	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
652 	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
653 	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
654 
655 	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
656 	REG_WRITE(map->pos, crtc_state->saveDSPPOS);
657 
658 	REG_WRITE(map->src, crtc_state->savePIPESRC);
659 	REG_WRITE(map->base, crtc_state->saveDSPBASE);
660 	REG_WRITE(map->conf, crtc_state->savePIPECONF);
661 
662 	gma_wait_for_vblank(dev);
663 
664 	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
665 	REG_WRITE(map->base, crtc_state->saveDSPBASE);
666 
667 	gma_wait_for_vblank(dev);
668 
669 	palette_reg = map->palette;
670 	for (i = 0; i < 256; ++i)
671 		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
672 }
673 
674 void gma_encoder_prepare(struct drm_encoder *encoder)
675 {
676 	const struct drm_encoder_helper_funcs *encoder_funcs =
677 	    encoder->helper_private;
678 	/* lvds has its own version of prepare, see psb_intel_lvds_prepare */
679 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
680 }
681 
682 void gma_encoder_commit(struct drm_encoder *encoder)
683 {
684 	const struct drm_encoder_helper_funcs *encoder_funcs =
685 	    encoder->helper_private;
686 	/* lvds has its own version of commit, see psb_intel_lvds_commit */
687 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
688 }
689 
690 void gma_encoder_destroy(struct drm_encoder *encoder)
691 {
692 	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
693 
694 	drm_encoder_cleanup(encoder);
695 	kfree(intel_encoder);
696 }
697 
698 /* Currently there is only a 1:1 mapping of encoders and connectors */
699 struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
700 {
701 	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
702 
703 	return &gma_encoder->base;
704 }
705 
706 void gma_connector_attach_encoder(struct gma_connector *connector,
707 				  struct gma_encoder *encoder)
708 {
709 	connector->encoder = encoder;
710 	drm_connector_attach_encoder(&connector->base,
711 					  &encoder->base);
712 }
713 
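/* Reject a PLL candidate; the DRM_ERROR() diagnostic is currently compiled out */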
714 #define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
715 
716 bool gma_pll_is_valid(struct drm_crtc *crtc,
717 		      const struct gma_limit_t *limit,
718 		      struct gma_clock_t *clock)
719 {
720 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
721 		GMA_PLL_INVALID("p1 out of range");
722 	if (clock->p < limit->p.min || limit->p.max < clock->p)
723 		GMA_PLL_INVALID("p out of range");
724 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
725 		GMA_PLL_INVALID("m2 out of range");
726 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
727 		GMA_PLL_INVALID("m1 out of range");
728 	/* On CDV m1 is always 0 */
729 	if (clock->m1 <= clock->m2 && clock->m1 != 0)
730 		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
731 	if (clock->m < limit->m.min || limit->m.max < clock->m)
732 		GMA_PLL_INVALID("m out of range");
733 	if (clock->n < limit->n.min || limit->n.max < clock->n)
734 		GMA_PLL_INVALID("n out of range");
735 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
736 		GMA_PLL_INVALID("vco out of range");
737 	/* XXX: We may need to be checking "Dot clock"
738 	 * depending on the multiplier, connector, etc.,
739 	 * rather than just a single range.
740 	 */
741 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
742 		GMA_PLL_INVALID("dot out of range");
743 
744 	return true;
745 }
746 
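/*
 * Brute-force search of the m1/m2/n/p1 ranges in *limit for the divider
 * combination whose resulting dot clock is closest to target. Returns true
 * if a usable combination was found, with the winner left in *best_clock.
 */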
747 bool gma_find_best_pll(const struct gma_limit_t *limit,
748 		       struct drm_crtc *crtc, int target, int refclk,
749 		       struct gma_clock_t *best_clock)
750 {
751 	struct drm_device *dev = crtc->dev;
752 	const struct gma_clock_funcs *clock_funcs =
753 						to_gma_crtc(crtc)->clock_funcs;
754 	struct gma_clock_t clock;
755 	int err = target;
756 
757 	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
758 	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
759 		/*
760 		 * For LVDS, if the panel is on, just rely on its current
761 		 * settings for dual-channel.  We haven't figured out how to
762 		 * reliably set up different single/dual channel state, if we
763 		 * even can.
764 		 */
765 		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
766 		    LVDS_CLKB_POWER_UP)
767 			clock.p2 = limit->p2.p2_fast;
768 		else
769 			clock.p2 = limit->p2.p2_slow;
770 	} else {
771 		if (target < limit->p2.dot_limit)
772 			clock.p2 = limit->p2.p2_slow;
773 		else
774 			clock.p2 = limit->p2.p2_fast;
775 	}
776 
777 	memset(best_clock, 0, sizeof(*best_clock));
778 
779 	/* m1 is always 0 on CDV so the outermost loop will run just once */
780 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
781 		for (clock.m2 = limit->m2.min;
782 		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
783 		      clock.m2 <= limit->m2.max; clock.m2++) {
784 			for (clock.n = limit->n.min;
785 			     clock.n <= limit->n.max; clock.n++) {
786 				for (clock.p1 = limit->p1.min;
787 				     clock.p1 <= limit->p1.max;
788 				     clock.p1++) {
789 					int this_err;
790 
791 					clock_funcs->clock(refclk, &clock);
792 
793 					if (!clock_funcs->pll_is_valid(crtc,
794 								limit, &clock))
795 						continue;
796 
797 					this_err = abs(clock.dot - target);
798 					if (this_err < err) {
799 						*best_clock = clock;
800 						err = this_err;
801 					}
802 				}
803 			}
804 		}
805 	}
806 
807 	return err != target;
808 }
809