xref: /linux/drivers/gpu/drm/drm_edid.c (revision 606b2f490fb80e55d05cf0e6cec0b6c0ff0fc18f)
1 /*
2  * Copyright (c) 2006 Luc Verhaegen (quirks list)
3  * Copyright (c) 2007-2008 Intel Corporation
4  *   Jesse Barnes <jesse.barnes@intel.com>
5  * Copyright 2010 Red Hat, Inc.
6  *
7  * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
8  * FB layer.
9  *   Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
10  *
11  * Permission is hereby granted, free of charge, to any person obtaining a
12  * copy of this software and associated documentation files (the "Software"),
13  * to deal in the Software without restriction, including without limitation
14  * the rights to use, copy, modify, merge, publish, distribute, sub license,
15  * and/or sell copies of the Software, and to permit persons to whom the
16  * Software is furnished to do so, subject to the following conditions:
17  *
18  * The above copyright notice and this permission notice (including the
19  * next paragraph) shall be included in all copies or substantial portions
20  * of the Software.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28  * DEALINGS IN THE SOFTWARE.
29  */
30 #include <linux/kernel.h>
31 #include <linux/slab.h>
32 #include <linux/i2c.h>
33 #include <linux/i2c-algo-bit.h>
34 #include "drmP.h"
35 #include "drm_edid.h"
36 #include "drm_edid_modes.h"
37 
38 #define version_greater(edid, maj, min) \
39 	(((edid)->version > (maj)) || \
40 	 ((edid)->version == (maj) && (edid)->revision > (min)))
41 
42 #define EDID_EST_TIMINGS 16
43 #define EDID_STD_TIMINGS 8
44 #define EDID_DETAILED_TIMINGS 4
45 
46 /*
47  * EDID blocks out in the wild have a variety of bugs, try to collect
48  * them here (note that userspace may work around broken monitors first,
49  * but fixes should make their way here so that the kernel "just works"
50  * on as many displays as possible).
51  */
52 
53 /* First detailed mode wrong, use largest 60Hz mode */
54 #define EDID_QUIRK_PREFER_LARGE_60		(1 << 0)
55 /* Reported 135MHz pixel clock is too high, needs adjustment */
56 #define EDID_QUIRK_135_CLOCK_TOO_HIGH		(1 << 1)
57 /* Prefer the largest mode at 75 Hz */
58 #define EDID_QUIRK_PREFER_LARGE_75		(1 << 2)
59 /* Detail timing is in cm not mm */
60 #define EDID_QUIRK_DETAILED_IN_CM		(1 << 3)
61 /* Detailed timing descriptors have bogus size values, so just take the
62  * maximum size and use that.
63  */
64 #define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE	(1 << 4)
65 /* Monitor forgot to set the 'first detailed timing is preferred' bit. */
66 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED	(1 << 5)
67 /* use +hsync +vsync for detailed mode */
68 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
69 
70 struct detailed_mode_closure {
71 	struct drm_connector *connector;
72 	struct edid *edid;
73 	bool preferred;
74 	u32 quirks;
75 	int modes;
76 };
77 
78 #define LEVEL_DMT	0
79 #define LEVEL_GTF	1
80 #define LEVEL_GTF2	2
81 #define LEVEL_CVT	3
82 
83 static struct edid_quirk {
84 	char *vendor;
85 	int product_id;
86 	u32 quirks;
87 } edid_quirk_list[] = {
88 	/* Acer AL1706 */
89 	{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
90 	/* Acer F51 */
91 	{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
92 	/* Unknown Acer */
93 	{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
94 
95 	/* Belinea 10 15 55 */
96 	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
97 	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
98 
99 	/* Envision Peripherals, Inc. EN-7100e */
100 	{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
101 	/* Envision EN2028 */
102 	{ "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
103 
104 	/* Funai Electronics PM36B */
105 	{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
106 	  EDID_QUIRK_DETAILED_IN_CM },
107 
108 	/* LG Philips LCD LP154W01-A5 */
109 	{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
110 	{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
111 
112 	/* Philips 107p5 CRT */
113 	{ "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
114 
115 	/* Proview AY765C */
116 	{ "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
117 
118 	/* Samsung SyncMaster 205BW.  Note: irony */
119 	{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
120 	/* Samsung SyncMaster 22[5-6]BW */
121 	{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
122 	{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
123 };
124 
125 /*** DDC fetch and block validation ***/
126 
127 static const u8 edid_header[] = {
128 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
129 };
130 
131 /*
132  * Sanity check the EDID block (base or extension).  Return false if the
133  * block doesn't check out, or true if it's valid.
134  */
135 static bool
136 drm_edid_block_valid(u8 *raw_edid)
137 {
138 	int i;
139 	u8 csum = 0;
140 	struct edid *edid = (struct edid *)raw_edid;
141 
142 	if (raw_edid[0] == 0x00) {
143 		int score = 0;
144 
145 		for (i = 0; i < sizeof(edid_header); i++)
146 			if (raw_edid[i] == edid_header[i])
147 				score++;
148 
149 		if (score == 8) ;
150 		else if (score >= 6) {
151 			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
152 			memcpy(raw_edid, edid_header, sizeof(edid_header));
153 		} else {
154 			goto bad;
155 		}
156 	}
157 
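	/* all 128 bytes of an EDID block must sum to zero, modulo 256 */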
158 	for (i = 0; i < EDID_LENGTH; i++)
159 		csum += raw_edid[i];
160 	if (csum) {
161 		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
162 
163 		/* allow CEA to slide through, switches mangle this */
164 		if (raw_edid[0] != 0x02)
165 			goto bad;
166 	}
167 
168 	/* per-block-type checks */
169 	switch (raw_edid[0]) {
170 	case 0: /* base */
171 		if (edid->version != 1) {
172 			DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
173 			goto bad;
174 		}
175 
176 		if (edid->revision > 4)
177 			DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
178 		break;
179 
180 	default:
181 		break;
182 	}
183 
184 	return true;
185 
186 bad:
187 	if (raw_edid) {
188 		DRM_ERROR("Raw EDID:\n");
189 		print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
190 		printk("\n");
191 	}
192 	return false;
193 }
194 
195 /**
196  * drm_edid_is_valid - sanity check EDID data
197  * @edid: EDID data
198  *
199  * Sanity-check an entire EDID record (including extensions)
200  */
201 bool drm_edid_is_valid(struct edid *edid)
202 {
203 	int i;
204 	u8 *raw = (u8 *)edid;
205 
206 	if (!edid)
207 		return false;
208 
209 	for (i = 0; i <= edid->extensions; i++)
210 		if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
211 			return false;
212 
213 	return true;
214 }
215 EXPORT_SYMBOL(drm_edid_is_valid);
216 
217 #define DDC_ADDR 0x50
218 #define DDC_SEGMENT_ADDR 0x30
219 /**
220  * Get EDID information via I2C.
221  *
222  * \param adapter : i2c device adaptor
223  * \param buf     : EDID data buffer to be filled
 * \param block   : EDID block number to fetch
224  * \param len     : number of bytes to read into the buffer
225  * \return 0 on success or -1 on failure.
226  *
227  * Try to fetch EDID information by calling i2c driver function.
228  */
229 static int
230 drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
231 		      int block, int len)
232 {
233 	unsigned char start = block * EDID_LENGTH;
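	/*
	 * Standard DDC read: one write message to set the EDID offset at
	 * address 0x50, then one read message to pull the data back.  The
	 * E-DDC segment pointer (DDC_SEGMENT_ADDR) is never programmed here,
	 * so only the first two blocks (256 bytes) are reachable this way.
	 */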
234 	struct i2c_msg msgs[] = {
235 		{
236 			.addr	= DDC_ADDR,
237 			.flags	= 0,
238 			.len	= 1,
239 			.buf	= &start,
240 		}, {
241 			.addr	= DDC_ADDR,
242 			.flags	= I2C_M_RD,
243 			.len	= len,
244 			.buf	= buf + start,
245 		}
246 	};
247 
248 	if (i2c_transfer(adapter, msgs, 2) == 2)
249 		return 0;
250 
251 	return -1;
252 }
253 
254 static u8 *
255 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
256 {
257 	int i, j = 0;
258 	u8 *block, *new;
259 
260 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
261 		return NULL;
262 
263 	/* base block fetch */
264 	for (i = 0; i < 4; i++) {
265 		if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
266 			goto out;
267 		if (drm_edid_block_valid(block))
268 			break;
269 	}
270 	if (i == 4)
271 		goto carp;
272 
273 	/* if there's no extensions, we're done */
274 	if (block[0x7e] == 0)
275 		return block;
276 
277 	new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
278 	if (!new)
279 		goto out;
280 	block = new;
281 
282 	for (j = 1; j <= block[0x7e]; j++) {
283 		for (i = 0; i < 4; i++) {
284 			if (drm_do_probe_ddc_edid(adapter, block, j,
285 						  EDID_LENGTH))
286 				goto out;
287 			if (drm_edid_block_valid(block + j * EDID_LENGTH))
288 				break;
289 		}
290 		if (i == 4)
291 			goto carp;
292 	}
293 
294 	return block;
295 
296 carp:
297 	dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
298 		 drm_get_connector_name(connector), j);
299 
300 out:
301 	kfree(block);
302 	return NULL;
303 }
304 
305 /**
306  * Probe DDC presence.
307  *
308  * \param adapter : i2c device adaptor
309  * \return 1 on success
310  */
311 static bool
312 drm_probe_ddc(struct i2c_adapter *adapter)
313 {
314 	unsigned char out;
315 
316 	return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
317 }
318 
319 /**
320  * drm_get_edid - get EDID data, if available
321  * @connector: connector we're probing
322  * @adapter: i2c adapter to use for DDC
323  *
324  * Poke the given i2c channel to grab EDID data if possible.  If found,
325  * attach it to the connector.
326  *
327  * Return edid data or NULL if we couldn't find any.
328  */
329 struct edid *drm_get_edid(struct drm_connector *connector,
330 			  struct i2c_adapter *adapter)
331 {
332 	struct edid *edid = NULL;
333 
334 	if (drm_probe_ddc(adapter))
335 		edid = (struct edid *)drm_do_get_edid(connector, adapter);
336 
337 	connector->display_info.raw_edid = (char *)edid;
338 
339 	return edid;
340 
341 }
342 EXPORT_SYMBOL(drm_get_edid);
343 
344 /*** EDID parsing ***/
345 
346 /**
347  * edid_vendor - match a string against EDID's obfuscated vendor field
348  * @edid: EDID to match
349  * @vendor: vendor string
350  *
351  * Returns true if @vendor is in @edid, false otherwise
352  */
353 static bool edid_vendor(struct edid *edid, char *vendor)
354 {
355 	char edid_vendor[3];
356 
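	/*
	 * The PNP vendor ID packs three uppercase letters into two bytes,
	 * five bits per letter with 'A' encoded as 1; adding '@' (0x40)
	 * turns each 5-bit value back into its ASCII letter.
	 */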
357 	edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
358 	edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
359 			  ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
360 	edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
361 
362 	return !strncmp(edid_vendor, vendor, 3);
363 }
364 
365 /**
366  * edid_get_quirks - return quirk flags for a given EDID
367  * @edid: EDID to process
368  *
369  * This tells subsequent routines what fixes they need to apply.
370  */
371 static u32 edid_get_quirks(struct edid *edid)
372 {
373 	struct edid_quirk *quirk;
374 	int i;
375 
376 	for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
377 		quirk = &edid_quirk_list[i];
378 
379 		if (edid_vendor(edid, quirk->vendor) &&
380 		    (EDID_PRODUCT_ID(edid) == quirk->product_id))
381 			return quirk->quirks;
382 	}
383 
384 	return 0;
385 }
386 
387 #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
388 #define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
389 
390 /**
391  * edid_fixup_preferred - set preferred modes based on quirk list
392  * @connector: has mode list to fix up
393  * @quirks: quirks list
394  *
395  * Walk the mode list for @connector, clearing the preferred status
396  * on existing modes and setting it anew for the right mode ala @quirks.
397  */
398 static void edid_fixup_preferred(struct drm_connector *connector,
399 				 u32 quirks)
400 {
401 	struct drm_display_mode *t, *cur_mode, *preferred_mode;
402 	int target_refresh = 0;
403 
404 	if (list_empty(&connector->probed_modes))
405 		return;
406 
407 	if (quirks & EDID_QUIRK_PREFER_LARGE_60)
408 		target_refresh = 60;
409 	if (quirks & EDID_QUIRK_PREFER_LARGE_75)
410 		target_refresh = 75;
411 
412 	preferred_mode = list_first_entry(&connector->probed_modes,
413 					  struct drm_display_mode, head);
414 
415 	list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
416 		cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
417 
418 		if (cur_mode == preferred_mode)
419 			continue;
420 
421 		/* Largest mode is preferred */
422 		if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
423 			preferred_mode = cur_mode;
424 
425 		/* At a given size, try to get closest to target refresh */
426 		if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
427 		    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
428 		    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
429 			preferred_mode = cur_mode;
430 		}
431 	}
432 
433 	preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
434 }
435 
436 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
437 					   int hsize, int vsize, int fresh)
438 {
439 	int i;
440 	struct drm_display_mode *ptr, *mode;
441 
442 	mode = NULL;
443 	for (i = 0; i < drm_num_dmt_modes; i++) {
444 		ptr = &drm_dmt_modes[i];
445 		if (hsize == ptr->hdisplay &&
446 			vsize == ptr->vdisplay &&
447 			fresh == drm_mode_vrefresh(ptr)) {
448 			/* get the expected default mode */
449 			mode = drm_mode_duplicate(dev, ptr);
450 			break;
451 		}
452 	}
453 	return mode;
454 }
455 EXPORT_SYMBOL(drm_mode_find_dmt);
456 
457 typedef void detailed_cb(struct detailed_timing *timing, void *closure);
458 
459 static void
460 cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
461 {
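	/*
	 * Byte 2 of a CEA extension is the offset of its first 18-byte
	 * Detailed Timing Descriptor; everything from there to the end of
	 * the block (minus padding) is DTDs.
	 */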
462 	int i, n = 0;
463 	u8 rev = ext[0x01], d = ext[0x02];
464 	u8 *det_base = ext + d;
465 
466 	switch (rev) {
467 	case 0:
468 		/* can't happen */
469 		return;
470 	case 1:
471 		/* have to infer how many blocks we have, check pixel clock */
472 		for (i = 0; i < 6; i++)
473 			if (det_base[18*i] || det_base[18*i+1])
474 				n++;
475 		break;
476 	default:
477 		/* explicit count */
478 		n = min(ext[0x03] & 0x0f, 6);
479 		break;
480 	}
481 
482 	for (i = 0; i < n; i++)
483 		cb((struct detailed_timing *)(det_base + 18 * i), closure);
484 }
485 
486 static void
487 vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
488 {
489 	unsigned int i, n = min((int)ext[0x02], 6);
490 	u8 *det_base = ext + 5;
491 
492 	if (ext[0x01] != 1)
493 		return; /* unknown version */
494 
495 	for (i = 0; i < n; i++)
496 		cb((struct detailed_timing *)(det_base + 18 * i), closure);
497 }
498 
499 static void
500 drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
501 {
502 	int i;
503 	struct edid *edid = (struct edid *)raw_edid;
504 
505 	if (edid == NULL)
506 		return;
507 
508 	for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
509 		cb(&(edid->detailed_timings[i]), closure);
510 
511 	for (i = 1; i <= raw_edid[0x7e]; i++) {
512 		u8 *ext = raw_edid + (i * EDID_LENGTH);
513 		switch (*ext) {
514 		case CEA_EXT:
515 			cea_for_each_detailed_block(ext, cb, closure);
516 			break;
517 		case VTB_EXT:
518 			vtb_for_each_detailed_block(ext, cb, closure);
519 			break;
520 		default:
521 			break;
522 		}
523 	}
524 }
525 
526 static void
527 is_rb(struct detailed_timing *t, void *data)
528 {
529 	u8 *r = (u8 *)t;
530 	if (r[3] == EDID_DETAIL_MONITOR_RANGE)
531 		if (r[15] & 0x10)
532 			*(bool *)data = true;
533 }
534 
535 /* EDID 1.4 defines this explicitly.  For EDID 1.3, we guess, badly. */
536 static bool
537 drm_monitor_supports_rb(struct edid *edid)
538 {
539 	if (edid->revision >= 4) {
540 		bool ret = false;
541 		drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
542 		return ret;
543 	}
544 
545 	return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
546 }
547 
548 static void
549 find_gtf2(struct detailed_timing *t, void *data)
550 {
551 	u8 *r = (u8 *)t;
552 	if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
553 		*(u8 **)data = r;
554 }
555 
556 /* Secondary GTF curve kicks in above some break frequency */
557 static int
558 drm_gtf2_hbreak(struct edid *edid)
559 {
560 	u8 *r = NULL;
561 	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
562 	return r ? (r[12] * 2) : 0;
563 }
564 
565 static int
566 drm_gtf2_2c(struct edid *edid)
567 {
568 	u8 *r = NULL;
569 	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
570 	return r ? r[13] : 0;
571 }
572 
573 static int
574 drm_gtf2_m(struct edid *edid)
575 {
576 	u8 *r = NULL;
577 	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
578 	return r ? (r[15] << 8) + r[14] : 0;
579 }
580 
581 static int
582 drm_gtf2_k(struct edid *edid)
583 {
584 	u8 *r = NULL;
585 	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
586 	return r ? r[16] : 0;
587 }
588 
589 static int
590 drm_gtf2_2j(struct edid *edid)
591 {
592 	u8 *r = NULL;
593 	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
594 	return r ? r[17] : 0;
595 }
596 
597 /**
598  * standard_timing_level - get std. timing level (CVT/GTF/DMT)
599  * @edid: EDID block to scan
600  */
601 static int standard_timing_level(struct edid *edid)
602 {
603 	if (edid->revision >= 2) {
604 		if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
605 			return LEVEL_CVT;
606 		if (drm_gtf2_hbreak(edid))
607 			return LEVEL_GTF2;
608 		return LEVEL_GTF;
609 	}
610 	return LEVEL_DMT;
611 }
612 
613 /*
614  * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
615  * monitors fill with ascii space (0x20) instead.
616  */
617 static int
618 bad_std_timing(u8 a, u8 b)
619 {
620 	return (a == 0x00 && b == 0x00) ||
621 	       (a == 0x01 && b == 0x01) ||
622 	       (a == 0x20 && b == 0x20);
623 }
624 
625 /**
626  * drm_mode_std - convert standard mode info (width, height, refresh) into mode
627  * @connector: connector we're probing
 * @edid: EDID block the timing came from
628  * @t: standard timing params
 * @revision: EDID revision, used to pick the right aspect ratio rules
629  *
630  * Take the standard timing params (in this case width, aspect, and refresh)
631  * and convert them into a real mode using CVT/GTF/DMT.
632  */
633 static struct drm_display_mode *
634 drm_mode_std(struct drm_connector *connector, struct edid *edid,
635 	     struct std_timing *t, int revision)
636 {
637 	struct drm_device *dev = connector->dev;
638 	struct drm_display_mode *m, *mode = NULL;
639 	int hsize, vsize;
640 	int vrefresh_rate;
641 	unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
642 		>> EDID_TIMING_ASPECT_SHIFT;
643 	unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
644 		>> EDID_TIMING_VFREQ_SHIFT;
645 	int timing_level = standard_timing_level(edid);
646 
647 	if (bad_std_timing(t->hsize, t->vfreq_aspect))
648 		return NULL;
649 
650 	/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
651 	hsize = t->hsize * 8 + 248;
652 	/* vrefresh_rate = vfreq + 60 */
653 	vrefresh_rate = vfreq + 60;
654 	/* the vdisplay is calculated based on the aspect ratio */
655 	if (aspect_ratio == 0) {
656 		if (revision < 3)
657 			vsize = hsize;
658 		else
659 			vsize = (hsize * 10) / 16;
660 	} else if (aspect_ratio == 1)
661 		vsize = (hsize * 3) / 4;
662 	else if (aspect_ratio == 2)
663 		vsize = (hsize * 4) / 5;
664 	else
665 		vsize = (hsize * 9) / 16;
666 
667 	/*
	 * HDTV hack, part 1.  Standard timings can't express 1366x768
	 * (hsize comes in multiples of 8), so catch the common
	 * approximations and snap them to the real thing.
	 */
668 	if (vrefresh_rate == 60 &&
669 	    ((hsize == 1360 && vsize == 765) ||
670 	     (hsize == 1368 && vsize == 769))) {
671 		hsize = 1366;
672 		vsize = 768;
673 	}
674 
675 	/*
676 	 * If this connector already has a mode for this size and refresh
677 	 * rate (because it came from detailed or CVT info), use that
678 	 * instead.  This way we don't have to guess at interlace or
679 	 * reduced blanking.
680 	 */
681 	list_for_each_entry(m, &connector->probed_modes, head)
682 		if (m->hdisplay == hsize && m->vdisplay == vsize &&
683 		    drm_mode_vrefresh(m) == vrefresh_rate)
684 			return NULL;
685 
686 	/* HDTV hack, part 2 */
687 	if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
688 		mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
689 				    false);
		if (!mode)
			return NULL;
690 		mode->hdisplay = 1366;
691 		mode->hsync_start = mode->hsync_start - 1;
692 		mode->hsync_end = mode->hsync_end - 1;
693 		return mode;
694 	}
695 
696 	/* check whether it can be found in default mode table */
697 	mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
698 	if (mode)
699 		return mode;
700 
701 	switch (timing_level) {
702 	case LEVEL_DMT:
703 		break;
704 	case LEVEL_GTF:
705 		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
706 		break;
707 	case LEVEL_GTF2:
708 		/*
709 		 * This is potentially wrong if there's ever a monitor with
710 		 * more than one ranges section, each claiming a different
711 		 * secondary GTF curve.  Please don't do that.
712 		 */
713 		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
		if (!mode)
			return NULL;
714 		if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
715 			kfree(mode);
716 			mode = drm_gtf_mode_complex(dev, hsize, vsize,
717 						    vrefresh_rate, 0, 0,
718 						    drm_gtf2_m(edid),
719 						    drm_gtf2_2c(edid),
720 						    drm_gtf2_k(edid),
721 						    drm_gtf2_2j(edid));
722 		}
723 		break;
724 	case LEVEL_CVT:
725 		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
726 				    false);
727 		break;
728 	}
729 	return mode;
730 }
731 
732 /*
733  * EDID is delightfully ambiguous about how interlaced modes are to be
734  * encoded.  Our internal representation is of frame height, but some
735  * HDTV detailed timings are encoded as field height.
736  *
737  * The format list here is from CEA, in frame size.  Technically we
738  * should be checking refresh rate too.  Whatever.
739  */
740 static void
741 drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
742 			    struct detailed_pixel_timing *pt)
743 {
744 	int i;
745 	static const struct {
746 		int w, h;
747 	} cea_interlaced[] = {
748 		{ 1920, 1080 },
749 		{  720,  480 },
750 		{ 1440,  480 },
751 		{ 2880,  480 },
752 		{  720,  576 },
753 		{ 1440,  576 },
754 		{ 2880,  576 },
755 	};
756 
757 	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
758 		return;
759 
760 	for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
761 		if ((mode->hdisplay == cea_interlaced[i].w) &&
762 		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
763 			mode->vdisplay *= 2;
764 			mode->vsync_start *= 2;
765 			mode->vsync_end *= 2;
766 			mode->vtotal *= 2;
767 			mode->vtotal |= 1;
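			/* interlaced frames have an odd total line count
			 * (e.g. 525, 625, 1125) */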
768 		}
769 	}
770 
771 	mode->flags |= DRM_MODE_FLAG_INTERLACE;
772 }
773 
774 /**
775  * drm_mode_detailed - create a new mode from an EDID detailed timing section
776  * @dev: DRM device (needed to create new mode)
777  * @edid: EDID block
778  * @timing: EDID detailed timing info
779  * @quirks: quirks to apply
780  *
781  * An EDID detailed timing block contains enough info for us to create and
782  * return a new struct drm_display_mode.
783  */
784 static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
785 						  struct edid *edid,
786 						  struct detailed_timing *timing,
787 						  u32 quirks)
788 {
789 	struct drm_display_mode *mode;
790 	struct detailed_pixel_timing *pt = &timing->data.pixel_data;
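	/*
	 * Each detailed timing value is split into a low byte plus a few
	 * high bits packed into shared "hi" bytes; reassemble them per the
	 * EDID detailed timing layout.
	 */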
791 	unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
792 	unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
793 	unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
794 	unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
795 	unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
796 	unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
797 	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
798 	unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
799 
800 	/* ignore tiny modes */
801 	if (hactive < 64 || vactive < 64)
802 		return NULL;
803 
804 	if (pt->misc & DRM_EDID_PT_STEREO) {
805 		printk(KERN_WARNING "stereo mode not supported\n");
806 		return NULL;
807 	}
808 	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
809 		printk(KERN_WARNING "composite sync not supported\n");
810 	}
811 
812 	/* a zero hsync or vsync pulse width makes the timing invalid */
813 	if (!hsync_pulse_width || !vsync_pulse_width) {
814 		DRM_DEBUG_KMS("Incorrect Detailed timing. "
815 				"Wrong Hsync/Vsync pulse width\n");
816 		return NULL;
817 	}
818 	mode = drm_mode_create(dev);
819 	if (!mode)
820 		return NULL;
821 
822 	mode->type = DRM_MODE_TYPE_DRIVER;
823 
824 	if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
825 		timing->pixel_clock = cpu_to_le16(1088);
826 
827 	mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
828 
829 	mode->hdisplay = hactive;
830 	mode->hsync_start = mode->hdisplay + hsync_offset;
831 	mode->hsync_end = mode->hsync_start + hsync_pulse_width;
832 	mode->htotal = mode->hdisplay + hblank;
833 
834 	mode->vdisplay = vactive;
835 	mode->vsync_start = mode->vdisplay + vsync_offset;
836 	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
837 	mode->vtotal = mode->vdisplay + vblank;
838 
839 	/* Some EDIDs have bogus h/vtotal values */
840 	if (mode->hsync_end > mode->htotal)
841 		mode->htotal = mode->hsync_end + 1;
842 	if (mode->vsync_end > mode->vtotal)
843 		mode->vtotal = mode->vsync_end + 1;
844 
845 	drm_mode_do_interlace_quirk(mode, pt);
846 
847 	drm_mode_set_name(mode);
848 
849 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
850 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
851 	}
852 
853 	mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
854 		DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
855 	mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
856 		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
857 
858 	mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
859 	mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
860 
861 	if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
862 		mode->width_mm *= 10;
863 		mode->height_mm *= 10;
864 	}
865 
866 	if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
867 		mode->width_mm = edid->width_cm * 10;
868 		mode->height_mm = edid->height_cm * 10;
869 	}
870 
871 	return mode;
872 }
873 
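/*
 * CVT reduced-blanking timings use a fixed 160-pixel horizontal blank
 * (48 front porch, 32 sync, 80 back porch) and a 3-line vertical front
 * porch; matching all four values is the fingerprint checked below.
 */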
874 static bool
875 mode_is_rb(struct drm_display_mode *mode)
876 {
877 	return (mode->htotal - mode->hdisplay == 160) &&
878 	       (mode->hsync_end - mode->hdisplay == 80) &&
879 	       (mode->hsync_end - mode->hsync_start == 32) &&
880 	       (mode->vsync_start - mode->vdisplay == 3);
881 }
882 
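/*
 * Range descriptors give min/max rates in single bytes; EDID 1.4 added
 * flag bits in byte 4 that add 255 to a limit whose real value doesn't
 * fit in a byte.
 */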
883 static bool
884 mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
885 {
886 	int hsync, hmin, hmax;
887 
888 	hmin = t[7];
889 	if (edid->revision >= 4)
890 	    hmin += ((t[4] & 0x04) ? 255 : 0);
891 	hmax = t[8];
892 	if (edid->revision >= 4)
893 	    hmax += ((t[4] & 0x08) ? 255 : 0);
894 	hsync = drm_mode_hsync(mode);
895 
896 	return (hsync <= hmax && hsync >= hmin);
897 }
898 
899 static bool
900 mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
901 {
902 	int vsync, vmin, vmax;
903 
904 	vmin = t[5];
905 	if (edid->revision >= 4)
906 	    vmin += ((t[4] & 0x01) ? 255 : 0);
907 	vmax = t[6];
908 	if (edid->revision >= 4)
909 	    vmax += ((t[4] & 0x02) ? 255 : 0);
910 	vsync = drm_mode_vrefresh(mode);
911 
912 	return (vsync <= vmax && vsync >= vmin);
913 }
914 
915 static u32
916 range_pixel_clock(struct edid *edid, u8 *t)
917 {
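	/*
	 * Byte 9 of a range descriptor is the max pixel clock in 10 MHz
	 * units; EDID 1.4 CVT descriptors refine it downward in 0.25 MHz
	 * steps via the top six bits of byte 12.
	 */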
918 	/* unspecified */
919 	if (t[9] == 0 || t[9] == 255)
920 		return 0;
921 
922 	/* 1.4 with CVT support gives us real precision, yay */
923 	if (edid->revision >= 4 && t[10] == 0x04)
924 		return (t[9] * 10000) - ((t[12] >> 2) * 250);
925 
926 	/* 1.3 is pathetic, so fuzz up a bit */
927 	return t[9] * 10000 + 5001;
928 }
929 
930 static bool
931 mode_in_range(struct drm_display_mode *mode, struct edid *edid,
932 	      struct detailed_timing *timing)
933 {
934 	u32 max_clock;
935 	u8 *t = (u8 *)timing;
936 
937 	if (!mode_in_hsync_range(mode, edid, t))
938 		return false;
939 
940 	if (!mode_in_vsync_range(mode, edid, t))
941 		return false;
942 
943 	if ((max_clock = range_pixel_clock(edid, t)))
944 		if (mode->clock > max_clock)
945 			return false;
946 
947 	/* 1.4 max horizontal check */
948 	if (edid->revision >= 4 && t[10] == 0x04)
949 		if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
950 			return false;
951 
952 	if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
953 		return false;
954 
955 	return true;
956 }
957 
958 /*
959  * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
960  * need to account for them.
961  */
962 static int
963 drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
964 			struct detailed_timing *timing)
965 {
966 	int i, modes = 0;
967 	struct drm_display_mode *newmode;
968 	struct drm_device *dev = connector->dev;
969 
970 	for (i = 0; i < drm_num_dmt_modes; i++) {
971 		if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
972 			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
973 			if (newmode) {
974 				drm_mode_probed_add(connector, newmode);
975 				modes++;
976 			}
977 		}
978 	}
979 
980 	return modes;
981 }
982 
983 static void
984 do_inferred_modes(struct detailed_timing *timing, void *c)
985 {
986 	struct detailed_mode_closure *closure = c;
987 	struct detailed_non_pixel *data = &timing->data.other_data;
988 	int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
989 
990 	if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
991 		closure->modes += drm_gtf_modes_for_range(closure->connector,
992 							  closure->edid,
993 							  timing);
994 }
995 
996 static int
997 add_inferred_modes(struct drm_connector *connector, struct edid *edid)
998 {
999 	struct detailed_mode_closure closure = {
1000 		connector, edid, 0, 0, 0
1001 	};
1002 
1003 	if (version_greater(edid, 1, 0))
1004 		drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
1005 					    &closure);
1006 
1007 	return closure.modes;
1008 }
1009 
1010 static int
1011 drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
1012 {
1013 	int i, j, m, modes = 0;
1014 	struct drm_display_mode *mode;
1015 	u8 *est = ((u8 *)timing) + 5;
1016 
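	/*
	 * Established Timings III descriptors carry a bitmap of extra DMT
	 * modes; walk each byte MSB-first, mapping bit positions onto the
	 * est3_modes table.
	 */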
1017 	for (i = 0; i < 6; i++) {
1018 		for (j = 7; j > 0; j--) {
1019 			m = (i * 8) + (7 - j);
1020 			if (m >= ARRAY_SIZE(est3_modes))
1021 				break;
1022 			if (est[i] & (1 << j)) {
1023 				mode = drm_mode_find_dmt(connector->dev,
1024 							 est3_modes[m].w,
1025 							 est3_modes[m].h,
1026 							 est3_modes[m].r
1027 							 /*, est3_modes[m].rb */);
1028 				if (mode) {
1029 					drm_mode_probed_add(connector, mode);
1030 					modes++;
1031 				}
1032 			}
1033 		}
1034 	}
1035 
1036 	return modes;
1037 }
1038 
1039 static void
1040 do_established_modes(struct detailed_timing *timing, void *c)
1041 {
1042 	struct detailed_mode_closure *closure = c;
1043 	struct detailed_non_pixel *data = &timing->data.other_data;
1044 
1045 	if (data->type == EDID_DETAIL_EST_TIMINGS)
1046 		closure->modes += drm_est3_modes(closure->connector, timing);
1047 }
1048 
1049 /**
1050  * add_established_modes - get est. modes from EDID and add them
1051  * @edid: EDID block to scan
1052  *
1053  * Each EDID block contains a bitmap of the supported "established timings"
1054  * (the edid_est_modes table).  Tease them out and add them to the mode list.
1055  */
1056 static int
1057 add_established_modes(struct drm_connector *connector, struct edid *edid)
1058 {
1059 	struct drm_device *dev = connector->dev;
1060 	unsigned long est_bits = edid->established_timings.t1 |
1061 		(edid->established_timings.t2 << 8) |
1062 		((edid->established_timings.mfg_rsvd & 0x80) << 9);
1063 	int i, modes = 0;
1064 	struct detailed_mode_closure closure = {
1065 		connector, edid, 0, 0, 0
1066 	};
1067 
1068 	for (i = 0; i <= EDID_EST_TIMINGS; i++) {
1069 		if (est_bits & (1<<i)) {
1070 			struct drm_display_mode *newmode;
1071 			newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
1072 			if (newmode) {
1073 				drm_mode_probed_add(connector, newmode);
1074 				modes++;
1075 			}
1076 		}
1077 	}
1078 
1079 	if (version_greater(edid, 1, 0))
1080 		    drm_for_each_detailed_block((u8 *)edid,
1081 						do_established_modes, &closure);
1082 
1083 	return modes + closure.modes;
1084 }
1085 
1086 static void
1087 do_standard_modes(struct detailed_timing *timing, void *c)
1088 {
1089 	struct detailed_mode_closure *closure = c;
1090 	struct detailed_non_pixel *data = &timing->data.other_data;
1091 	struct drm_connector *connector = closure->connector;
1092 	struct edid *edid = closure->edid;
1093 
1094 	if (data->type == EDID_DETAIL_STD_MODES) {
1095 		int i;
1096 		for (i = 0; i < 6; i++) {
1097 			struct std_timing *std;
1098 			struct drm_display_mode *newmode;
1099 
1100 			std = &data->data.timings[i];
1101 			newmode = drm_mode_std(connector, edid, std,
1102 					       edid->revision);
1103 			if (newmode) {
1104 				drm_mode_probed_add(connector, newmode);
1105 				closure->modes++;
1106 			}
1107 		}
1108 	}
1109 }
1110 
1111 /**
1112  * add_standard_modes - get std. modes from EDID and add them
1113  * @edid: EDID block to scan
1114  *
1115  * Standard modes can be calculated using the appropriate standard (DMT,
1116  * GTF or CVT).  Grab them from @edid and add them to the list.
1117  */
1118 static int
1119 add_standard_modes(struct drm_connector *connector, struct edid *edid)
1120 {
1121 	int i, modes = 0;
1122 	struct detailed_mode_closure closure = {
1123 		connector, edid, 0, 0, 0
1124 	};
1125 
1126 	for (i = 0; i < EDID_STD_TIMINGS; i++) {
1127 		struct drm_display_mode *newmode;
1128 
1129 		newmode = drm_mode_std(connector, edid,
1130 				       &edid->standard_timings[i],
1131 				       edid->revision);
1132 		if (newmode) {
1133 			drm_mode_probed_add(connector, newmode);
1134 			modes++;
1135 		}
1136 	}
1137 
1138 	if (version_greater(edid, 1, 0))
1139 		drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
1140 					    &closure);
1141 
1142 	/* XXX should also look for standard codes in VTB blocks */
1143 
1144 	return modes + closure.modes;
1145 }
1146 
1147 static int drm_cvt_modes(struct drm_connector *connector,
1148 			 struct detailed_timing *timing)
1149 {
1150 	int i, j, modes = 0;
1151 	struct drm_display_mode *newmode;
1152 	struct drm_device *dev = connector->dev;
1153 	struct cvt_timing *cvt;
1154 	const int rates[] = { 60, 85, 75, 60, 50 };
1155 	const u8 empty[3] = { 0, 0, 0 };
1156 
1157 	for (i = 0; i < 4; i++) {
1158 		int uninitialized_var(width), height;
1159 		cvt = &(timing->data.other_data.data.cvt[i]);
1160 
1161 		if (!memcmp(cvt->code, empty, 3))
1162 			continue;
1163 
1164 		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
1165 		switch (cvt->code[1] & 0x0c) {
1166 		case 0x00:
1167 			width = height * 4 / 3;
1168 			break;
1169 		case 0x04:
1170 			width = height * 16 / 9;
1171 			break;
1172 		case 0x08:
1173 			width = height * 16 / 10;
1174 			break;
1175 		case 0x0c:
1176 			width = height * 15 / 9;
1177 			break;
1178 		}
1179 
1180 		for (j = 1; j < 5; j++) {
1181 			if (cvt->code[2] & (1 << j)) {
1182 				newmode = drm_cvt_mode(dev, width, height,
1183 						       rates[j], j == 0,
1184 						       false, false);
1185 				if (newmode) {
1186 					drm_mode_probed_add(connector, newmode);
1187 					modes++;
1188 				}
1189 			}
1190 		}
1191 	}
1192 
1193 	return modes;
1194 }
1195 
1196 static void
1197 do_cvt_mode(struct detailed_timing *timing, void *c)
1198 {
1199 	struct detailed_mode_closure *closure = c;
1200 	struct detailed_non_pixel *data = &timing->data.other_data;
1201 
1202 	if (data->type == EDID_DETAIL_CVT_3BYTE)
1203 		closure->modes += drm_cvt_modes(closure->connector, timing);
1204 }
1205 
1206 static int
1207 add_cvt_modes(struct drm_connector *connector, struct edid *edid)
1208 {
1209 	struct detailed_mode_closure closure = {
1210 		connector, edid, 0, 0, 0
1211 	};
1212 
1213 	if (version_greater(edid, 1, 2))
1214 		drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
1215 
1216 	/* XXX should also look for CVT codes in VTB blocks */
1217 
1218 	return closure.modes;
1219 }
1220 
1221 static void
1222 do_detailed_mode(struct detailed_timing *timing, void *c)
1223 {
1224 	struct detailed_mode_closure *closure = c;
1225 	struct drm_display_mode *newmode;
1226 
1227 	if (timing->pixel_clock) {
1228 		newmode = drm_mode_detailed(closure->connector->dev,
1229 					    closure->edid, timing,
1230 					    closure->quirks);
1231 		if (!newmode)
1232 			return;
1233 
1234 		if (closure->preferred)
1235 			newmode->type |= DRM_MODE_TYPE_PREFERRED;
1236 
1237 		drm_mode_probed_add(closure->connector, newmode);
1238 		closure->modes++;
1239 		closure->preferred = 0;
1240 	}
1241 }
1242 
1243 /*
1244  * add_detailed_modes - Add modes from detailed timings
1245  * @connector: attached connector
1246  * @edid: EDID block to scan
1247  * @quirks: quirks to apply
1248  */
1249 static int
1250 add_detailed_modes(struct drm_connector *connector, struct edid *edid,
1251 		   u32 quirks)
1252 {
1253 	struct detailed_mode_closure closure = {
1254 		connector,
1255 		edid,
1256 		1,
1257 		quirks,
1258 		0
1259 	};
1260 
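	/*
	 * EDID 1.4 makes the first detailed timing preferred by definition;
	 * older EDIDs only do so when the feature bit says so.
	 */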
1261 	if (closure.preferred && !version_greater(edid, 1, 3))
1262 		closure.preferred =
1263 		    (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
1264 
1265 	drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
1266 
1267 	return closure.modes;
1268 }
1269 
1270 #define HDMI_IDENTIFIER 0x000C03
1271 #define VENDOR_BLOCK    0x03
1272 /**
1273  * drm_detect_hdmi_monitor - detect whether monitor is HDMI.
1274  * @edid: monitor EDID information
1275  *
1276  * Parse the CEA extension according to CEA-861-B.
1277  * Return true if HDMI, false if not or unknown.
1278  */
1279 bool drm_detect_hdmi_monitor(struct edid *edid)
1280 {
1281 	char *edid_ext = NULL;
1282 	int i, hdmi_id;
1283 	int start_offset, end_offset;
1284 	bool is_hdmi = false;
1285 
1286 	/* No EDID or EDID extensions */
1287 	if (edid == NULL || edid->extensions == 0)
1288 		goto end;
1289 
1290 	/* Find CEA extension */
1291 	for (i = 0; i < edid->extensions; i++) {
1292 		edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
1293 		/* This block is CEA extension */
1294 		if (edid_ext[0] == 0x02)
1295 			break;
1296 	}
1297 
1298 	if (i == edid->extensions)
1299 		goto end;
1300 
1301 	/* Data blocks start at byte 4 and run up to the DTD offset in byte 2 */
1302 	start_offset = 4;
1303 	end_offset = edid_ext[2];
1304 
1305 	/*
1306 	 * The HDMI identifier lives in a Vendor Specific Data Block, so
1307 	 * search every data block in the CEA extension for it.
1308 	 */
1309 	for (i = start_offset; i < end_offset;
1310 		/* advance by the data block length */
1311 		i += ((edid_ext[i] & 0x1f) + 1)) {
1312 		/* Find vendor specific block */
1313 		if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
1314 			hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
1315 				  edid_ext[i + 3] << 16;
1316 			/* Find HDMI identifier */
1317 			if (hdmi_id == HDMI_IDENTIFIER)
1318 				is_hdmi = true;
1319 			break;
1320 		}
1321 	}
1322 
1323 end:
1324 	return is_hdmi;
1325 }
1326 EXPORT_SYMBOL(drm_detect_hdmi_monitor);
1327 
1328 /**
1329  * drm_add_edid_modes - add modes from EDID data, if available
1330  * @connector: connector we're probing
1331  * @edid: edid data
1332  *
1333  * Add the specified modes to the connector's mode list.
1334  *
1335  * Return number of modes added or 0 if we couldn't find any.
1336  */
1337 int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
1338 {
1339 	int num_modes = 0;
1340 	u32 quirks;
1341 
1342 	if (edid == NULL) {
1343 		return 0;
1344 	}
1345 	if (!drm_edid_is_valid(edid)) {
1346 		dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
1347 			 drm_get_connector_name(connector));
1348 		return 0;
1349 	}
1350 
1351 	quirks = edid_get_quirks(edid);
1352 
1353 	/*
1354 	 * EDID spec says modes should be preferred in this order:
1355 	 * - preferred detailed mode
1356 	 * - other detailed modes from base block
1357 	 * - detailed modes from extension blocks
1358 	 * - CVT 3-byte code modes
1359 	 * - standard timing codes
1360 	 * - established timing codes
1361 	 * - modes inferred from GTF or CVT range information
1362 	 *
1363 	 * We get this pretty much right.
1364 	 *
1365 	 * XXX order for additional mode types in extension blocks?
1366 	 */
1367 	num_modes += add_detailed_modes(connector, edid, quirks);
1368 	num_modes += add_cvt_modes(connector, edid);
1369 	num_modes += add_standard_modes(connector, edid);
1370 	num_modes += add_established_modes(connector, edid);
1371 	num_modes += add_inferred_modes(connector, edid);
1372 
1373 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
1374 		edid_fixup_preferred(connector, quirks);
1375 
1376 	connector->display_info.width_mm = edid->width_cm * 10;
1377 	connector->display_info.height_mm = edid->height_cm * 10;
1378 
1379 	return num_modes;
1380 }
1381 EXPORT_SYMBOL(drm_add_edid_modes);
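
/*
 * A minimal sketch of how a driver's ->get_modes() hook typically strings
 * these helpers together.  foo_ddc_adapter() is a stand-in for however the
 * driver locates its DDC i2c_adapter; EDID property updates and error
 * handling are omitted:
 *
 *	static int foo_get_modes(struct drm_connector *connector)
 *	{
 *		struct edid *edid;
 *		int count = 0;
 *
 *		edid = drm_get_edid(connector, foo_ddc_adapter(connector));
 *		if (edid) {
 *			count = drm_add_edid_modes(connector, edid);
 *			kfree(edid);
 *		}
 *		return count;
 *	}
 */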
1382 
1383 /**
1384  * drm_add_modes_noedid - add modes for the connectors without EDID
1385  * @connector: connector we're probing
1386  * @hdisplay: the horizontal display limit
1387  * @vdisplay: the vertical display limit
1388  *
1389  * Add the specified modes to the connector's mode list.  A mode is only
1390  * added if its hdisplay/vdisplay does not exceed the given limits.
1391  *
1392  * Return number of modes added or 0 if we couldn't find any.
1393  */
1394 int drm_add_modes_noedid(struct drm_connector *connector,
1395 			int hdisplay, int vdisplay)
1396 {
1397 	int i, count, num_modes = 0;
1398 	struct drm_display_mode *mode, *ptr;
1399 	struct drm_device *dev = connector->dev;
1400 
1401 	count = ARRAY_SIZE(drm_dmt_modes);
1402 	if (hdisplay < 0)
1403 		hdisplay = 0;
1404 	if (vdisplay < 0)
1405 		vdisplay = 0;
1406 
1407 	for (i = 0; i < count; i++) {
1408 		ptr = &drm_dmt_modes[i];
1409 		if (hdisplay && vdisplay) {
1410 			/*
1411 			 * Only when both limits are valid are they used to
1412 			 * decide whether the mode should be added to the
1413 			 * connector's mode list.
1414 			 */
1415 			if (ptr->hdisplay > hdisplay ||
1416 					ptr->vdisplay > vdisplay)
1417 				continue;
1418 		}
1419 		if (drm_mode_vrefresh(ptr) > 61)
1420 			continue;
1421 		mode = drm_mode_duplicate(dev, ptr);
1422 		if (mode) {
1423 			drm_mode_probed_add(connector, mode);
1424 			num_modes++;
1425 		}
1426 	}
1427 	return num_modes;
1428 }
1429 EXPORT_SYMBOL(drm_add_modes_noedid);
1430