xref: /linux/drivers/gpu/drm/i915/display/intel_hdcp.c (revision 64b14a184e83eb62ea0615e31a409956049d40e7)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17 
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_types.h"
24 #include "intel_hdcp.h"
25 #include "intel_pcode.h"
26 
27 #define KEY_LOAD_TRIES	5
28 #define HDCP2_LC_RETRY_CNT			3
29 
30 static int intel_conn_to_vcpi(struct intel_connector *connector)
31 {
32 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
33 	return connector->port	? connector->port->vcpi.vcpi : 0;
34 }
35 
/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in a DP MST topology because the security f/w
 * doesn't have any provision to mark the content_type for each stream
 * separately; it marks all available streams with the content_type provided
 * at the time of port authentication. This may prohibit userspace from using
 * type1 content on an HDCP 2.2 capable sink, because other sinks in the
 * DP MST topology are not capable of HDCP 2.2. Though it is not compulsory,
 * the security fw should change its policy to mark different content_types
 * for different streams.
 */
static int
intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_digital_port *conn_dig_port;
	struct intel_connector *connector;
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	bool enforce_type0 = false;
	int k;

	data->k = 0;

	/* Port already authenticated: the stream set is already programmed. */
	if (dig_port->hdcp_auth_status)
		return 0;

	/* Collect the stream id of every connected MST stream on this port. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.status == connector_status_disconnected)
			continue;

		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
			continue;

		conn_dig_port = intel_attached_dig_port(connector);
		if (conn_dig_port != dig_port)
			continue;

		/* One type0-only sink downgrades the whole topology to type0. */
		if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
			enforce_type0 = true;

		data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
		data->k++;

		/* if there is only one active stream */
		if (dig_port->dp.active_mst_links <= 1)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Need at least one stream; streams[] is sized by the pipe count. */
	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
		return -EINVAL;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;

	return 0;
}
99 
100 static int intel_hdcp_prepare_streams(struct intel_connector *connector)
101 {
102 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
103 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
104 	struct intel_hdcp *hdcp = &connector->hdcp;
105 	int ret;
106 
107 	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
108 		data->k = 1;
109 		data->streams[0].stream_type = hdcp->content_type;
110 	} else {
111 		ret = intel_hdcp_required_content_stream(dig_port);
112 		if (ret)
113 			return ret;
114 	}
115 
116 	return 0;
117 }
118 
119 static
120 bool intel_hdcp_is_ksv_valid(u8 *ksv)
121 {
122 	int i, ones = 0;
123 	/* KSV has 20 1's and 20 0's */
124 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
125 		ones += hweight8(ksv[i]);
126 	if (ones != 20)
127 		return false;
128 
129 	return true;
130 }
131 
132 static
133 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
134 			       const struct intel_hdcp_shim *shim, u8 *bksv)
135 {
136 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
137 	int ret, i, tries = 2;
138 
139 	/* HDCP spec states that we must retry the bksv if it is invalid */
140 	for (i = 0; i < tries; i++) {
141 		ret = shim->read_bksv(dig_port, bksv);
142 		if (ret)
143 			return ret;
144 		if (intel_hdcp_is_ksv_valid(bksv))
145 			break;
146 	}
147 	if (i == tries) {
148 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
149 		return -ENODEV;
150 	}
151 
152 	return 0;
153 }
154 
155 /* Is HDCP1.4 capable on Platform and Sink */
156 bool intel_hdcp_capable(struct intel_connector *connector)
157 {
158 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
159 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
160 	bool capable = false;
161 	u8 bksv[5];
162 
163 	if (!shim)
164 		return capable;
165 
166 	if (shim->hdcp_capable) {
167 		shim->hdcp_capable(dig_port, &capable);
168 	} else {
169 		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
170 			capable = true;
171 	}
172 
173 	return capable;
174 }
175 
176 /* Is HDCP2.2 capable on Platform and Sink */
177 bool intel_hdcp2_capable(struct intel_connector *connector)
178 {
179 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
180 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
181 	struct intel_hdcp *hdcp = &connector->hdcp;
182 	bool capable = false;
183 
184 	/* I915 support for HDCP2.2 */
185 	if (!hdcp->hdcp2_supported)
186 		return false;
187 
188 	/* MEI interface is solid */
189 	mutex_lock(&dev_priv->hdcp_comp_mutex);
190 	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
191 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
192 		return false;
193 	}
194 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
195 
196 	/* Sink's capability for HDCP2.2 */
197 	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
198 
199 	return capable;
200 }
201 
202 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
203 			      enum transcoder cpu_transcoder, enum port port)
204 {
205 	return intel_de_read(dev_priv,
206 	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
207 	       HDCP_STATUS_ENC;
208 }
209 
210 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
211 			       enum transcoder cpu_transcoder, enum port port)
212 {
213 	return intel_de_read(dev_priv,
214 	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
215 	       LINK_ENCRYPTION_STATUS;
216 }
217 
/*
 * Wait for the repeater's KSV list to become ready, polling through the
 * shim's read_ksv_ready() hook. The HDCP spec allows the repeater at most
 * 5 seconds to assemble the list. Returns 0 when ready, the shim's read
 * error, or -ETIMEDOUT.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* Distinguish a sink read failure from a plain timeout. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
238 
239 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
240 {
241 	enum i915_power_well_id id;
242 	intel_wakeref_t wakeref;
243 	bool enabled = false;
244 
245 	/*
246 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
247 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
248 	 */
249 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
250 		id = HSW_DISP_PW_GLOBAL;
251 	else
252 		id = SKL_DISP_PW_1;
253 
254 	/* PG1 (power well #1) needs to be enabled */
255 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
256 		enabled = intel_display_power_well_is_enabled(dev_priv, id);
257 
258 	/*
259 	 * Another req for hdcp key loadability is enabled state of pll for
260 	 * cdclk. Without active crtc we wont land here. So we are assuming that
261 	 * cdclk is already on.
262 	 */
263 
264 	return enabled;
265 }
266 
267 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
268 {
269 	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
270 	intel_de_write(dev_priv, HDCP_KEY_STATUS,
271 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
272 }
273 
/*
 * Load the HDCP1.4 keys into the display HW and hand the Aksv over to the
 * PCH. Returns 0 on success (including when the keys were already loaded),
 * -ENXIO when the HW reports a key-load failure, or a pcode/wait error.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded and valid. */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
		ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		/* Load completed but the HW flagged the keys as bad. */
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
325 
326 /* Returns updated SHA-1 index */
327 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
328 {
329 	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
330 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
331 		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
332 		return -ETIMEDOUT;
333 	}
334 	return 0;
335 }
336 
/*
 * Return the HDCP_REP_CTL bits (repeater-present and SHA1 M0 select) for
 * the given transcoder (display ver >= 12) or DDI port (older platforms).
 *
 * NOTE(review): the return type is u32 but the error paths return -EINVAL,
 * which a caller cannot distinguish from a valid bitmask — confirm callers
 * never pass an unknown transcoder/port here.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Display ver 12+ selects by transcoder rather than by DDI port. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
378 
/*
 * Part 2 helper: feed the repeater's KSV list, BSTATUS (HDMI) / BINFO (DP)
 * and M0 through the HW SHA-1 engine and let the HW compare the digest
 * against the V' parts read from the sink. Returns 0 when V' matches,
 * -ENXIO on mismatch, -ETIMEDOUT on a stalled engine, or a shim read error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
621 
/*
 * Implements Part 2 of the HDCP authorization procedure: repeater
 * authentication. Waits for the KSV fifo, validates BSTATUS topology
 * limits, checks the KSV list against the revocation list and validates
 * V' (retried up to 3 times, as the DP spec mandates re-reads on mismatch).
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* The sink reports an over-large topology; authentication must fail. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Reject the topology if any downstream KSV has been revoked. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	/* All attempts failed; ret holds the last validation error. */
	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
705 
/*
 * Implements Part 1 of the HDCP authorization procedure: An/Aksv exchange,
 * Bksv validation and revocation check, R0/R0' comparison and enabling of
 * link encryption. Hands off to intel_hdcp_auth_downstream() (Part 2) when
 * the sink is a repeater. Returns 0 on success or a negative error code.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions give byte access to the u32 register values the HW wants. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' wait below is measured from the moment Aksv was written. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Refuse to authenticate a sink whose Bksv has been revoked. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
878 
/*
 * Tear down HDCP1.4 on @connector: disable stream encryption (MST), then —
 * once no other stream on the port still uses HDCP — disable port
 * encryption, clear the repeater control bits and drop HDCP signalling.
 * Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	/* Disable port encryption and wait for the whole status to clear. */
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear only this transcoder/port's bits out of HDCP_REP_CTL. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
934 
935 static int _intel_hdcp_enable(struct intel_connector *connector)
936 {
937 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
938 	struct intel_hdcp *hdcp = &connector->hdcp;
939 	int i, ret, tries = 3;
940 
941 	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
942 		    connector->base.name, connector->base.base.id);
943 
944 	if (!hdcp_key_loadable(dev_priv)) {
945 		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
946 		return -ENXIO;
947 	}
948 
949 	for (i = 0; i < KEY_LOAD_TRIES; i++) {
950 		ret = intel_hdcp_load_keys(dev_priv);
951 		if (!ret)
952 			break;
953 		intel_hdcp_clear_keys(dev_priv);
954 	}
955 	if (ret) {
956 		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
957 			ret);
958 		return ret;
959 	}
960 
961 	/* Incase of authentication failures, HDCP spec expects reauth. */
962 	for (i = 0; i < tries; i++) {
963 		ret = intel_hdcp_auth(connector);
964 		if (!ret) {
965 			hdcp->hdcp_encrypted = true;
966 			return 0;
967 		}
968 
969 		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
970 
971 		/* Ensuring HDCP encryption and signalling are stopped. */
972 		_intel_hdcp_disable(connector);
973 	}
974 
975 	drm_dbg_kms(&dev_priv->drm,
976 		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
977 	return ret;
978 }
979 
/*
 * Resolve the connector that embeds the given hdcp state; hdcp is a
 * member of struct intel_connector, so container_of() walks back to it.
 */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
984 
985 static void intel_hdcp_update_value(struct intel_connector *connector,
986 				    u64 value, bool update_property)
987 {
988 	struct drm_device *dev = connector->base.dev;
989 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
990 	struct intel_hdcp *hdcp = &connector->hdcp;
991 
992 	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
993 
994 	if (hdcp->value == value)
995 		return;
996 
997 	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
998 
999 	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1000 		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
1001 			dig_port->num_hdcp_streams--;
1002 	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1003 		dig_port->num_hdcp_streams++;
1004 	}
1005 
1006 	hdcp->value = value;
1007 	if (update_property) {
1008 		drm_connector_get(&connector->base);
1009 		schedule_work(&hdcp->prop_work);
1010 	}
1011 }
1012 
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	/* Per-connector state first, then the shared per-port state */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * HW dropped encryption behind our back: flag DESIRED so userspace
	 * learns protection was lost and can re-request it.
	 */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Non-zero from the shim's check_link means the link is still good */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	/* Link failed: full teardown + re-enable; fall back to DESIRED on error */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1083 
/*
 * Deferred worker that mirrors hdcp->value into the connector's content
 * protection property. Runs from process context since it needs
 * connection_mutex, which can't be taken where the value is updated.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	/* Drop the reference taken when this work was scheduled */
	drm_connector_put(&connector->base);
}
1108 
1109 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1110 {
1111 	return INTEL_INFO(dev_priv)->display.has_hdcp &&
1112 			(DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1113 }
1114 
1115 static int
1116 hdcp2_prepare_ake_init(struct intel_connector *connector,
1117 		       struct hdcp2_ake_init *ake_data)
1118 {
1119 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1120 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1121 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1122 	struct i915_hdcp_comp_master *comp;
1123 	int ret;
1124 
1125 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1126 	comp = dev_priv->hdcp_master;
1127 
1128 	if (!comp || !comp->ops) {
1129 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1130 		return -EINVAL;
1131 	}
1132 
1133 	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1134 	if (ret)
1135 		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1136 			    ret);
1137 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1138 
1139 	return ret;
1140 }
1141 
1142 static int
1143 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1144 				struct hdcp2_ake_send_cert *rx_cert,
1145 				bool *paired,
1146 				struct hdcp2_ake_no_stored_km *ek_pub_km,
1147 				size_t *msg_sz)
1148 {
1149 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1150 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1151 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1152 	struct i915_hdcp_comp_master *comp;
1153 	int ret;
1154 
1155 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1156 	comp = dev_priv->hdcp_master;
1157 
1158 	if (!comp || !comp->ops) {
1159 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1160 		return -EINVAL;
1161 	}
1162 
1163 	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1164 							 rx_cert, paired,
1165 							 ek_pub_km, msg_sz);
1166 	if (ret < 0)
1167 		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1168 			    ret);
1169 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1170 
1171 	return ret;
1172 }
1173 
1174 static int hdcp2_verify_hprime(struct intel_connector *connector,
1175 			       struct hdcp2_ake_send_hprime *rx_hprime)
1176 {
1177 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1178 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1179 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1180 	struct i915_hdcp_comp_master *comp;
1181 	int ret;
1182 
1183 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1184 	comp = dev_priv->hdcp_master;
1185 
1186 	if (!comp || !comp->ops) {
1187 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1188 		return -EINVAL;
1189 	}
1190 
1191 	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1192 	if (ret < 0)
1193 		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1194 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1195 
1196 	return ret;
1197 }
1198 
1199 static int
1200 hdcp2_store_pairing_info(struct intel_connector *connector,
1201 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1202 {
1203 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1204 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1205 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1206 	struct i915_hdcp_comp_master *comp;
1207 	int ret;
1208 
1209 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1210 	comp = dev_priv->hdcp_master;
1211 
1212 	if (!comp || !comp->ops) {
1213 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1214 		return -EINVAL;
1215 	}
1216 
1217 	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1218 	if (ret < 0)
1219 		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1220 			    ret);
1221 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1222 
1223 	return ret;
1224 }
1225 
1226 static int
1227 hdcp2_prepare_lc_init(struct intel_connector *connector,
1228 		      struct hdcp2_lc_init *lc_init)
1229 {
1230 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1231 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1232 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1233 	struct i915_hdcp_comp_master *comp;
1234 	int ret;
1235 
1236 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1237 	comp = dev_priv->hdcp_master;
1238 
1239 	if (!comp || !comp->ops) {
1240 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1241 		return -EINVAL;
1242 	}
1243 
1244 	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1245 	if (ret < 0)
1246 		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1247 			    ret);
1248 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1249 
1250 	return ret;
1251 }
1252 
1253 static int
1254 hdcp2_verify_lprime(struct intel_connector *connector,
1255 		    struct hdcp2_lc_send_lprime *rx_lprime)
1256 {
1257 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1258 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1259 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1260 	struct i915_hdcp_comp_master *comp;
1261 	int ret;
1262 
1263 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1264 	comp = dev_priv->hdcp_master;
1265 
1266 	if (!comp || !comp->ops) {
1267 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1268 		return -EINVAL;
1269 	}
1270 
1271 	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1272 	if (ret < 0)
1273 		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1274 			    ret);
1275 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1276 
1277 	return ret;
1278 }
1279 
1280 static int hdcp2_prepare_skey(struct intel_connector *connector,
1281 			      struct hdcp2_ske_send_eks *ske_data)
1282 {
1283 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1284 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1285 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1286 	struct i915_hdcp_comp_master *comp;
1287 	int ret;
1288 
1289 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1290 	comp = dev_priv->hdcp_master;
1291 
1292 	if (!comp || !comp->ops) {
1293 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1294 		return -EINVAL;
1295 	}
1296 
1297 	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1298 	if (ret < 0)
1299 		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1300 			    ret);
1301 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1302 
1303 	return ret;
1304 }
1305 
1306 static int
1307 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1308 				      struct hdcp2_rep_send_receiverid_list
1309 								*rep_topology,
1310 				      struct hdcp2_rep_send_ack *rep_send_ack)
1311 {
1312 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1313 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1314 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1315 	struct i915_hdcp_comp_master *comp;
1316 	int ret;
1317 
1318 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1319 	comp = dev_priv->hdcp_master;
1320 
1321 	if (!comp || !comp->ops) {
1322 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1323 		return -EINVAL;
1324 	}
1325 
1326 	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1327 							 rep_topology,
1328 							 rep_send_ack);
1329 	if (ret < 0)
1330 		drm_dbg_kms(&dev_priv->drm,
1331 			    "Verify rep topology failed. %d\n", ret);
1332 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1333 
1334 	return ret;
1335 }
1336 
1337 static int
1338 hdcp2_verify_mprime(struct intel_connector *connector,
1339 		    struct hdcp2_rep_stream_ready *stream_ready)
1340 {
1341 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1342 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1343 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1344 	struct i915_hdcp_comp_master *comp;
1345 	int ret;
1346 
1347 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1348 	comp = dev_priv->hdcp_master;
1349 
1350 	if (!comp || !comp->ops) {
1351 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1352 		return -EINVAL;
1353 	}
1354 
1355 	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1356 	if (ret < 0)
1357 		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1358 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1359 
1360 	return ret;
1361 }
1362 
1363 static int hdcp2_authenticate_port(struct intel_connector *connector)
1364 {
1365 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1366 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1367 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1368 	struct i915_hdcp_comp_master *comp;
1369 	int ret;
1370 
1371 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1372 	comp = dev_priv->hdcp_master;
1373 
1374 	if (!comp || !comp->ops) {
1375 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1376 		return -EINVAL;
1377 	}
1378 
1379 	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1380 	if (ret < 0)
1381 		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1382 			    ret);
1383 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1384 
1385 	return ret;
1386 }
1387 
1388 static int hdcp2_close_mei_session(struct intel_connector *connector)
1389 {
1390 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1391 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1392 	struct i915_hdcp_comp_master *comp;
1393 	int ret;
1394 
1395 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1396 	comp = dev_priv->hdcp_master;
1397 
1398 	if (!comp || !comp->ops) {
1399 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1400 		return -EINVAL;
1401 	}
1402 
1403 	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1404 					     &dig_port->hdcp_port_data);
1405 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1406 
1407 	return ret;
1408 }
1409 
/* Deauthenticate the port by tearing down its security fw session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1414 
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* AKE messages are mutually exclusive in time, so share the storage */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	/* Get AKE_Init from the security fw and send it to the sink */
	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	/* Read back the receiver certificate (AKE_Send_Cert) */
	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject the sink if its receiver ID is on the revocation list */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	/* size was set by the fw to match the stored/no-stored km variant */
	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	/* Let the security fw check the receiver's H' */
	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1504 
/*
 * Locality check (LC_Init/LC_Send_L_prime round trip). The whole exchange
 * is retried up to HDCP2_LC_RETRY_CNT times; any failing step just moves
 * on to the next attempt, and the last error is returned on exhaustion.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		/* Success only once the security fw accepts L' */
		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1540 
1541 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1542 {
1543 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1544 	struct intel_hdcp *hdcp = &connector->hdcp;
1545 	struct hdcp2_ske_send_eks send_eks;
1546 	int ret;
1547 
1548 	ret = hdcp2_prepare_skey(connector, &send_eks);
1549 	if (ret < 0)
1550 		return ret;
1551 
1552 	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1553 					sizeof(send_eks));
1554 	if (ret < 0)
1555 		return ret;
1556 
1557 	return 0;
1558 }
1559 
/*
 * Send one RepeaterAuth_Stream_Manage message describing the prepared
 * streams and verify the repeater's RepeaterAuth_Stream_Ready (M') reply.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	/* seq_num_m must not wrap; the caller has to reauthenticate instead */
	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Only data->k streams are valid: trim the unused tail off the msg */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	/* seq_num_m is consumed by this attempt whether it succeeded or not */
	hdcp->seq_num_m++;

	return ret;
}
1609 
/*
 * Read and sanity-check the repeater's ReceiverID_List, hand it to the
 * security fw for verification, then send RepeaterAuth_Send_Ack back.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* The first RecvId_List of a session must carry seq_num_v == 0 */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1685 
/*
 * Run the HDCP2.2 authentication sequence against the sink: AKE, locality
 * check, SKE, optional stream-type configuration and, for repeaters,
 * topology authentication.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook to program the stream/content type */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(dig_port,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1732 
/*
 * Enable HDCP2.2 stream encryption. Requires the link itself to already
 * report LINK_ENCRYPTION_STATUS; if it doesn't, the port session is torn
 * down so the next attempt starts with a fresh authentication.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
			connector->base.name, connector->base.base.id);
		ret = -EPERM;
		goto link_recover;
	}

	/* Optional shim hook for per-stream (MST) encryption control */
	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	/* Drop the fw session and port auth state to force a full reauth */
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");

	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1773 
/*
 * Request HDCP2.2 link encryption in HW and wait for the status bit.
 * Expects encryption to be off on entry; marks the port authenticated.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	/* Optional shim hook to enable HDCP signalling on the link */
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}
1814 
/*
 * Drop the HDCP2.2 link encryption request, wait for the status bit to
 * clear, then turn off HDCP signalling if the shim supports it.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	/* Timeout here is only logged; signalling is still torn down below */
	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1851 
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(). Only
 * relevant for repeaters; gives up early on a seq_num_m rollover since
 * that requires a full reauthentication rather than another retry.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(&i915->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
1880 
/*
 * Full HDCP2.2 bring-up: authenticate the sink (with retries), propagate
 * stream management info, authenticate the port, then enable link and
 * stream encryption. Each failed attempt tears down the fw session.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	/* Skip auth entirely if the port already authenticated (MST case) */
	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Prepare streams failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
1940 
1941 static int _intel_hdcp2_enable(struct intel_connector *connector)
1942 {
1943 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1944 	struct intel_hdcp *hdcp = &connector->hdcp;
1945 	int ret;
1946 
1947 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1948 		    connector->base.name, connector->base.base.id,
1949 		    hdcp->content_type);
1950 
1951 	ret = hdcp2_authenticate_and_encrypt(connector);
1952 	if (ret) {
1953 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
1954 			    hdcp->content_type, ret);
1955 		return ret;
1956 	}
1957 
1958 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1959 		    connector->base.name, connector->base.base.id,
1960 		    hdcp->content_type);
1961 
1962 	hdcp->hdcp2_encrypted = true;
1963 	return 0;
1964 }
1965 
1966 static int
1967 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
1968 {
1969 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1970 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1971 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1972 	struct intel_hdcp *hdcp = &connector->hdcp;
1973 	int ret;
1974 
1975 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1976 		    connector->base.name, connector->base.base.id);
1977 
1978 	if (hdcp->shim->stream_2_2_encryption) {
1979 		ret = hdcp->shim->stream_2_2_encryption(connector, false);
1980 		if (ret) {
1981 			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
1982 				connector->base.name, connector->base.base.id);
1983 			return ret;
1984 		}
1985 		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
1986 			    transcoder_name(hdcp->stream_transcoder));
1987 
1988 		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
1989 			return 0;
1990 	}
1991 
1992 	ret = hdcp2_disable_encryption(connector);
1993 
1994 	if (hdcp2_deauthenticate_port(connector) < 0)
1995 		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1996 
1997 	connector->hdcp.hdcp2_encrypted = false;
1998 	dig_port->hdcp_auth_status = false;
1999 	data->k = 0;
2000 
2001 	return ret;
2002 }
2003 
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Expected to run only while HDCP2.2 is ENABLED and encrypted; otherwise
 * returns -EINVAL. Depending on the shim's link-check result:
 *  - HDCP_LINK_PROTECTED: re-assert the ENABLED uapi state.
 *  - HDCP_TOPOLOGY_CHANGE: re-authenticate the repeater topology only.
 *  - anything else: full disable + re-enable (re-authentication).
 * On unrecoverable failure the uapi property falls back to DESIRED.
 *
 * Lock order: hdcp->mutex, then dig_port->hdcp_mutex.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware dropped encryption behind our back: force recovery. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		/* Topology change alone only needs repeater re-auth. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Full re-authentication: disable first, then enable again. */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2098 
2099 static void intel_hdcp_check_work(struct work_struct *work)
2100 {
2101 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2102 					       struct intel_hdcp,
2103 					       check_work);
2104 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2105 
2106 	if (drm_connector_is_unregistered(&connector->base))
2107 		return;
2108 
2109 	if (!intel_hdcp2_check_link(connector))
2110 		schedule_delayed_work(&hdcp->check_work,
2111 				      DRM_HDCP2_CHECK_PERIOD_MS);
2112 	else if (!intel_hdcp_check_link(connector))
2113 		schedule_delayed_work(&hdcp->check_work,
2114 				      DRM_HDCP_CHECK_PERIOD_MS);
2115 }
2116 
2117 static int i915_hdcp_component_bind(struct device *i915_kdev,
2118 				    struct device *mei_kdev, void *data)
2119 {
2120 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2121 
2122 	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2123 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2124 	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
2125 	dev_priv->hdcp_master->mei_dev = mei_kdev;
2126 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2127 
2128 	return 0;
2129 }
2130 
2131 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2132 				       struct device *mei_kdev, void *data)
2133 {
2134 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2135 
2136 	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2137 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2138 	dev_priv->hdcp_master = NULL;
2139 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2140 }
2141 
/* Callbacks the component framework uses to pair i915 with mei_hdcp. */
static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2146 
2147 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
2148 {
2149 	switch (port) {
2150 	case PORT_A:
2151 		return MEI_DDI_A;
2152 	case PORT_B ... PORT_F:
2153 		return (enum mei_fw_ddi)port;
2154 	default:
2155 		return MEI_DDI_INVALID_PORT;
2156 	}
2157 }
2158 
2159 static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
2160 {
2161 	switch (cpu_transcoder) {
2162 	case TRANSCODER_A ... TRANSCODER_D:
2163 		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
2164 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2165 		return MEI_INVALID_TRANSCODER;
2166 	}
2167 }
2168 
2169 static int initialize_hdcp_port_data(struct intel_connector *connector,
2170 				     struct intel_digital_port *dig_port,
2171 				     const struct intel_hdcp_shim *shim)
2172 {
2173 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2174 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2175 	struct intel_hdcp *hdcp = &connector->hdcp;
2176 	enum port port = dig_port->base.port;
2177 
2178 	if (DISPLAY_VER(dev_priv) < 12)
2179 		data->fw_ddi = intel_get_mei_fw_ddi_index(port);
2180 	else
2181 		/*
2182 		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
2183 		 * with zero(INVALID PORT index).
2184 		 */
2185 		data->fw_ddi = MEI_DDI_INVALID_PORT;
2186 
2187 	/*
2188 	 * As associated transcoder is set and modified at modeset, here fw_tc
2189 	 * is initialized to zero (invalid transcoder index). This will be
2190 	 * retained for <Gen12 forever.
2191 	 */
2192 	data->fw_tc = MEI_INVALID_TRANSCODER;
2193 
2194 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2195 	data->protocol = (u8)shim->protocol;
2196 
2197 	if (!data->streams)
2198 		data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
2199 					sizeof(struct hdcp2_streamid_type),
2200 					GFP_KERNEL);
2201 	if (!data->streams) {
2202 		drm_err(&dev_priv->drm, "Out of Memory\n");
2203 		return -ENOMEM;
2204 	}
2205 	/* For SST */
2206 	data->streams[0].stream_id = 0;
2207 	data->streams[0].stream_type = hdcp->content_type;
2208 
2209 	return 0;
2210 }
2211 
2212 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2213 {
2214 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2215 		return false;
2216 
2217 	return (DISPLAY_VER(dev_priv) >= 10 ||
2218 		IS_KABYLAKE(dev_priv) ||
2219 		IS_COFFEELAKE(dev_priv) ||
2220 		IS_COMETLAKE(dev_priv));
2221 }
2222 
2223 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2224 {
2225 	int ret;
2226 
2227 	if (!is_hdcp2_supported(dev_priv))
2228 		return;
2229 
2230 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2231 	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
2232 
2233 	dev_priv->hdcp_comp_added = true;
2234 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2235 	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2236 				  I915_COMPONENT_HDCP);
2237 	if (ret < 0) {
2238 		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2239 			    ret);
2240 		mutex_lock(&dev_priv->hdcp_comp_mutex);
2241 		dev_priv->hdcp_comp_added = false;
2242 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2243 		return;
2244 	}
2245 }
2246 
2247 static void intel_hdcp2_init(struct intel_connector *connector,
2248 			     struct intel_digital_port *dig_port,
2249 			     const struct intel_hdcp_shim *shim)
2250 {
2251 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2252 	struct intel_hdcp *hdcp = &connector->hdcp;
2253 	int ret;
2254 
2255 	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2256 	if (ret) {
2257 		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2258 		return;
2259 	}
2260 
2261 	hdcp->hdcp2_supported = true;
2262 }
2263 
2264 int intel_hdcp_init(struct intel_connector *connector,
2265 		    struct intel_digital_port *dig_port,
2266 		    const struct intel_hdcp_shim *shim)
2267 {
2268 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2269 	struct intel_hdcp *hdcp = &connector->hdcp;
2270 	int ret;
2271 
2272 	if (!shim)
2273 		return -EINVAL;
2274 
2275 	if (is_hdcp2_supported(dev_priv))
2276 		intel_hdcp2_init(connector, dig_port, shim);
2277 
2278 	ret =
2279 	drm_connector_attach_content_protection_property(&connector->base,
2280 							 hdcp->hdcp2_supported);
2281 	if (ret) {
2282 		hdcp->hdcp2_supported = false;
2283 		kfree(dig_port->hdcp_port_data.streams);
2284 		return ret;
2285 	}
2286 
2287 	hdcp->shim = shim;
2288 	mutex_init(&hdcp->mutex);
2289 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2290 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2291 	init_waitqueue_head(&hdcp->cp_irq_queue);
2292 
2293 	return 0;
2294 }
2295 
/*
 * intel_hdcp_enable - enable content protection on @connector.
 * @pipe_config: committed crtc state, source of the transcoder assignment.
 * @content_type: requested DRM_MODE_HDCP_CONTENT_TYPEx.
 *
 * HDCP2.2 is tried first when the setup is capable of it; HDCP1.4 is the
 * fallback unless Type1 content was requested (1.4 cannot carry Type1).
 * On success the periodic link-check work is scheduled and the uapi
 * property moves to ENABLED.
 *
 * Lock order: hdcp->mutex, then dig_port->hdcp_mutex.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      const struct intel_crtc_state *pipe_config, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
			connector->base.name, connector->base.base.id);
		return -ENODEV;
	}

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/* MST streams authenticate through the MST master transcoder. */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	if (DISPLAY_VER(dev_priv) >= 12)
		dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2361 
/*
 * intel_hdcp_disable - disable content protection on @connector.
 *
 * Moves the uapi property to UNDESIRED and tears down whichever of
 * HDCP2.2/1.4 is currently encrypting the link. The periodic link-check
 * work is cancelled after the locks are dropped, since the worker itself
 * takes the same locks.
 *
 * Returns 0 on success (or nothing to do), -ENOENT without a shim, or the
 * teardown error.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	/* Must run unlocked: check_work takes hdcp->mutex itself. */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2390 
/*
 * intel_hdcp_update_pipe - apply the connector's content-protection state
 * at modeset/fastset time.
 *
 * Handles three transitions driven by the uapi property and content type:
 *  - UNDESIRED, or a content-type change: disable HDCP (a type change
 *    additionally re-arms the property to DESIRED so HDCP is re-enabled
 *    below with the new type);
 *  - DESIRED and not already enabled: enable HDCP;
 *  - DESIRED and already enabled: just push the current property value to
 *    userspace via prop_work.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;

	if (!connector->hdcp.shim)
		return;

	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* Reference dropped by prop_work once it has run. */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			schedule_work(&hdcp->prop_work);
		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state,
				  (u8)conn_state->hdcp_content_type);
}
2452 
2453 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2454 {
2455 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2456 	if (!dev_priv->hdcp_comp_added) {
2457 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2458 		return;
2459 	}
2460 
2461 	dev_priv->hdcp_comp_added = false;
2462 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2463 
2464 	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2465 }
2466 
/*
 * intel_hdcp_cleanup - final HDCP teardown for a connector being destroyed.
 *
 * Cancels the link-check worker, sanity-checks that no prop_work is in
 * flight, and drops the shim so no further HDCP operations can start.
 * Must only be called once the connector is unregistered.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2502 
/*
 * intel_hdcp_atomic_check - fix up the content-protection uapi state and
 * decide whether the commit needs a modeset for HDCP purposes.
 *
 * ENABLED is a kernel-owned state: whenever the connector loses its crtc
 * or undergoes a modeset, ENABLED is downgraded to DESIRED so HDCP gets
 * re-established afterwards. A change in the requested state or content
 * type forces crtc_state->mode_changed so update_pipe runs.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	/* State or content type changed: force update_pipe to run. */
	crtc_state->mode_changed = true;
}
2549 
2550 /* Handles the CP_IRQ raised from the DP HDCP sink */
2551 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2552 {
2553 	struct intel_hdcp *hdcp = &connector->hdcp;
2554 
2555 	if (!hdcp->shim)
2556 		return;
2557 
2558 	atomic_inc(&connector->hdcp.cp_irq_count);
2559 	wake_up_all(&connector->hdcp.cp_irq_queue);
2560 
2561 	schedule_delayed_work(&hdcp->check_work, 0);
2562 }
2563