xref: /linux/drivers/gpu/drm/i915/display/intel_hdcp.c (revision de848da12f752170c2ebe114804a985314fd5a6a)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/intel/i915_component.h>
17 
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_gsc.h"
27 #include "intel_hdcp_regs.h"
28 #include "intel_pcode.h"
29 
30 #define KEY_LOAD_TRIES	5
31 #define HDCP2_LC_RETRY_CNT			3
32 
33 /* WA: 16022217614 */
34 static void
35 intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
36 				      struct intel_hdcp *hdcp)
37 {
38 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
39 
40 	/* Here we assume HDMI is in TMDS mode of operation */
41 	if (encoder->type != INTEL_OUTPUT_HDMI)
42 		return;
43 
44 	if (DISPLAY_VER(dev_priv) >= 14) {
45 		if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER))
46 			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
47 				     0, HDCP_LINE_REKEY_DISABLE);
48 		else if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) ||
49 			 IS_DISPLAY_VER_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER))
50 			intel_de_rmw(dev_priv,
51 				     TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder),
52 				     0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
53 	}
54 }
55 
/*
 * Look up the MST VCPI (stream id) for a connector.
 *
 * Returns the payload's VCPI, or 0 for HDMI / DP SST (no MST port), and 0
 * on any inconsistency in the MST payload state (with a WARN).
 *
 * NOTE: the topology manager lock is taken through the atomic state's
 * acquire_ctx and is released when the caller's acquire context is torn
 * down — there is intentionally no explicit unlock here.
 */
static int intel_conn_to_vcpi(struct intel_atomic_state *state,
			      struct intel_connector *connector)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_atomic_payload *payload;
	struct drm_dp_mst_topology_state *mst_state;
	int vcpi = 0;

	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
	if (!connector->port)
		return 0;
	mgr = connector->port->mgr;

	drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
	if (drm_WARN_ON(mgr->dev, !payload))
		goto out;

	vcpi = payload->vcpi;
	/* A negative VCPI would be an invalid payload allocation. */
	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
		vcpi = 0;
		goto out;
	}
out:
	return vcpi;
}
83 
/*
 * intel_hdcp_required_content_stream selects the highest common HDCP
 * content_type for all streams in a DP MST topology, because the security
 * firmware has no provision to mark a content_type for each stream
 * separately; it marks all available streams with the content_type provided
 * at the time of port authentication. This may prevent userspace from using
 * type1 content on an HDCP 2.2 capable sink when other sinks in the DP MST
 * topology are not HDCP 2.2 capable. Though it is not compulsory, the
 * security firmware should change its policy to allow marking different
 * content_types for different streams.
 */
94 static int
95 intel_hdcp_required_content_stream(struct intel_atomic_state *state,
96 				   struct intel_digital_port *dig_port)
97 {
98 	struct drm_connector_list_iter conn_iter;
99 	struct intel_digital_port *conn_dig_port;
100 	struct intel_connector *connector;
101 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
102 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
103 	bool enforce_type0 = false;
104 	int k;
105 
106 	if (dig_port->hdcp_auth_status)
107 		return 0;
108 
109 	data->k = 0;
110 
111 	if (!dig_port->hdcp_mst_type1_capable)
112 		enforce_type0 = true;
113 
114 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
115 	for_each_intel_connector_iter(connector, &conn_iter) {
116 		if (connector->base.status == connector_status_disconnected)
117 			continue;
118 
119 		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
120 			continue;
121 
122 		conn_dig_port = intel_attached_dig_port(connector);
123 		if (conn_dig_port != dig_port)
124 			continue;
125 
126 		data->streams[data->k].stream_id =
127 			intel_conn_to_vcpi(state, connector);
128 		data->k++;
129 
130 		/* if there is only one active stream */
131 		if (dig_port->dp.active_mst_links <= 1)
132 			break;
133 	}
134 	drm_connector_list_iter_end(&conn_iter);
135 
136 	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
137 		return -EINVAL;
138 
139 	/*
140 	 * Apply common protection level across all streams in DP MST Topology.
141 	 * Use highest supported content type for all streams in DP MST Topology.
142 	 */
143 	for (k = 0; k < data->k; k++)
144 		data->streams[k].stream_type =
145 			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
146 
147 	return 0;
148 }
149 
150 static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
151 				      struct intel_connector *connector)
152 {
153 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
154 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
155 	struct intel_hdcp *hdcp = &connector->hdcp;
156 
157 	if (intel_encoder_is_mst(intel_attached_encoder(connector)))
158 		return intel_hdcp_required_content_stream(state, dig_port);
159 
160 	data->k = 1;
161 	data->streams[0].stream_id = 0;
162 	data->streams[0].stream_type = hdcp->content_type;
163 
164 	return 0;
165 }
166 
167 static
168 bool intel_hdcp_is_ksv_valid(u8 *ksv)
169 {
170 	int i, ones = 0;
171 	/* KSV has 20 1's and 20 0's */
172 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
173 		ones += hweight8(ksv[i]);
174 	if (ones != 20)
175 		return false;
176 
177 	return true;
178 }
179 
180 static
181 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
182 			       const struct intel_hdcp_shim *shim, u8 *bksv)
183 {
184 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
185 	int ret, i, tries = 2;
186 
187 	/* HDCP spec states that we must retry the bksv if it is invalid */
188 	for (i = 0; i < tries; i++) {
189 		ret = shim->read_bksv(dig_port, bksv);
190 		if (ret)
191 			return ret;
192 		if (intel_hdcp_is_ksv_valid(bksv))
193 			break;
194 	}
195 	if (i == tries) {
196 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
197 		return -ENODEV;
198 	}
199 
200 	return 0;
201 }
202 
203 /* Is HDCP1.4 capable on Platform and Sink */
204 bool intel_hdcp_get_capability(struct intel_connector *connector)
205 {
206 	struct intel_digital_port *dig_port;
207 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
208 	bool capable = false;
209 	u8 bksv[5];
210 
211 	if (!intel_attached_encoder(connector))
212 		return capable;
213 
214 	dig_port = intel_attached_dig_port(connector);
215 
216 	if (!shim)
217 		return capable;
218 
219 	if (shim->hdcp_get_capability) {
220 		shim->hdcp_get_capability(dig_port, &capable);
221 	} else {
222 		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
223 			capable = true;
224 	}
225 
226 	return capable;
227 }
228 
229 /*
230  * Check if the source has all the building blocks ready to make
231  * HDCP 2.2 work
232  */
233 static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
234 {
235 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
236 	struct intel_hdcp *hdcp = &connector->hdcp;
237 
238 	/* I915 support for HDCP2.2 */
239 	if (!hdcp->hdcp2_supported)
240 		return false;
241 
242 	/* If MTL+ make sure gsc is loaded and proxy is setup */
243 	if (intel_hdcp_gsc_cs_required(i915)) {
244 		if (!intel_hdcp_gsc_check_status(i915))
245 			return false;
246 	}
247 
248 	/* MEI/GSC interface is solid depending on which is used */
249 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
250 	if (!i915->display.hdcp.comp_added ||  !i915->display.hdcp.arbiter) {
251 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
252 		return false;
253 	}
254 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
255 
256 	return true;
257 }
258 
259 /* Is HDCP2.2 capable on Platform and Sink */
260 bool intel_hdcp2_get_capability(struct intel_connector *connector)
261 {
262 	struct intel_hdcp *hdcp = &connector->hdcp;
263 	bool capable = false;
264 
265 	if (!intel_hdcp2_prerequisite(connector))
266 		return false;
267 
268 	/* Sink's capability for HDCP2.2 */
269 	hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
270 
271 	return capable;
272 }
273 
274 void intel_hdcp_get_remote_capability(struct intel_connector *connector,
275 				      bool *hdcp_capable,
276 				      bool *hdcp2_capable)
277 {
278 	struct intel_hdcp *hdcp = &connector->hdcp;
279 
280 	if (!hdcp->shim->get_remote_hdcp_capability)
281 		return;
282 
283 	hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
284 					       hdcp2_capable);
285 
286 	if (!intel_hdcp2_prerequisite(connector))
287 		*hdcp2_capable = false;
288 }
289 
290 static bool intel_hdcp_in_use(struct drm_i915_private *i915,
291 			      enum transcoder cpu_transcoder, enum port port)
292 {
293 	return intel_de_read(i915,
294 			     HDCP_STATUS(i915, cpu_transcoder, port)) &
295 		HDCP_STATUS_ENC;
296 }
297 
298 static bool intel_hdcp2_in_use(struct drm_i915_private *i915,
299 			       enum transcoder cpu_transcoder, enum port port)
300 {
301 	return intel_de_read(i915,
302 			     HDCP2_STATUS(i915, cpu_transcoder, port)) &
303 		LINK_ENCRYPTION_STATUS;
304 }
305 
/*
 * Wait for the repeater to signal that its downstream KSV list is ready.
 *
 * Returns 0 when ready, the shim's read error if polling the sink failed,
 * or -ETIMEDOUT if readiness was not reported within the 5s budget.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* Polling stopped early because the sink read itself errored out. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
326 
327 static bool hdcp_key_loadable(struct drm_i915_private *i915)
328 {
329 	enum i915_power_well_id id;
330 	intel_wakeref_t wakeref;
331 	bool enabled = false;
332 
333 	/*
334 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
335 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
336 	 */
337 	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
338 		id = HSW_DISP_PW_GLOBAL;
339 	else
340 		id = SKL_DISP_PW_1;
341 
342 	/* PG1 (power well #1) needs to be enabled */
343 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
344 		enabled = intel_display_power_well_is_enabled(i915, id);
345 
346 	/*
347 	 * Another req for hdcp key loadability is enabled state of pll for
348 	 * cdclk. Without active crtc we wont land here. So we are assuming that
349 	 * cdclk is already on.
350 	 */
351 
352 	return enabled;
353 }
354 
355 static void intel_hdcp_clear_keys(struct drm_i915_private *i915)
356 {
357 	intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
358 	intel_de_write(i915, HDCP_KEY_STATUS,
359 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
360 }
361 
/*
 * Load the HDCP 1.4 keys into the display HW.
 *
 * Returns 0 when the keys are (or already were) loaded successfully,
 * -ENXIO when the HW reports a failed/absent key load, or a pcode/wait
 * error code.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *i915)
{
	int ret;
	u32 val;

	/* Already loaded successfully? Nothing to do. */
	val = intel_de_read(i915, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) {
		ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS,
				   HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
				   10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
413 
414 /* Returns updated SHA-1 index */
415 static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text)
416 {
417 	intel_de_write(i915, HDCP_SHA_TEXT, sha_text);
418 	if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
419 		drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n");
420 		return -ETIMEDOUT;
421 	}
422 	return 0;
423 }
424 
/*
 * Return the HDCP_REP_CTL bits (REP_PRESENT | SHA1_M0 select) for this
 * pipe. On display version 12+ the selection is per-transcoder; on older
 * platforms it is per-DDI port. Returns 0 (with an error log) for an
 * unknown transcoder/port.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
				enum transcoder cpu_transcoder, enum port port)
{
	if (DISPLAY_VER(i915) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&i915->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return 0;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&i915->drm, "Unknown port %d\n", port);
		return 0;
	}
}
466 
/*
 * Validate the repeater's V' against the HW-computed SHA-1 over the
 * downstream KSV list, BINFO/BSTATUS and M0 (HDCP 1.4 repeater auth).
 *
 * The receiver's V' parts are loaded into HDCP_SHA_V_PRIME registers, then
 * the SHA-1 input stream is fed to the HW 32 bits at a time through
 * HDCP_SHA_TEXT, and finally the HW compares its digest against V'.
 *
 * Returns 0 when V' matches, -ETIMEDOUT on HW handshake timeouts, -ENXIO
 * on digest mismatch, -EINVAL on an impossible leftover count, or the
 * shim's error from reading V'.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port);
	intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(i915, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(i915,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(i915, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(i915, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(i915, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(i915, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(i915, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
709 
710 /* Implements Part 2 of the HDCP authorization procedure */
711 static
712 int intel_hdcp_auth_downstream(struct intel_connector *connector)
713 {
714 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
715 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
716 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
717 	u8 bstatus[2], num_downstream, *ksv_fifo;
718 	int ret, i, tries = 3;
719 
720 	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
721 	if (ret) {
722 		drm_dbg_kms(&i915->drm,
723 			    "KSV list failed to become ready (%d)\n", ret);
724 		return ret;
725 	}
726 
727 	ret = shim->read_bstatus(dig_port, bstatus);
728 	if (ret)
729 		return ret;
730 
731 	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
732 	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
733 		drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n");
734 		return -EPERM;
735 	}
736 
737 	/*
738 	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
739 	 * the HDCP encryption. That implies that repeater can't have its own
740 	 * display. As there is no consumption of encrypted content in the
741 	 * repeater with 0 downstream devices, we are failing the
742 	 * authentication.
743 	 */
744 	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
745 	if (num_downstream == 0) {
746 		drm_dbg_kms(&i915->drm,
747 			    "Repeater with zero downstream devices\n");
748 		return -EINVAL;
749 	}
750 
751 	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
752 	if (!ksv_fifo) {
753 		drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n");
754 		return -ENOMEM;
755 	}
756 
757 	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
758 	if (ret)
759 		goto err;
760 
761 	if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo,
762 					num_downstream) > 0) {
763 		drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n");
764 		ret = -EPERM;
765 		goto err;
766 	}
767 
768 	/*
769 	 * When V prime mismatches, DP Spec mandates re-read of
770 	 * V prime atleast twice.
771 	 */
772 	for (i = 0; i < tries; i++) {
773 		ret = intel_hdcp_validate_v_prime(connector, shim,
774 						  ksv_fifo, num_downstream,
775 						  bstatus);
776 		if (!ret)
777 			break;
778 	}
779 
780 	if (i == tries) {
781 		drm_dbg_kms(&i915->drm,
782 			    "V Prime validation failed.(%d)\n", ret);
783 		goto err;
784 	}
785 
786 	drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n",
787 		    num_downstream);
788 	ret = 0;
789 err:
790 	kfree(ksv_fifo);
791 	return ret;
792 }
793 
/*
 * Implements Part 1 of the HDCP authorization procedure: An generation,
 * Bksv exchange/revocation check, R0/R0' comparison and enabling of link
 * encryption. On success with a repeater present, chains into Part 2
 * (intel_hdcp_auth_downstream). Returns 0 on success or a negative error.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let the shim see bytes while the HW registers see u32s. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_get_capability) {
		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&i915->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(i915,
			       HDCP_ANINIT(i915, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(i915,
				  HDCP_STATUS(i915, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&i915->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(i915,
				  HDCP_ANLO(i915, cpu_transcoder, port));
	an.reg[1] = intel_de_read(i915,
				  HDCP_ANHI(i915, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' becomes valid a bounded time after Aksv is written (see below). */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Refuse to authenticate a sink whose Bksv is on the revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) {
		drm_err(&i915->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(i915, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&i915->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(i915,
			       HDCP_RPRIME(i915, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&i915->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(i915,
					  HDCP_STATUS(i915, cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(i915,
				  HDCP_STATUS(i915, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&i915->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
966 
/*
 * Tear down HDCP 1.4 on this connector: disable stream encryption (MST),
 * then port encryption, clear the repeater control bits and drop HDCP
 * signalling. Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	/* Disable port-level encryption and wait for the status to clear. */
	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(i915,
				    HDCP_STATUS(i915, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&i915->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear this pipe's repeater-present/SHA1-M0 selection bits. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder,
						   port);
	intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&i915->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&i915->drm, "HDCP is disabled\n");
	return 0;
}
1021 
/*
 * Enable HDCP 1.4 on @connector: load the HDCP keys (retried up to
 * KEY_LOAD_TRIES times, clearing them between attempts), then run
 * authentication (retried up to 3 times, fully disabling HDCP between
 * failed attempts, as the spec expects reauth on failure).
 *
 * Returns 0 on success with hdcp->hdcp_encrypted set, otherwise the last
 * error from key loading or authentication.
 */
static int intel_hdcp1_enable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
		    connector->base.base.id, connector->base.name);

	if (!hdcp_key_loadable(i915)) {
		drm_err(&i915->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(i915);
		if (!ret)
			break;
		/* Clear any partially loaded keys before retrying. */
		intel_hdcp_clear_keys(i915);
	}
	if (ret) {
		drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&i915->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
1066 
/* Map an embedded &struct intel_hdcp back to its containing connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
1071 
/*
 * Update the connector's content protection value and keep the per-port
 * count of HDCP-enabled streams in sync. If @update_property is true, a
 * worker is queued to propagate the new value to the drm property (the
 * worker holds a connector reference until it runs).
 *
 * Caller must hold both hdcp->mutex and dig_port->hdcp_mutex.
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Track transitions into/out of ENABLED in the port stream count. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference dropped by intel_hdcp_prop_work(). */
		drm_connector_get(&connector->base);
		queue_work(i915->unordered_wq, &hdcp->prop_work);
	}
}
1100 
/*
 * Implements Part 3 of the HDCP authorization procedure: the periodic link
 * integrity check. Only valid while HDCP 1.4 is ENABLED and encrypted.
 * If the shim reports the link as bad, the full authentication is torn
 * down and the property is flipped back to DESIRED so it can be retried.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW dropping encryption on its own is unexpected; flag it loudly. */
	if (drm_WARN_ON(&i915->drm,
			!intel_hdcp_in_use(i915, cpu_transcoder, port))) {
		drm_err(&i915->drm,
			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
			connector->base.base.id, connector->base.name,
			intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Link still good: just reaffirm ENABLED and exit. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
		    connector->base.base.id, connector->base.name);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* DESIRED triggers re-enable through the property machinery. */
	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED,
				true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1165 
/*
 * Deferred worker that pushes hdcp->value out to the connector's content
 * protection property under the connection_mutex. Queued by
 * intel_hdcp_update_value(), which took a connector reference that is
 * released here.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	drm_connector_put(&connector->base);
}
1190 
1191 bool is_hdcp_supported(struct drm_i915_private *i915, enum port port)
1192 {
1193 	return DISPLAY_RUNTIME_INFO(i915)->has_hdcp &&
1194 		(DISPLAY_VER(i915) >= 12 || port < PORT_E);
1195 }
1196 
1197 static int
1198 hdcp2_prepare_ake_init(struct intel_connector *connector,
1199 		       struct hdcp2_ake_init *ake_data)
1200 {
1201 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1202 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1203 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1204 	struct i915_hdcp_arbiter *arbiter;
1205 	int ret;
1206 
1207 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1208 	arbiter = i915->display.hdcp.arbiter;
1209 
1210 	if (!arbiter || !arbiter->ops) {
1211 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1212 		return -EINVAL;
1213 	}
1214 
1215 	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1216 	if (ret)
1217 		drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n",
1218 			    ret);
1219 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1220 
1221 	return ret;
1222 }
1223 
1224 static int
1225 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1226 				struct hdcp2_ake_send_cert *rx_cert,
1227 				bool *paired,
1228 				struct hdcp2_ake_no_stored_km *ek_pub_km,
1229 				size_t *msg_sz)
1230 {
1231 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1232 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1233 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1234 	struct i915_hdcp_arbiter *arbiter;
1235 	int ret;
1236 
1237 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1238 	arbiter = i915->display.hdcp.arbiter;
1239 
1240 	if (!arbiter || !arbiter->ops) {
1241 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1242 		return -EINVAL;
1243 	}
1244 
1245 	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1246 							 rx_cert, paired,
1247 							 ek_pub_km, msg_sz);
1248 	if (ret < 0)
1249 		drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n",
1250 			    ret);
1251 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1252 
1253 	return ret;
1254 }
1255 
1256 static int hdcp2_verify_hprime(struct intel_connector *connector,
1257 			       struct hdcp2_ake_send_hprime *rx_hprime)
1258 {
1259 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1260 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1261 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1262 	struct i915_hdcp_arbiter *arbiter;
1263 	int ret;
1264 
1265 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1266 	arbiter = i915->display.hdcp.arbiter;
1267 
1268 	if (!arbiter || !arbiter->ops) {
1269 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1270 		return -EINVAL;
1271 	}
1272 
1273 	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1274 	if (ret < 0)
1275 		drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret);
1276 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1277 
1278 	return ret;
1279 }
1280 
1281 static int
1282 hdcp2_store_pairing_info(struct intel_connector *connector,
1283 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1284 {
1285 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1286 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1287 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1288 	struct i915_hdcp_arbiter *arbiter;
1289 	int ret;
1290 
1291 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1292 	arbiter = i915->display.hdcp.arbiter;
1293 
1294 	if (!arbiter || !arbiter->ops) {
1295 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1296 		return -EINVAL;
1297 	}
1298 
1299 	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1300 	if (ret < 0)
1301 		drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n",
1302 			    ret);
1303 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1304 
1305 	return ret;
1306 }
1307 
1308 static int
1309 hdcp2_prepare_lc_init(struct intel_connector *connector,
1310 		      struct hdcp2_lc_init *lc_init)
1311 {
1312 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1313 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1314 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1315 	struct i915_hdcp_arbiter *arbiter;
1316 	int ret;
1317 
1318 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1319 	arbiter = i915->display.hdcp.arbiter;
1320 
1321 	if (!arbiter || !arbiter->ops) {
1322 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1323 		return -EINVAL;
1324 	}
1325 
1326 	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1327 	if (ret < 0)
1328 		drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n",
1329 			    ret);
1330 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1331 
1332 	return ret;
1333 }
1334 
1335 static int
1336 hdcp2_verify_lprime(struct intel_connector *connector,
1337 		    struct hdcp2_lc_send_lprime *rx_lprime)
1338 {
1339 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1340 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1341 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1342 	struct i915_hdcp_arbiter *arbiter;
1343 	int ret;
1344 
1345 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1346 	arbiter = i915->display.hdcp.arbiter;
1347 
1348 	if (!arbiter || !arbiter->ops) {
1349 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1350 		return -EINVAL;
1351 	}
1352 
1353 	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1354 	if (ret < 0)
1355 		drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n",
1356 			    ret);
1357 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1358 
1359 	return ret;
1360 }
1361 
1362 static int hdcp2_prepare_skey(struct intel_connector *connector,
1363 			      struct hdcp2_ske_send_eks *ske_data)
1364 {
1365 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1366 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1367 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1368 	struct i915_hdcp_arbiter *arbiter;
1369 	int ret;
1370 
1371 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1372 	arbiter = i915->display.hdcp.arbiter;
1373 
1374 	if (!arbiter || !arbiter->ops) {
1375 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1376 		return -EINVAL;
1377 	}
1378 
1379 	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1380 	if (ret < 0)
1381 		drm_dbg_kms(&i915->drm, "Get session key failed. %d\n",
1382 			    ret);
1383 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1384 
1385 	return ret;
1386 }
1387 
1388 static int
1389 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1390 				      struct hdcp2_rep_send_receiverid_list
1391 								*rep_topology,
1392 				      struct hdcp2_rep_send_ack *rep_send_ack)
1393 {
1394 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1395 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1396 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1397 	struct i915_hdcp_arbiter *arbiter;
1398 	int ret;
1399 
1400 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1401 	arbiter = i915->display.hdcp.arbiter;
1402 
1403 	if (!arbiter || !arbiter->ops) {
1404 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1405 		return -EINVAL;
1406 	}
1407 
1408 	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1409 							    data,
1410 							    rep_topology,
1411 							    rep_send_ack);
1412 	if (ret < 0)
1413 		drm_dbg_kms(&i915->drm,
1414 			    "Verify rep topology failed. %d\n", ret);
1415 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1416 
1417 	return ret;
1418 }
1419 
1420 static int
1421 hdcp2_verify_mprime(struct intel_connector *connector,
1422 		    struct hdcp2_rep_stream_ready *stream_ready)
1423 {
1424 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1425 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1426 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1427 	struct i915_hdcp_arbiter *arbiter;
1428 	int ret;
1429 
1430 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1431 	arbiter = i915->display.hdcp.arbiter;
1432 
1433 	if (!arbiter || !arbiter->ops) {
1434 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1435 		return -EINVAL;
1436 	}
1437 
1438 	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1439 	if (ret < 0)
1440 		drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret);
1441 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1442 
1443 	return ret;
1444 }
1445 
1446 static int hdcp2_authenticate_port(struct intel_connector *connector)
1447 {
1448 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1449 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1450 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1451 	struct i915_hdcp_arbiter *arbiter;
1452 	int ret;
1453 
1454 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1455 	arbiter = i915->display.hdcp.arbiter;
1456 
1457 	if (!arbiter || !arbiter->ops) {
1458 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1459 		return -EINVAL;
1460 	}
1461 
1462 	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1463 	if (ret < 0)
1464 		drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n",
1465 			    ret);
1466 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1467 
1468 	return ret;
1469 }
1470 
1471 static int hdcp2_close_session(struct intel_connector *connector)
1472 {
1473 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1474 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1475 	struct i915_hdcp_arbiter *arbiter;
1476 	int ret;
1477 
1478 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1479 	arbiter = i915->display.hdcp.arbiter;
1480 
1481 	if (!arbiter || !arbiter->ops) {
1482 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1483 		return -EINVAL;
1484 	}
1485 
1486 	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1487 					     &dig_port->hdcp_port_data);
1488 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1489 
1490 	return ret;
1491 }
1492 
/* Deauthenticate amounts to closing the arbiter's HDCP 2.2 session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}
1497 
/*
 * Authentication flow starts from here.
 *
 * HDCP 2.2 AKE stage: send AKE_Init, validate the receiver certificate,
 * exchange km, verify H' and, when the receiver was not previously
 * paired, store the pairing info. All sink traffic goes through the shim;
 * all crypto goes through the firmware arbiter helpers above.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* One buffer reused for each AKE message in turn. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	/* AKE_Init -> sink, then read back AKE_Send_Cert. */
	ret = shim->write_2_2_msg(connector, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers whose ID appears in the revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&i915->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&i915->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	/* @size reflects stored vs. no-stored km message length. */
	ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1586 
/*
 * HDCP 2.2 locality check: send LC_Init, read LC_Send_L_prime and verify
 * L'. The whole round trip is retried up to HDCP2_LC_RETRY_CNT times; any
 * step failure restarts from LC_Init. Returns the status of the last
 * attempt.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(connector, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1621 
1622 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1623 {
1624 	struct intel_hdcp *hdcp = &connector->hdcp;
1625 	struct hdcp2_ske_send_eks send_eks;
1626 	int ret;
1627 
1628 	ret = hdcp2_prepare_skey(connector, &send_eks);
1629 	if (ret < 0)
1630 		return ret;
1631 
1632 	ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1633 					sizeof(send_eks));
1634 	if (ret < 0)
1635 		return ret;
1636 
1637 	return 0;
1638 }
1639 
/*
 * One attempt at RepeaterAuth_Stream_Manage / Stream_Ready: build the
 * stream-manage message from the port's stream table, send it, read the
 * repeater's Stream_Ready reply and verify M'.
 *
 * Note that seq_num_m is incremented on every attempt, success or
 * failure, via the shared out: path — the spec counter must advance per
 * message sent.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	/* Copy each stream's id/type from the port data. */
	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Truncate the message to the streams actually in use. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	hdcp->seq_num_m++;

	return ret;
}
1689 
/*
 * HDCP 2.2 repeater authentication: read the RecvId_List, validate the
 * topology limits, seq_num_v ordering and revocation status, then have
 * the arbiter verify the list and send the resulting ack back to the
 * repeater. Also records whether the MST topology is Type 1 capable.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* Reject topologies that overflow the cascade or device limits. */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(&i915->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* First RecvId_List of a session must carry seq_num_v == 0. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&i915->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&i915->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Only commit seq_num_v once the list has been verified. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1771 
/*
 * Run the full HDCP 2.2 sink authentication sequence in order: AKE,
 * locality check, session key exchange, optional stream-type
 * configuration, and — for repeaters — topology authentication. Stops at
 * the first failing stage and returns its error.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook to push the stream type to the sink. */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	/* is_repeater was learned from rx_caps during AKE. */
	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1817 
/*
 * Turn on HDCP 2.2 stream encryption for @connector. The link must
 * already report LINK_ENCRYPTION_STATUS; if it does not, the session is
 * torn down (link_recover path) and -EPERM is returned so the caller can
 * restart authentication from scratch.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	/* Drop the arbiter session and port auth state so a fresh
	 * authentication can be attempted. */
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1858 
/*
 * Enable HDCP 2.2 link encryption: toggle signalling on (if the shim
 * needs it), request encryption once the link reports authenticated, and
 * wait for LINK_ENCRYPTION_STATUS to assert.
 *
 * NOTE(review): dig_port->hdcp_auth_status is set true even when the wait
 * times out and ret is an error — callers appear to treat auth and
 * encryption state separately; confirm before changing.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here. */
	drm_WARN_ON(&i915->drm,
		    intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(i915,
				    HDCP2_STATUS(i915, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}
1897 
/*
 * Disable HDCP 2.2 link encryption: clear the encryption request bit,
 * wait for LINK_ENCRYPTION_STATUS to drop, then toggle signalling off.
 *
 * NOTE(review): a wait timeout (-ETIMEDOUT) is only logged; ret is then
 * overwritten by the toggle_signalling result, so a timeout followed by a
 * successful toggle returns 0 — confirm this is intentional.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be on when we get here. */
	drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(i915,
				      HDCP2_STATUS(i915, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&i915->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1934 
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(): no-op
 * for non-repeaters; otherwise up to 3 attempts, bailing out early if
 * seq_num_m rolls past HDCP_2_2_SEQ_NUM_MAX so the caller can restart
 * authentication. Returns the last attempt's status.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(&i915->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
1963 
/*
 * Full HDCP2.2 bring-up: authenticate the sink (up to three attempts),
 * prepare and propagate stream management info, authenticate the port,
 * then enable link and stream encryption.
 *
 * If the dig_port is already authenticated (e.g. by another MST stream),
 * the auth loop and link-encryption steps are skipped and only stream
 * encryption is enabled for this connector.
 *
 * Returns 0 on success or the negative error from the failing step.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
					  struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(state, connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Prepare stream failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	/* Stream encryption is needed even on a pre-authenticated port */
	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
2024 
2025 static int _intel_hdcp2_enable(struct intel_atomic_state *state,
2026 			       struct intel_connector *connector)
2027 {
2028 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2029 	struct intel_hdcp *hdcp = &connector->hdcp;
2030 	int ret;
2031 
2032 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
2033 		    connector->base.base.id, connector->base.name,
2034 		    hdcp->content_type);
2035 
2036 	intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);
2037 
2038 	ret = hdcp2_authenticate_and_encrypt(state, connector);
2039 	if (ret) {
2040 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
2041 			    hdcp->content_type, ret);
2042 		return ret;
2043 	}
2044 
2045 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
2046 		    connector->base.base.id, connector->base.name,
2047 		    hdcp->content_type);
2048 
2049 	hdcp->hdcp2_encrypted = true;
2050 	return 0;
2051 }
2052 
/*
 * Tear down HDCP2.2 on a connector: disable this stream's encryption via
 * the shim (where supported), then link encryption, then deauthenticate
 * the port.
 *
 * @hdcp2_link_recovery: when false and other HDCP streams are still active
 * on this dig_port, only the per-stream encryption is dropped and the
 * port-level session is kept alive for the remaining streams.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/* Other streams still use the port session: stop here */
		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	/* No streams remain configured on this port */
	data->k = 0;

	return ret;
}
2090 
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Runs under both the connector's hdcp mutex and the dig_port hdcp mutex
 * (taken in that order, matching the other paths in this file). Depending
 * on the shim's verdict, the link is either confirmed protected, or torn
 * down and the uapi state set to DESIRED.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware dropped encryption on its own: disable and flag DESIRED */
	if (drm_WARN_ON(&i915->drm,
			!intel_hdcp2_in_use(i915, cpu_transcoder, port))) {
		drm_err(&i915->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Ask the sink-specific shim whether the link is still protected */
	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&i915->drm,
			    "HDCP2.2 Downstream topology change\n");
	} else {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
			    connector->base.base.id, connector->base.name);
	}

	/* Topology change or link failure: disable HDCP2.2, mark DESIRED */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(&i915->drm,
			"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
			connector->base.base.id, connector->base.name, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2164 
2165 static void intel_hdcp_check_work(struct work_struct *work)
2166 {
2167 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2168 					       struct intel_hdcp,
2169 					       check_work);
2170 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2171 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2172 
2173 	if (drm_connector_is_unregistered(&connector->base))
2174 		return;
2175 
2176 	if (!intel_hdcp2_check_link(connector))
2177 		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2178 				   DRM_HDCP2_CHECK_PERIOD_MS);
2179 	else if (!intel_hdcp_check_link(connector))
2180 		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2181 				   DRM_HDCP_CHECK_PERIOD_MS);
2182 }
2183 
2184 static int i915_hdcp_component_bind(struct device *drv_kdev,
2185 				    struct device *mei_kdev, void *data)
2186 {
2187 	struct intel_display *display = to_intel_display(drv_kdev);
2188 	struct drm_i915_private *i915 = to_i915(display->drm);
2189 
2190 	drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
2191 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
2192 	i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2193 	i915->display.hdcp.arbiter->hdcp_dev = mei_kdev;
2194 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2195 
2196 	return 0;
2197 }
2198 
/*
 * Component unbind callback: the MEI HDCP device is going away, so drop
 * our reference to the arbiter under the hdcp mutex.
 */
static void i915_hdcp_component_unbind(struct device *drv_kdev,
				       struct device *mei_kdev, void *data)
{
	struct intel_display *display = to_intel_display(drv_kdev);
	struct drm_i915_private *i915 = to_i915(display->drm);

	drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	i915->display.hdcp.arbiter = NULL;
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
}
2210 
/* Component ops tying i915 to the MEI HDCP firmware interface */
static const struct component_ops i915_hdcp_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2215 
2216 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2217 {
2218 	switch (port) {
2219 	case PORT_A:
2220 		return HDCP_DDI_A;
2221 	case PORT_B ... PORT_F:
2222 		return (enum hdcp_ddi)port;
2223 	default:
2224 		return HDCP_DDI_INVALID_PORT;
2225 	}
2226 }
2227 
2228 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2229 {
2230 	switch (cpu_transcoder) {
2231 	case TRANSCODER_A ... TRANSCODER_D:
2232 		return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2233 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2234 		return HDCP_INVALID_TRANSCODER;
2235 	}
2236 }
2237 
2238 static int initialize_hdcp_port_data(struct intel_connector *connector,
2239 				     struct intel_digital_port *dig_port,
2240 				     const struct intel_hdcp_shim *shim)
2241 {
2242 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2243 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2244 	enum port port = dig_port->base.port;
2245 
2246 	if (DISPLAY_VER(i915) < 12)
2247 		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
2248 	else
2249 		/*
2250 		 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
2251 		 * with zero(INVALID PORT index).
2252 		 */
2253 		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
2254 
2255 	/*
2256 	 * As associated transcoder is set and modified at modeset, here hdcp_transcoder
2257 	 * is initialized to zero (invalid transcoder index). This will be
2258 	 * retained for <Gen12 forever.
2259 	 */
2260 	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
2261 
2262 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2263 	data->protocol = (u8)shim->protocol;
2264 
2265 	if (!data->streams)
2266 		data->streams = kcalloc(INTEL_NUM_PIPES(i915),
2267 					sizeof(struct hdcp2_streamid_type),
2268 					GFP_KERNEL);
2269 	if (!data->streams) {
2270 		drm_err(&i915->drm, "Out of Memory\n");
2271 		return -ENOMEM;
2272 	}
2273 
2274 	return 0;
2275 }
2276 
2277 static bool is_hdcp2_supported(struct drm_i915_private *i915)
2278 {
2279 	if (intel_hdcp_gsc_cs_required(i915))
2280 		return true;
2281 
2282 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2283 		return false;
2284 
2285 	return (DISPLAY_VER(i915) >= 10 ||
2286 		IS_KABYLAKE(i915) ||
2287 		IS_COFFEELAKE(i915) ||
2288 		IS_COMETLAKE(i915));
2289 }
2290 
/*
 * Register the HDCP2.2 firmware back-end: the GSC interface where required,
 * otherwise the MEI component. comp_added is published before registration
 * and rolled back (under the same mutex) if registration fails, so that
 * intel_hdcp_component_fini() knows whether there is anything to remove.
 */
void intel_hdcp_component_init(struct drm_i915_private *i915)
{
	int ret;

	if (!is_hdcp2_supported(i915))
		return;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	/* Double-init would leak the previously registered component */
	drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added);

	i915->display.hdcp.comp_added = true;
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
	if (intel_hdcp_gsc_cs_required(i915))
		ret = intel_hdcp_gsc_init(i915);
	else
		ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops,
					  I915_COMPONENT_HDCP);

	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n",
			    ret);
		mutex_lock(&i915->display.hdcp.hdcp_mutex);
		i915->display.hdcp.comp_added = false;
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return;
	}
}
2318 
2319 static void intel_hdcp2_init(struct intel_connector *connector,
2320 			     struct intel_digital_port *dig_port,
2321 			     const struct intel_hdcp_shim *shim)
2322 {
2323 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2324 	struct intel_hdcp *hdcp = &connector->hdcp;
2325 	int ret;
2326 
2327 	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2328 	if (ret) {
2329 		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2330 		return;
2331 	}
2332 
2333 	hdcp->hdcp2_supported = true;
2334 }
2335 
2336 int intel_hdcp_init(struct intel_connector *connector,
2337 		    struct intel_digital_port *dig_port,
2338 		    const struct intel_hdcp_shim *shim)
2339 {
2340 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2341 	struct intel_hdcp *hdcp = &connector->hdcp;
2342 	int ret;
2343 
2344 	if (!shim)
2345 		return -EINVAL;
2346 
2347 	if (is_hdcp2_supported(i915))
2348 		intel_hdcp2_init(connector, dig_port, shim);
2349 
2350 	ret =
2351 	drm_connector_attach_content_protection_property(&connector->base,
2352 							 hdcp->hdcp2_supported);
2353 	if (ret) {
2354 		hdcp->hdcp2_supported = false;
2355 		kfree(dig_port->hdcp_port_data.streams);
2356 		return ret;
2357 	}
2358 
2359 	hdcp->shim = shim;
2360 	mutex_init(&hdcp->mutex);
2361 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2362 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2363 	init_waitqueue_head(&hdcp->cp_irq_queue);
2364 
2365 	return 0;
2366 }
2367 
/*
 * Enable content protection on a connector. HDCP2.2 is tried first; if it
 * fails and the requested content type is not Type1 (which mandates 2.2),
 * HDCP1.4 is attempted. On success the periodic link-check worker is queued
 * and the uapi property is moved to ENABLED.
 *
 * Returns 0 on success, -ENOENT/-ENODEV for missing shim/encoder, or the
 * error from the last enable attempt.
 */
static int _intel_hdcp_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(&i915->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
			connector->base.base.id, connector->base.name);
		return -ENODEV;
	}

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&i915->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = (u8)conn_state->hdcp_content_type;

	/* For MST, HDCP runs on the topology's master transcoder */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	/* Gen12+ firmware identifies the link by transcoder, not DDI */
	if (DISPLAY_VER(i915) >= 12)
		dig_port->hdcp_port_data.hdcp_transcoder =
			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_get_capability(connector)) {
		ret = _intel_hdcp2_enable(state, connector);
		if (!ret)
			check_link_interval =
				DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_get_capability(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = intel_hdcp1_enable(connector);
	}

	if (!ret) {
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2440 
2441 void intel_hdcp_enable(struct intel_atomic_state *state,
2442 		       struct intel_encoder *encoder,
2443 		       const struct intel_crtc_state *crtc_state,
2444 		       const struct drm_connector_state *conn_state)
2445 {
2446 	struct intel_connector *connector =
2447 		to_intel_connector(conn_state->connector);
2448 	struct intel_hdcp *hdcp = &connector->hdcp;
2449 
2450 	/*
2451 	 * Enable hdcp if it's desired or if userspace is enabled and
2452 	 * driver set its state to undesired
2453 	 */
2454 	if (conn_state->content_protection ==
2455 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2456 	    (conn_state->content_protection ==
2457 	    DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
2458 	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2459 		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2460 }
2461 
/*
 * Disable whichever HDCP version (2.2 or 1.4) currently encrypts the
 * connector and set the uapi state to UNDESIRED. check_work is cancelled
 * only after the mutexes are released, since the worker takes the same
 * locks.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	/* Already disabled: nothing to tear down */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2490 
/*
 * Reconcile HDCP state with the new connector state during a modeset:
 * disable on UNDESIRED or on a content-type change, then re-enable when
 * the state is DESIRED (or after a type change). prop_work is scheduled
 * with a connector reference held wherever the uapi property needs to be
 * pushed back to userspace.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (!connector->hdcp.shim)
		return;

	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* Reference dropped by prop_work once it has run */
		drm_connector_get(&connector->base);
		queue_work(i915->unordered_wq, &hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			queue_work(i915->unordered_wq, &hdcp->prop_work);
		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
2551 
2552 void intel_hdcp_component_fini(struct drm_i915_private *i915)
2553 {
2554 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
2555 	if (!i915->display.hdcp.comp_added) {
2556 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2557 		return;
2558 	}
2559 
2560 	i915->display.hdcp.comp_added = false;
2561 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2562 
2563 	if (intel_hdcp_gsc_cs_required(i915))
2564 		intel_hdcp_gsc_fini(i915);
2565 	else
2566 		component_del(i915->drm.dev, &i915_hdcp_ops);
2567 }
2568 
/*
 * Release per-connector HDCP state on the connector destroy path: stop the
 * link-check worker and detach the shim. Expects the connector to already
 * be unregistered so userspace cannot re-arm the workers.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2604 
/*
 * Atomic check hook for the content protection property: normalize the
 * requested state (ENABLED is driver-set, so userspace transitions land on
 * DESIRED) and force a modeset when the protection state or content type
 * actually changes.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	/* Protection or content-type change: a full modeset is required */
	crtc_state->mode_changed = true;
}
2651 
2652 /* Handles the CP_IRQ raised from the DP HDCP sink */
2653 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2654 {
2655 	struct intel_hdcp *hdcp = &connector->hdcp;
2656 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2657 
2658 	if (!hdcp->shim)
2659 		return;
2660 
2661 	atomic_inc(&connector->hdcp.cp_irq_count);
2662 	wake_up_all(&connector->hdcp.cp_irq_queue);
2663 
2664 	queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
2665 }
2666