xref: /linux/drivers/gpu/drm/i915/display/intel_hdcp.c (revision 429508c84d95811dd1300181dfe84743caff9a38)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/intel/i915_component.h>
17 
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_gsc.h"
27 #include "intel_hdcp_regs.h"
28 #include "intel_pcode.h"
29 
30 #define KEY_LOAD_TRIES	5
31 #define HDCP2_LC_RETRY_CNT			3
32 
/* WA: 16022217614 */
/*
 * Apply workaround 16022217614: disable HDCP line rekeying for HDMI
 * (TMDS) on affected display IP versions/steppings. The register that
 * carries the disable bit differs per IP version — NOTE(review): exact
 * failure mode being worked around is not visible here; see the WA entry.
 */
static void
intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
				      struct intel_hdcp *hdcp)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* Here we assume HDMI is in TMDS mode of operation */
	if (encoder->type != INTEL_OUTPUT_HDMI)
		return;

	/* WA is only needed on display version 14+, gated per stepping */
	if (DISPLAY_VER(dev_priv) >= 14) {
		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER))
			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
				     0, HDCP_LINE_REKEY_DISABLE);
		else if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) ||
			 IS_DISPLAY_IP_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER))
			intel_de_rmw(dev_priv,
				     TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder),
				     0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
	}
}
55 
56 static int intel_conn_to_vcpi(struct intel_atomic_state *state,
57 			      struct intel_connector *connector)
58 {
59 	struct drm_dp_mst_topology_mgr *mgr;
60 	struct drm_dp_mst_atomic_payload *payload;
61 	struct drm_dp_mst_topology_state *mst_state;
62 	int vcpi = 0;
63 
64 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
65 	if (!connector->port)
66 		return 0;
67 	mgr = connector->port->mgr;
68 
69 	drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
70 	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
71 	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
72 	if (drm_WARN_ON(mgr->dev, !payload))
73 		goto out;
74 
75 	vcpi = payload->vcpi;
76 	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
77 		vcpi = 0;
78 		goto out;
79 	}
80 out:
81 	return vcpi;
82 }
83 
84 /*
85  * intel_hdcp_required_content_stream selects the highest common HDCP
86  * content_type supported by all streams in the DP MST topology, because the
87  * security f/w doesn't have any provision to mark content_type for each
88  * stream separately; it marks all available streams with the content_type
89  * provided at the time of port authentication. This may prohibit userspace
90  * from using type1 content on an HDCP 2.2 capable sink when other sinks in
91  * the DP MST topology are not HDCP 2.2 capable. Though it is not compulsory,
92  * security fw should change its policy to mark different content_types for
93  * different streams.
94  */
94 static int
95 intel_hdcp_required_content_stream(struct intel_atomic_state *state,
96 				   struct intel_digital_port *dig_port)
97 {
98 	struct drm_connector_list_iter conn_iter;
99 	struct intel_digital_port *conn_dig_port;
100 	struct intel_connector *connector;
101 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
102 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
103 	bool enforce_type0 = false;
104 	int k;
105 
106 	if (dig_port->hdcp_auth_status)
107 		return 0;
108 
109 	data->k = 0;
110 
111 	if (!dig_port->hdcp_mst_type1_capable)
112 		enforce_type0 = true;
113 
114 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
115 	for_each_intel_connector_iter(connector, &conn_iter) {
116 		if (connector->base.status == connector_status_disconnected)
117 			continue;
118 
119 		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
120 			continue;
121 
122 		conn_dig_port = intel_attached_dig_port(connector);
123 		if (conn_dig_port != dig_port)
124 			continue;
125 
126 		data->streams[data->k].stream_id =
127 			intel_conn_to_vcpi(state, connector);
128 		data->k++;
129 
130 		/* if there is only one active stream */
131 		if (dig_port->dp.active_mst_links <= 1)
132 			break;
133 	}
134 	drm_connector_list_iter_end(&conn_iter);
135 
136 	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
137 		return -EINVAL;
138 
139 	/*
140 	 * Apply common protection level across all streams in DP MST Topology.
141 	 * Use highest supported content type for all streams in DP MST Topology.
142 	 */
143 	for (k = 0; k < data->k; k++)
144 		data->streams[k].stream_type =
145 			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
146 
147 	return 0;
148 }
149 
150 static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
151 				      struct intel_connector *connector)
152 {
153 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
154 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
155 	struct intel_hdcp *hdcp = &connector->hdcp;
156 
157 	if (intel_encoder_is_mst(intel_attached_encoder(connector)))
158 		return intel_hdcp_required_content_stream(state, dig_port);
159 
160 	data->k = 1;
161 	data->streams[0].stream_id = 0;
162 	data->streams[0].stream_type = hdcp->content_type;
163 
164 	return 0;
165 }
166 
167 static
168 bool intel_hdcp_is_ksv_valid(u8 *ksv)
169 {
170 	int i, ones = 0;
171 	/* KSV has 20 1's and 20 0's */
172 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
173 		ones += hweight8(ksv[i]);
174 	if (ones != 20)
175 		return false;
176 
177 	return true;
178 }
179 
180 static
181 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
182 			       const struct intel_hdcp_shim *shim, u8 *bksv)
183 {
184 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
185 	int ret, i, tries = 2;
186 
187 	/* HDCP spec states that we must retry the bksv if it is invalid */
188 	for (i = 0; i < tries; i++) {
189 		ret = shim->read_bksv(dig_port, bksv);
190 		if (ret)
191 			return ret;
192 		if (intel_hdcp_is_ksv_valid(bksv))
193 			break;
194 	}
195 	if (i == tries) {
196 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
197 		return -ENODEV;
198 	}
199 
200 	return 0;
201 }
202 
203 /* Is HDCP1.4 capable on Platform and Sink */
204 bool intel_hdcp_get_capability(struct intel_connector *connector)
205 {
206 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
207 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
208 	bool capable = false;
209 	u8 bksv[5];
210 
211 	if (!shim)
212 		return capable;
213 
214 	if (shim->hdcp_get_capability) {
215 		shim->hdcp_get_capability(dig_port, &capable);
216 	} else {
217 		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
218 			capable = true;
219 	}
220 
221 	return capable;
222 }
223 
224 /*
225  * Check if the source has all the building blocks ready to make
226  * HDCP 2.2 work
227  */
228 static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
229 {
230 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
231 	struct intel_hdcp *hdcp = &connector->hdcp;
232 
233 	/* I915 support for HDCP2.2 */
234 	if (!hdcp->hdcp2_supported)
235 		return false;
236 
237 	/* If MTL+ make sure gsc is loaded and proxy is setup */
238 	if (intel_hdcp_gsc_cs_required(i915)) {
239 		if (!intel_hdcp_gsc_check_status(i915))
240 			return false;
241 	}
242 
243 	/* MEI/GSC interface is solid depending on which is used */
244 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
245 	if (!i915->display.hdcp.comp_added ||  !i915->display.hdcp.arbiter) {
246 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
247 		return false;
248 	}
249 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
250 
251 	return true;
252 }
253 
254 /* Is HDCP2.2 capable on Platform and Sink */
255 bool intel_hdcp2_get_capability(struct intel_connector *connector)
256 {
257 	struct intel_hdcp *hdcp = &connector->hdcp;
258 	bool capable = false;
259 
260 	if (!intel_hdcp2_prerequisite(connector))
261 		return false;
262 
263 	/* Sink's capability for HDCP2.2 */
264 	hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
265 
266 	return capable;
267 }
268 
269 void intel_hdcp_get_remote_capability(struct intel_connector *connector,
270 				      bool *hdcp_capable,
271 				      bool *hdcp2_capable)
272 {
273 	struct intel_hdcp *hdcp = &connector->hdcp;
274 
275 	if (!hdcp->shim->get_remote_hdcp_capability)
276 		return;
277 
278 	hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
279 					       hdcp2_capable);
280 
281 	if (!intel_hdcp2_prerequisite(connector))
282 		*hdcp2_capable = false;
283 }
284 
285 static bool intel_hdcp_in_use(struct drm_i915_private *i915,
286 			      enum transcoder cpu_transcoder, enum port port)
287 {
288 	return intel_de_read(i915,
289 			     HDCP_STATUS(i915, cpu_transcoder, port)) &
290 		HDCP_STATUS_ENC;
291 }
292 
293 static bool intel_hdcp2_in_use(struct drm_i915_private *i915,
294 			       enum transcoder cpu_transcoder, enum port port)
295 {
296 	return intel_de_read(i915,
297 			     HDCP2_STATUS(i915, cpu_transcoder, port)) &
298 		LINK_ENCRYPTION_STATUS;
299 }
300 
/*
 * Wait for the repeater to signal that its downstream KSV list is ready.
 *
 * Polls shim->read_ksv_ready() for up to 5 seconds (the HDCP 1.4 spec's
 * upper bound), with a 1 ms initial and 100 ms maximum poll interval.
 *
 * Returns 0 when ready, the shim's error if a read failed, or a
 * negative errno (-ETIMEDOUT) if the list never became ready.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* The wait ended because the sink read itself errored out */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
321 
322 static bool hdcp_key_loadable(struct drm_i915_private *i915)
323 {
324 	enum i915_power_well_id id;
325 	intel_wakeref_t wakeref;
326 	bool enabled = false;
327 
328 	/*
329 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
330 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
331 	 */
332 	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
333 		id = HSW_DISP_PW_GLOBAL;
334 	else
335 		id = SKL_DISP_PW_1;
336 
337 	/* PG1 (power well #1) needs to be enabled */
338 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
339 		enabled = intel_display_power_well_is_enabled(i915, id);
340 
341 	/*
342 	 * Another req for hdcp key loadability is enabled state of pll for
343 	 * cdclk. Without active crtc we wont land here. So we are assuming that
344 	 * cdclk is already on.
345 	 */
346 
347 	return enabled;
348 }
349 
350 static void intel_hdcp_clear_keys(struct drm_i915_private *i915)
351 {
352 	intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
353 	intel_de_write(i915, HDCP_KEY_STATUS,
354 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
355 }
356 
/*
 * Load the HDCP1.4 key into the display HW and hand the Aksv to the
 * PCH display engine.
 *
 * Returns 0 on success (including when the key was already loaded),
 * -ENXIO when the HW reports the key unusable, or the pcode/wait error.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *i915)
{
	int ret;
	u32 val;

	/* Nothing to do if the key is already loaded and good */
	val = intel_de_read(i915, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) {
		ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS,
				   HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
				   10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		/* Load finished but HW flagged the key as bad */
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
408 
409 /* Returns updated SHA-1 index */
410 static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text)
411 {
412 	intel_de_write(i915, HDCP_SHA_TEXT, sha_text);
413 	if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
414 		drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n");
415 		return -ETIMEDOUT;
416 	}
417 	return 0;
418 }
419 
/*
 * Select the HDCP_REP_CTL field bits (repeater-present + SHA1 M0 source)
 * for the given transcoder/port. Display 12+ selects by transcoder,
 * older platforms by DDI port. Returns 0 (and logs an error) for an
 * unknown transcoder/port.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
				enum transcoder cpu_transcoder, enum port port)
{
	if (DISPLAY_VER(i915) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&i915->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return 0;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&i915->drm, "Unknown port %d\n", port);
		return 0;
	}
}
461 
/*
 * Validate the repeater's V' against the SHA-1 computed by our HW.
 *
 * Writes the received V' parts into the HW, then streams the SHA-1
 * input (all downstream KSVs || BSTATUS || M0) through HDCP_SHA_TEXT in
 * 32-bit big-endian words, and finally asks the HW to complete the hash
 * and compare. Returns 0 on match, -ETIMEDOUT on HW timeouts, -ENXIO on
 * mismatch, -EINVAL on internal bookkeeping errors, or a shim error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port);
	intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(i915, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(i915,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(i915, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(i915, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		/* sha_leftovers can only be 0..3 for a 4-byte register */
		drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(i915, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(i915, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(i915, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
704 
/* Implements Part 2 of the HDCP authorization procedure */
/*
 * Authenticate the downstream topology behind a repeater: wait for the
 * KSV list, validate topology limits, check for revoked KSVs, then
 * verify V' (retried per the DP spec). Returns 0 on success or a
 * negative errno.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&i915->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Topology limits from BSTATUS: device count and cascade depth */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&i915->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Reject the topology if any downstream KSV is on the SRM list */
	if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		/* ret still holds the last validation error here */
		drm_dbg_kms(&i915->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
788 
/* Implements Part 1 of the HDCP authorization procedure */
/*
 * First-part HDCP 1.4 authentication: generate and exchange An/Aksv,
 * read and program a valid (non-revoked) Bksv, enable signalling and
 * encryption, match Ri/Ri', and finally enable stream encryption where
 * applicable. Hands off to intel_hdcp_auth_downstream() when a repeater
 * is present. Returns 0 on success or a negative errno.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let the shim see bytes while the HW regs take u32 words */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_get_capability) {
		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&i915->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(i915,
			       HDCP_ANINIT(i915, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(i915,
				  HDCP_STATUS(i915, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&i915->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(i915,
				  HDCP_ANLO(i915, cpu_transcoder, port));
	an.reg[1] = intel_de_read(i915,
				  HDCP_ANHI(i915, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' wait deadline below is measured from this Aksv write */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Refuse to authenticate against a sink on the SRM revocation list */
	if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) {
		drm_err(&i915->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(i915, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&i915->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(i915,
			       HDCP_RPRIME(i915, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&i915->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(i915,
					  HDCP_STATUS(i915, cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(i915,
				  HDCP_STATUS(i915, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&i915->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
961 
962 static int _intel_hdcp_disable(struct intel_connector *connector)
963 {
964 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
965 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
966 	struct intel_hdcp *hdcp = &connector->hdcp;
967 	enum port port = dig_port->base.port;
968 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
969 	u32 repeater_ctl;
970 	int ret;
971 
972 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
973 		    connector->base.base.id, connector->base.name);
974 
975 	if (hdcp->shim->stream_encryption) {
976 		ret = hdcp->shim->stream_encryption(connector, false);
977 		if (ret) {
978 			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
979 				connector->base.base.id, connector->base.name);
980 			return ret;
981 		}
982 		drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
983 			    transcoder_name(hdcp->stream_transcoder));
984 		/*
985 		 * If there are other connectors on this port using HDCP,
986 		 * don't disable it until it disabled HDCP encryption for
987 		 * all connectors in MST topology.
988 		 */
989 		if (dig_port->num_hdcp_streams > 0)
990 			return 0;
991 	}
992 
993 	hdcp->hdcp_encrypted = false;
994 	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0);
995 	if (intel_de_wait_for_clear(i915,
996 				    HDCP_STATUS(i915, cpu_transcoder, port),
997 				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
998 		drm_err(&i915->drm,
999 			"Failed to disable HDCP, timeout clearing status\n");
1000 		return -ETIMEDOUT;
1001 	}
1002 
1003 	repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder,
1004 						   port);
1005 	intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0);
1006 
1007 	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
1008 	if (ret) {
1009 		drm_err(&i915->drm, "Failed to disable HDCP signalling\n");
1010 		return ret;
1011 	}
1012 
1013 	drm_dbg_kms(&i915->drm, "HDCP is disabled\n");
1014 	return 0;
1015 }
1016 
/*
 * Enable HDCP 1.4: load the HDCP keys (retrying up to KEY_LOAD_TRIES as
 * the hardware occasionally fails the load) and then run the
 * authentication flow, retrying a few times per the HDCP spec's
 * expectation of reauth on failure. Returns 0 or a negative errno.
 */
static int intel_hdcp1_enable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
		    connector->base.base.id, connector->base.name);

	if (!hdcp_key_loadable(i915)) {
		drm_err(&i915->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	/* Clear the keys before retrying a failed load. */
	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(i915);
		if (!ret)
			break;
		intel_hdcp_clear_keys(i915);
	}
	if (ret) {
		drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&i915->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
1061 
/* Map an embedded &struct intel_hdcp back to its owning connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
1066 
/*
 * Update hdcp->value (the content protection state) and keep the port's
 * active-stream count in sync. If @update_property is set, schedule the
 * property worker to push the new state to userspace; the worker owns the
 * connector reference taken here and puts it when done.
 * Caller must hold hdcp->mutex (and dig_port->hdcp_mutex when the value
 * actually changes).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Track how many streams on this port are currently ENABLED. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference released by intel_hdcp_prop_work(). */
		drm_connector_get(&connector->base);
		queue_work(i915->unordered_wq, &hdcp->prop_work);
	}
}
1095 
1096 /* Implements Part 3 of the HDCP authorization procedure */
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	/* Lock order: hdcp->mutex before the port-wide hdcp_mutex. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware unexpectedly dropped encryption: flag DESIRED for reauth. */
	if (drm_WARN_ON(&i915->drm,
			!intel_hdcp_in_use(i915, cpu_transcoder, port))) {
		drm_err(&i915->drm,
			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
			connector->base.base.id, connector->base.name,
			intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Link is still good: reaffirm ENABLED and we're done. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
		    connector->base.base.id, connector->base.name);

	/* Disable, then report DESIRED so the enable path reauthenticates. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED,
				true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1160 
/*
 * Deferred worker that pushes hdcp->value out to the connector's
 * content-protection property. Runs with the connection_mutex held since
 * the property update requires it; drops the connector reference taken by
 * intel_hdcp_update_value().
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	drm_connector_put(&connector->base);
}
1185 
/*
 * HDCP requires platform support plus, before display version 12, a port
 * below PORT_E.
 */
bool is_hdcp_supported(struct drm_i915_private *i915, enum port port)
{
	return DISPLAY_RUNTIME_INFO(i915)->has_hdcp &&
		(DISPLAY_VER(i915) >= 12 || port < PORT_E);
}
1191 
1192 static int
1193 hdcp2_prepare_ake_init(struct intel_connector *connector,
1194 		       struct hdcp2_ake_init *ake_data)
1195 {
1196 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1197 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1198 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1199 	struct i915_hdcp_arbiter *arbiter;
1200 	int ret;
1201 
1202 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1203 	arbiter = i915->display.hdcp.arbiter;
1204 
1205 	if (!arbiter || !arbiter->ops) {
1206 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1207 		return -EINVAL;
1208 	}
1209 
1210 	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1211 	if (ret)
1212 		drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n",
1213 			    ret);
1214 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1215 
1216 	return ret;
1217 }
1218 
1219 static int
1220 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1221 				struct hdcp2_ake_send_cert *rx_cert,
1222 				bool *paired,
1223 				struct hdcp2_ake_no_stored_km *ek_pub_km,
1224 				size_t *msg_sz)
1225 {
1226 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1227 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1228 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1229 	struct i915_hdcp_arbiter *arbiter;
1230 	int ret;
1231 
1232 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1233 	arbiter = i915->display.hdcp.arbiter;
1234 
1235 	if (!arbiter || !arbiter->ops) {
1236 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1237 		return -EINVAL;
1238 	}
1239 
1240 	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1241 							 rx_cert, paired,
1242 							 ek_pub_km, msg_sz);
1243 	if (ret < 0)
1244 		drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n",
1245 			    ret);
1246 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1247 
1248 	return ret;
1249 }
1250 
1251 static int hdcp2_verify_hprime(struct intel_connector *connector,
1252 			       struct hdcp2_ake_send_hprime *rx_hprime)
1253 {
1254 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1255 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1256 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1257 	struct i915_hdcp_arbiter *arbiter;
1258 	int ret;
1259 
1260 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1261 	arbiter = i915->display.hdcp.arbiter;
1262 
1263 	if (!arbiter || !arbiter->ops) {
1264 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1265 		return -EINVAL;
1266 	}
1267 
1268 	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1269 	if (ret < 0)
1270 		drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret);
1271 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1272 
1273 	return ret;
1274 }
1275 
1276 static int
1277 hdcp2_store_pairing_info(struct intel_connector *connector,
1278 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1279 {
1280 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1281 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1282 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1283 	struct i915_hdcp_arbiter *arbiter;
1284 	int ret;
1285 
1286 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1287 	arbiter = i915->display.hdcp.arbiter;
1288 
1289 	if (!arbiter || !arbiter->ops) {
1290 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1291 		return -EINVAL;
1292 	}
1293 
1294 	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1295 	if (ret < 0)
1296 		drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n",
1297 			    ret);
1298 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1299 
1300 	return ret;
1301 }
1302 
1303 static int
1304 hdcp2_prepare_lc_init(struct intel_connector *connector,
1305 		      struct hdcp2_lc_init *lc_init)
1306 {
1307 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1308 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1309 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1310 	struct i915_hdcp_arbiter *arbiter;
1311 	int ret;
1312 
1313 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1314 	arbiter = i915->display.hdcp.arbiter;
1315 
1316 	if (!arbiter || !arbiter->ops) {
1317 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1318 		return -EINVAL;
1319 	}
1320 
1321 	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1322 	if (ret < 0)
1323 		drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n",
1324 			    ret);
1325 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1326 
1327 	return ret;
1328 }
1329 
1330 static int
1331 hdcp2_verify_lprime(struct intel_connector *connector,
1332 		    struct hdcp2_lc_send_lprime *rx_lprime)
1333 {
1334 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1335 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1336 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1337 	struct i915_hdcp_arbiter *arbiter;
1338 	int ret;
1339 
1340 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1341 	arbiter = i915->display.hdcp.arbiter;
1342 
1343 	if (!arbiter || !arbiter->ops) {
1344 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1345 		return -EINVAL;
1346 	}
1347 
1348 	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1349 	if (ret < 0)
1350 		drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n",
1351 			    ret);
1352 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1353 
1354 	return ret;
1355 }
1356 
1357 static int hdcp2_prepare_skey(struct intel_connector *connector,
1358 			      struct hdcp2_ske_send_eks *ske_data)
1359 {
1360 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1361 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1362 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1363 	struct i915_hdcp_arbiter *arbiter;
1364 	int ret;
1365 
1366 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1367 	arbiter = i915->display.hdcp.arbiter;
1368 
1369 	if (!arbiter || !arbiter->ops) {
1370 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1371 		return -EINVAL;
1372 	}
1373 
1374 	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1375 	if (ret < 0)
1376 		drm_dbg_kms(&i915->drm, "Get session key failed. %d\n",
1377 			    ret);
1378 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1379 
1380 	return ret;
1381 }
1382 
1383 static int
1384 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1385 				      struct hdcp2_rep_send_receiverid_list
1386 								*rep_topology,
1387 				      struct hdcp2_rep_send_ack *rep_send_ack)
1388 {
1389 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1390 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1391 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1392 	struct i915_hdcp_arbiter *arbiter;
1393 	int ret;
1394 
1395 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1396 	arbiter = i915->display.hdcp.arbiter;
1397 
1398 	if (!arbiter || !arbiter->ops) {
1399 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1400 		return -EINVAL;
1401 	}
1402 
1403 	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1404 							    data,
1405 							    rep_topology,
1406 							    rep_send_ack);
1407 	if (ret < 0)
1408 		drm_dbg_kms(&i915->drm,
1409 			    "Verify rep topology failed. %d\n", ret);
1410 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1411 
1412 	return ret;
1413 }
1414 
1415 static int
1416 hdcp2_verify_mprime(struct intel_connector *connector,
1417 		    struct hdcp2_rep_stream_ready *stream_ready)
1418 {
1419 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1420 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1421 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1422 	struct i915_hdcp_arbiter *arbiter;
1423 	int ret;
1424 
1425 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1426 	arbiter = i915->display.hdcp.arbiter;
1427 
1428 	if (!arbiter || !arbiter->ops) {
1429 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1430 		return -EINVAL;
1431 	}
1432 
1433 	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1434 	if (ret < 0)
1435 		drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret);
1436 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1437 
1438 	return ret;
1439 }
1440 
1441 static int hdcp2_authenticate_port(struct intel_connector *connector)
1442 {
1443 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1444 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1445 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1446 	struct i915_hdcp_arbiter *arbiter;
1447 	int ret;
1448 
1449 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1450 	arbiter = i915->display.hdcp.arbiter;
1451 
1452 	if (!arbiter || !arbiter->ops) {
1453 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1454 		return -EINVAL;
1455 	}
1456 
1457 	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1458 	if (ret < 0)
1459 		drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n",
1460 			    ret);
1461 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1462 
1463 	return ret;
1464 }
1465 
1466 static int hdcp2_close_session(struct intel_connector *connector)
1467 {
1468 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1469 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1470 	struct i915_hdcp_arbiter *arbiter;
1471 	int ret;
1472 
1473 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1474 	arbiter = i915->display.hdcp.arbiter;
1475 
1476 	if (!arbiter || !arbiter->ops) {
1477 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1478 		return -EINVAL;
1479 	}
1480 
1481 	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1482 					     &dig_port->hdcp_port_data);
1483 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1484 
1485 	return ret;
1486 }
1487 
/* Deauthentication amounts to closing the arbiter's HDCP 2.2 session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}
1492 
1493 /* Authentication flow starts from here */
1494 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1495 {
1496 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1497 	struct intel_hdcp *hdcp = &connector->hdcp;
1498 	union {
1499 		struct hdcp2_ake_init ake_init;
1500 		struct hdcp2_ake_send_cert send_cert;
1501 		struct hdcp2_ake_no_stored_km no_stored_km;
1502 		struct hdcp2_ake_send_hprime send_hprime;
1503 		struct hdcp2_ake_send_pairing_info pairing_info;
1504 	} msgs;
1505 	const struct intel_hdcp_shim *shim = hdcp->shim;
1506 	size_t size;
1507 	int ret;
1508 
1509 	/* Init for seq_num */
1510 	hdcp->seq_num_v = 0;
1511 	hdcp->seq_num_m = 0;
1512 
1513 	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1514 	if (ret < 0)
1515 		return ret;
1516 
1517 	ret = shim->write_2_2_msg(connector, &msgs.ake_init,
1518 				  sizeof(msgs.ake_init));
1519 	if (ret < 0)
1520 		return ret;
1521 
1522 	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
1523 				 &msgs.send_cert, sizeof(msgs.send_cert));
1524 	if (ret < 0)
1525 		return ret;
1526 
1527 	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1528 		drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n");
1529 		return -EINVAL;
1530 	}
1531 
1532 	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1533 
1534 	if (drm_hdcp_check_ksvs_revoked(&i915->drm,
1535 					msgs.send_cert.cert_rx.receiver_id,
1536 					1) > 0) {
1537 		drm_err(&i915->drm, "Receiver ID is revoked\n");
1538 		return -EPERM;
1539 	}
1540 
1541 	/*
1542 	 * Here msgs.no_stored_km will hold msgs corresponding to the km
1543 	 * stored also.
1544 	 */
1545 	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1546 					      &hdcp->is_paired,
1547 					      &msgs.no_stored_km, &size);
1548 	if (ret < 0)
1549 		return ret;
1550 
1551 	ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
1552 	if (ret < 0)
1553 		return ret;
1554 
1555 	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
1556 				 &msgs.send_hprime, sizeof(msgs.send_hprime));
1557 	if (ret < 0)
1558 		return ret;
1559 
1560 	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1561 	if (ret < 0)
1562 		return ret;
1563 
1564 	if (!hdcp->is_paired) {
1565 		/* Pairing is required */
1566 		ret = shim->read_2_2_msg(connector,
1567 					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
1568 					 &msgs.pairing_info,
1569 					 sizeof(msgs.pairing_info));
1570 		if (ret < 0)
1571 			return ret;
1572 
1573 		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1574 		if (ret < 0)
1575 			return ret;
1576 		hdcp->is_paired = true;
1577 	}
1578 
1579 	return 0;
1580 }
1581 
/*
 * HDCP 2.2 locality check: run the LC_Init -> LC_Send_L_Prime -> verify
 * round-trip, retrying the entire sequence up to HDCP2_LC_RETRY_CNT times
 * on any failure. Returns the last attempt's result.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(connector, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1616 
1617 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1618 {
1619 	struct intel_hdcp *hdcp = &connector->hdcp;
1620 	struct hdcp2_ske_send_eks send_eks;
1621 	int ret;
1622 
1623 	ret = hdcp2_prepare_skey(connector, &send_eks);
1624 	if (ret < 0)
1625 		return ret;
1626 
1627 	ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1628 					sizeof(send_eks));
1629 	if (ret < 0)
1630 		return ret;
1631 
1632 	return 0;
1633 }
1634 
/*
 * Send one RepeaterAuth_Stream_Manage message describing the streams in
 * hdcp_port_data and verify the repeater's Stream_Ready (M') reply.
 * seq_num_m is advanced on every attempt, success or failure, as the spec
 * requires a fresh value per message.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Don't transmit the unused tail of the fixed-size streams array. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	/* seq_num_m advances even on failure; roll-over forces reauth. */
	hdcp->seq_num_m++;

	return ret;
}
1684 
/*
 * Read and validate the repeater's ReceiverID_List: enforce topology
 * limits, Type 1 capability, seq_num_V monotonicity and the revocation
 * list, then have the arbiter verify V' and send the ack back.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	/* Type 1 content cannot go to a non-Type-1-capable topology. */
	if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(&i915->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&i915->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&i915->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1766 
/*
 * Full HDCP 2.2 sink authentication: AKE, locality check, SKE, optional
 * stream-type configuration, and repeater topology authentication when
 * the sink reported itself as a repeater.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook to program the content stream type. */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	/* is_repeater was learned from the receiver cert during AKE. */
	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1812 
/*
 * Enable HDCP 2.2 stream encryption on an already link-encrypted port.
 * If the link turns out not to be encrypted, tear down the session
 * (link_recover) so the caller can restart authentication from scratch.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	/* Stream encryption is only valid on top of link encryption. */
	if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	/* Force re-auth and drop the recorded stream count. */
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1853 
/*
 * Turn on HDCP 2.2 link encryption: enable signalling (if the shim needs
 * it), request encryption once the link reports authenticated, and wait
 * for the hardware to confirm. Marks the port authenticated on exit.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here. */
	drm_WARN_ON(&i915->drm,
		    intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(i915,
				    HDCP2_STATUS(i915, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}
1892 
/*
 * Turn off HDCP 2.2 link encryption: drop the encryption request, wait
 * for the status bit to clear, then disable signalling via the shim.
 * NOTE(review): a wait timeout here is only logged; a subsequent
 * successful toggle_signalling() overwrites ret, so -ETIMEDOUT may not
 * be reported to the caller - confirm this is intended.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be on when we get here. */
	drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(i915,
				      HDCP2_STATUS(i915, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&i915->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1929 
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(): no-op
 * for non-repeaters, otherwise up to 3 attempts, aborting early if
 * seq_num_m has rolled over (which requires a full reauth instead).
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(&i915->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
1958 
/*
 * Full HDCP2.2 bring-up: sink authentication, stream preparation and
 * management, port authentication, then link and stream encryption.
 * Authentication is skipped entirely when the port already reports
 * hdcp_auth_status (e.g. another MST stream authenticated it).
 */
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
					  struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	/* Up to 3 full authentication attempts, unless already authenticated. */
	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			/* Stream prepare/management failures are not retried. */
			ret = intel_hdcp_prepare_streams(state, connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Prepare stream failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			/* Port auth success exits the retry loop with ret == 0. */
			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	/* Link encryption only needed if this attempt did the authentication. */
	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	/* Stream encryption runs even on the pre-authenticated fast path. */
	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
2019 
2020 static int _intel_hdcp2_enable(struct intel_atomic_state *state,
2021 			       struct intel_connector *connector)
2022 {
2023 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2024 	struct intel_hdcp *hdcp = &connector->hdcp;
2025 	int ret;
2026 
2027 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
2028 		    connector->base.base.id, connector->base.name,
2029 		    hdcp->content_type);
2030 
2031 	intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);
2032 
2033 	ret = hdcp2_authenticate_and_encrypt(state, connector);
2034 	if (ret) {
2035 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
2036 			    hdcp->content_type, ret);
2037 		return ret;
2038 	}
2039 
2040 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
2041 		    connector->base.base.id, connector->base.name,
2042 		    hdcp->content_type);
2043 
2044 	hdcp->hdcp2_encrypted = true;
2045 	return 0;
2046 }
2047 
/*
 * Disables HDCP2.2 on the connector: stream encryption first (when the
 * shim supports it, i.e. MST capable paths), then link encryption and
 * port deauthentication. hdcp2_link_recovery forces the full teardown
 * even while other streams on the port remain active.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/*
		 * Keep link encryption up while other streams on this port
		 * still use it, unless we are recovering the link itself.
		 */
		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	/* Reset the SW view of the port's auth/encryption state. */
	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
2085 
/*
 * Implements the Link Integrity Check for HDCP2.2. Runs under both the
 * connector's hdcp mutex and the port mutex; on any failure the link is
 * torn down and the CP property left DESIRED so authentication retries.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW dropped encryption behind our back: disable and re-arm auth. */
	if (drm_WARN_ON(&i915->drm,
			!intel_hdcp2_in_use(i915, cpu_transcoder, port))) {
		drm_err(&i915->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Ask the sink-specific shim whether the link is still protected. */
	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&i915->drm,
			    "HDCP2.2 Downstream topology change\n");
	} else {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
			    connector->base.base.id, connector->base.name);
	}

	/* Tear down and leave the property DESIRED so auth is retried. */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(&i915->drm,
			"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
			connector->base.base.id, connector->base.name, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2159 
2160 static void intel_hdcp_check_work(struct work_struct *work)
2161 {
2162 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2163 					       struct intel_hdcp,
2164 					       check_work);
2165 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2166 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2167 
2168 	if (drm_connector_is_unregistered(&connector->base))
2169 		return;
2170 
2171 	if (!intel_hdcp2_check_link(connector))
2172 		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2173 				   DRM_HDCP2_CHECK_PERIOD_MS);
2174 	else if (!intel_hdcp_check_link(connector))
2175 		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2176 				   DRM_HDCP_CHECK_PERIOD_MS);
2177 }
2178 
2179 static int i915_hdcp_component_bind(struct device *i915_kdev,
2180 				    struct device *mei_kdev, void *data)
2181 {
2182 	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
2183 
2184 	drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
2185 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
2186 	i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2187 	i915->display.hdcp.arbiter->hdcp_dev = mei_kdev;
2188 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2189 
2190 	return 0;
2191 }
2192 
/*
 * Component unbind callback: drops the HDCP firmware arbiter pointer
 * under the lock so no further firmware sessions can be started.
 */
static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);

	drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	i915->display.hdcp.arbiter = NULL;
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
}
2203 
/* Component ops connecting i915 with the MEI HDCP firmware interface. */
static const struct component_ops i915_hdcp_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2208 
2209 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2210 {
2211 	switch (port) {
2212 	case PORT_A:
2213 		return HDCP_DDI_A;
2214 	case PORT_B ... PORT_F:
2215 		return (enum hdcp_ddi)port;
2216 	default:
2217 		return HDCP_DDI_INVALID_PORT;
2218 	}
2219 }
2220 
2221 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2222 {
2223 	switch (cpu_transcoder) {
2224 	case TRANSCODER_A ... TRANSCODER_D:
2225 		return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2226 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2227 		return HDCP_INVALID_TRANSCODER;
2228 	}
2229 }
2230 
2231 static int initialize_hdcp_port_data(struct intel_connector *connector,
2232 				     struct intel_digital_port *dig_port,
2233 				     const struct intel_hdcp_shim *shim)
2234 {
2235 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2236 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2237 	enum port port = dig_port->base.port;
2238 
2239 	if (DISPLAY_VER(i915) < 12)
2240 		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
2241 	else
2242 		/*
2243 		 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
2244 		 * with zero(INVALID PORT index).
2245 		 */
2246 		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
2247 
2248 	/*
2249 	 * As associated transcoder is set and modified at modeset, here hdcp_transcoder
2250 	 * is initialized to zero (invalid transcoder index). This will be
2251 	 * retained for <Gen12 forever.
2252 	 */
2253 	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
2254 
2255 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2256 	data->protocol = (u8)shim->protocol;
2257 
2258 	if (!data->streams)
2259 		data->streams = kcalloc(INTEL_NUM_PIPES(i915),
2260 					sizeof(struct hdcp2_streamid_type),
2261 					GFP_KERNEL);
2262 	if (!data->streams) {
2263 		drm_err(&i915->drm, "Out of Memory\n");
2264 		return -ENOMEM;
2265 	}
2266 
2267 	return 0;
2268 }
2269 
2270 static bool is_hdcp2_supported(struct drm_i915_private *i915)
2271 {
2272 	if (intel_hdcp_gsc_cs_required(i915))
2273 		return true;
2274 
2275 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2276 		return false;
2277 
2278 	return (DISPLAY_VER(i915) >= 10 ||
2279 		IS_KABYLAKE(i915) ||
2280 		IS_COFFEELAKE(i915) ||
2281 		IS_COMETLAKE(i915));
2282 }
2283 
/*
 * Registers the HDCP firmware interface for the device: GSC CS based
 * init where required, otherwise the MEI HDCP component. On failure the
 * comp_added flag is rolled back and HDCP2.2 stays unavailable.
 */
void intel_hdcp_component_init(struct drm_i915_private *i915)
{
	int ret;

	if (!is_hdcp2_supported(i915))
		return;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added);

	/* Set comp_added optimistically; rolled back below on failure. */
	i915->display.hdcp.comp_added = true;
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
	if (intel_hdcp_gsc_cs_required(i915))
		ret = intel_hdcp_gsc_init(i915);
	else
		ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops,
					  I915_COMPONENT_HDCP);

	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n",
			    ret);
		mutex_lock(&i915->display.hdcp.hdcp_mutex);
		i915->display.hdcp.comp_added = false;
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return;
	}
}
2311 
2312 static void intel_hdcp2_init(struct intel_connector *connector,
2313 			     struct intel_digital_port *dig_port,
2314 			     const struct intel_hdcp_shim *shim)
2315 {
2316 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2317 	struct intel_hdcp *hdcp = &connector->hdcp;
2318 	int ret;
2319 
2320 	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2321 	if (ret) {
2322 		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2323 		return;
2324 	}
2325 
2326 	hdcp->hdcp2_supported = true;
2327 }
2328 
2329 int intel_hdcp_init(struct intel_connector *connector,
2330 		    struct intel_digital_port *dig_port,
2331 		    const struct intel_hdcp_shim *shim)
2332 {
2333 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2334 	struct intel_hdcp *hdcp = &connector->hdcp;
2335 	int ret;
2336 
2337 	if (!shim)
2338 		return -EINVAL;
2339 
2340 	if (is_hdcp2_supported(i915))
2341 		intel_hdcp2_init(connector, dig_port, shim);
2342 
2343 	ret =
2344 	drm_connector_attach_content_protection_property(&connector->base,
2345 							 hdcp->hdcp2_supported);
2346 	if (ret) {
2347 		hdcp->hdcp2_supported = false;
2348 		kfree(dig_port->hdcp_port_data.streams);
2349 		return ret;
2350 	}
2351 
2352 	hdcp->shim = shim;
2353 	mutex_init(&hdcp->mutex);
2354 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2355 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2356 	init_waitqueue_head(&hdcp->cp_irq_queue);
2357 
2358 	return 0;
2359 }
2360 
/*
 * Enables HDCP for the pipe/connector: tries HDCP2.2 first and falls
 * back to HDCP1.4 unless Type1 content is mandated. On success the
 * periodic link-check worker is queued and the CP property moves to
 * ENABLED. Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(&i915->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
			connector->base.base.id, connector->base.name);
		return -ENODEV;
	}

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&i915->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = (u8)conn_state->hdcp_content_type;

	/* MST encrypts on the master transcoder; SST has no stream transcoder. */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	if (DISPLAY_VER(i915) >= 12)
		dig_port->hdcp_port_data.hdcp_transcoder =
			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_get_capability(connector)) {
		ret = _intel_hdcp2_enable(state, connector);
		if (!ret)
			check_link_interval =
				DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_get_capability(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = intel_hdcp1_enable(connector);
	}

	if (!ret) {
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2433 
2434 void intel_hdcp_enable(struct intel_atomic_state *state,
2435 		       struct intel_encoder *encoder,
2436 		       const struct intel_crtc_state *crtc_state,
2437 		       const struct drm_connector_state *conn_state)
2438 {
2439 	struct intel_connector *connector =
2440 		to_intel_connector(conn_state->connector);
2441 	struct intel_hdcp *hdcp = &connector->hdcp;
2442 
2443 	/*
2444 	 * Enable hdcp if it's desired or if userspace is enabled and
2445 	 * driver set its state to undesired
2446 	 */
2447 	if (conn_state->content_protection ==
2448 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2449 	    (conn_state->content_protection ==
2450 	    DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
2451 	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2452 		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2453 }
2454 
/*
 * Disables HDCP on the connector, tearing down whichever of HDCP1.4 or
 * HDCP2.2 is currently encrypting, and marks the CP property UNDESIRED.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	/*
	 * Cancel outside the locks: check_work's link checks take
	 * hdcp->mutex themselves (see intel_hdcp2_check_link), so a
	 * locked _sync cancel could deadlock.
	 */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2483 
/*
 * Reconciles the connector's HDCP state with the new connector state on
 * a commit: disables on UNDESIRED or on an HDCP content-type change, and
 * (re)enables when the state is DESIRED and not already enabled.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (!connector->hdcp.shim)
		return;

	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* Connector ref is dropped by prop_work when it runs. */
		drm_connector_get(&connector->base);
		queue_work(i915->unordered_wq, &hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			queue_work(i915->unordered_wq, &hdcp->prop_work);
		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
2544 
2545 void intel_hdcp_component_fini(struct drm_i915_private *i915)
2546 {
2547 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
2548 	if (!i915->display.hdcp.comp_added) {
2549 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2550 		return;
2551 	}
2552 
2553 	i915->display.hdcp.comp_added = false;
2554 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2555 
2556 	if (intel_hdcp_gsc_cs_required(i915))
2557 		intel_hdcp_gsc_fini(i915);
2558 	else
2559 		component_del(i915->drm.dev, &i915_hdcp_ops);
2560 }
2561 
/*
 * Final per-connector HDCP teardown for the connector destroy path.
 * Must run after the connector is unregistered; cancels the link-check
 * worker and detaches the shim so no further HDCP ops can start.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2597 
/*
 * Atomic check hook for the content protection property: normalizes
 * ENABLED back to DESIRED where the hardware state will be lost, and
 * forces a modeset when the requested protection or HDCP content type
 * actually changes.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	/* Protection or type changed: force a full modeset to apply it. */
	crtc_state->mode_changed = true;
}
2644 
2645 /* Handles the CP_IRQ raised from the DP HDCP sink */
2646 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2647 {
2648 	struct intel_hdcp *hdcp = &connector->hdcp;
2649 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2650 
2651 	if (!hdcp->shim)
2652 		return;
2653 
2654 	atomic_inc(&connector->hdcp.cp_irq_count);
2655 	wake_up_all(&connector->hdcp.cp_irq_queue);
2656 
2657 	queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
2658 }
2659