xref: /linux/drivers/gpu/drm/i915/display/intel_hdcp.c (revision befcc89362383208f62b15887592758165459e3d)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright © 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/intel/i915_component.h>
17 
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_gsc.h"
27 #include "intel_hdcp_regs.h"
28 #include "intel_pcode.h"
29 
30 #define KEY_LOAD_TRIES	5
31 #define HDCP2_LC_RETRY_CNT			3
32 
/* WA: 16022217614 */
static void
intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
				      struct intel_hdcp *hdcp)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* Here we assume HDMI is in TMDS mode of operation */
	if (encoder->type != INTEL_OUTPUT_HDMI)
		return;

	/*
	 * Set the line-rekeying disable bit on the affected display
	 * versions/steppings. The bit lives in MTL_CHICKEN_TRANS on
	 * display 14.0 D0+, and moved to TRANS_DDI_FUNC_CTL on
	 * display 14.1 B0+ and 20.0 B0+.
	 */
	if (DISPLAY_VER(dev_priv) >= 14) {
		if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER))
			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
				     0, HDCP_LINE_REKEY_DISABLE);
		else if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) ||
			 IS_DISPLAY_VER_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER))
			intel_de_rmw(dev_priv,
				     TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder),
				     0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
	}
}
55 
intel_conn_to_vcpi(struct intel_atomic_state * state,struct intel_connector * connector)56 static int intel_conn_to_vcpi(struct intel_atomic_state *state,
57 			      struct intel_connector *connector)
58 {
59 	struct drm_dp_mst_topology_mgr *mgr;
60 	struct drm_dp_mst_atomic_payload *payload;
61 	struct drm_dp_mst_topology_state *mst_state;
62 	int vcpi = 0;
63 
64 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
65 	if (!connector->port)
66 		return 0;
67 	mgr = connector->port->mgr;
68 
69 	drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
70 	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
71 	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
72 	if (drm_WARN_ON(mgr->dev, !payload))
73 		goto out;
74 
75 	vcpi = payload->vcpi;
76 	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
77 		vcpi = 0;
78 		goto out;
79 	}
80 out:
81 	return vcpi;
82 }
83 
/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in the DP MST topology because the security f/w
 * doesn't have any provision to mark content_type for each stream separately;
 * it marks all available streams with the content_type provided at the time of
 * port authentication. This may prohibit userspace from using type1 content on
 * an HDCP 2.2 capable sink when other sinks in the DP MST topology are not
 * capable of HDCP 2.2. Though it is not compulsory, security fw should change
 * its policy to mark different content_types for different streams.
 */
94 static int
intel_hdcp_required_content_stream(struct intel_atomic_state * state,struct intel_digital_port * dig_port)95 intel_hdcp_required_content_stream(struct intel_atomic_state *state,
96 				   struct intel_digital_port *dig_port)
97 {
98 	struct drm_connector_list_iter conn_iter;
99 	struct intel_digital_port *conn_dig_port;
100 	struct intel_connector *connector;
101 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
102 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
103 	bool enforce_type0 = false;
104 	int k;
105 
106 	if (dig_port->hdcp_auth_status)
107 		return 0;
108 
109 	data->k = 0;
110 
111 	if (!dig_port->hdcp_mst_type1_capable)
112 		enforce_type0 = true;
113 
114 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
115 	for_each_intel_connector_iter(connector, &conn_iter) {
116 		if (connector->base.status == connector_status_disconnected)
117 			continue;
118 
119 		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
120 			continue;
121 
122 		conn_dig_port = intel_attached_dig_port(connector);
123 		if (conn_dig_port != dig_port)
124 			continue;
125 
126 		data->streams[data->k].stream_id =
127 			intel_conn_to_vcpi(state, connector);
128 		data->k++;
129 
130 		/* if there is only one active stream */
131 		if (dig_port->dp.active_mst_links <= 1)
132 			break;
133 	}
134 	drm_connector_list_iter_end(&conn_iter);
135 
136 	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
137 		return -EINVAL;
138 
139 	/*
140 	 * Apply common protection level across all streams in DP MST Topology.
141 	 * Use highest supported content type for all streams in DP MST Topology.
142 	 */
143 	for (k = 0; k < data->k; k++)
144 		data->streams[k].stream_type =
145 			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
146 
147 	return 0;
148 }
149 
intel_hdcp_prepare_streams(struct intel_atomic_state * state,struct intel_connector * connector)150 static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
151 				      struct intel_connector *connector)
152 {
153 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
154 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
155 	struct intel_hdcp *hdcp = &connector->hdcp;
156 
157 	if (intel_encoder_is_mst(intel_attached_encoder(connector)))
158 		return intel_hdcp_required_content_stream(state, dig_port);
159 
160 	data->k = 1;
161 	data->streams[0].stream_id = 0;
162 	data->streams[0].stream_type = hdcp->content_type;
163 
164 	return 0;
165 }
166 
167 static
intel_hdcp_is_ksv_valid(u8 * ksv)168 bool intel_hdcp_is_ksv_valid(u8 *ksv)
169 {
170 	int i, ones = 0;
171 	/* KSV has 20 1's and 20 0's */
172 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
173 		ones += hweight8(ksv[i]);
174 	if (ones != 20)
175 		return false;
176 
177 	return true;
178 }
179 
180 static
intel_hdcp_read_valid_bksv(struct intel_digital_port * dig_port,const struct intel_hdcp_shim * shim,u8 * bksv)181 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
182 			       const struct intel_hdcp_shim *shim, u8 *bksv)
183 {
184 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
185 	int ret, i, tries = 2;
186 
187 	/* HDCP spec states that we must retry the bksv if it is invalid */
188 	for (i = 0; i < tries; i++) {
189 		ret = shim->read_bksv(dig_port, bksv);
190 		if (ret)
191 			return ret;
192 		if (intel_hdcp_is_ksv_valid(bksv))
193 			break;
194 	}
195 	if (i == tries) {
196 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
197 		return -ENODEV;
198 	}
199 
200 	return 0;
201 }
202 
203 /* Is HDCP1.4 capable on Platform and Sink */
intel_hdcp_get_capability(struct intel_connector * connector)204 bool intel_hdcp_get_capability(struct intel_connector *connector)
205 {
206 	struct intel_digital_port *dig_port;
207 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
208 	bool capable = false;
209 	u8 bksv[5];
210 
211 	if (!intel_attached_encoder(connector))
212 		return capable;
213 
214 	dig_port = intel_attached_dig_port(connector);
215 
216 	if (!shim)
217 		return capable;
218 
219 	if (shim->hdcp_get_capability) {
220 		shim->hdcp_get_capability(dig_port, &capable);
221 	} else {
222 		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
223 			capable = true;
224 	}
225 
226 	return capable;
227 }
228 
229 /*
230  * Check if the source has all the building blocks ready to make
231  * HDCP 2.2 work
232  */
intel_hdcp2_prerequisite(struct intel_connector * connector)233 static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
234 {
235 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
236 	struct intel_hdcp *hdcp = &connector->hdcp;
237 
238 	/* I915 support for HDCP2.2 */
239 	if (!hdcp->hdcp2_supported)
240 		return false;
241 
242 	/* If MTL+ make sure gsc is loaded and proxy is setup */
243 	if (intel_hdcp_gsc_cs_required(i915)) {
244 		if (!intel_hdcp_gsc_check_status(i915))
245 			return false;
246 	}
247 
248 	/* MEI/GSC interface is solid depending on which is used */
249 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
250 	if (!i915->display.hdcp.comp_added ||  !i915->display.hdcp.arbiter) {
251 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
252 		return false;
253 	}
254 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
255 
256 	return true;
257 }
258 
259 /* Is HDCP2.2 capable on Platform and Sink */
intel_hdcp2_get_capability(struct intel_connector * connector)260 bool intel_hdcp2_get_capability(struct intel_connector *connector)
261 {
262 	struct intel_hdcp *hdcp = &connector->hdcp;
263 	bool capable = false;
264 
265 	if (!intel_hdcp2_prerequisite(connector))
266 		return false;
267 
268 	/* Sink's capability for HDCP2.2 */
269 	hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
270 
271 	return capable;
272 }
273 
/*
 * Query the remote (downstream) HDCP 1.4/2.2 capability via the shim.
 * Leaves the outputs untouched when the shim has no remote-capability
 * hook; forces *hdcp2_capable to false when the source side is not
 * HDCP 2.2 ready.
 */
void intel_hdcp_get_remote_capability(struct intel_connector *connector,
				      bool *hdcp_capable,
				      bool *hdcp2_capable)
{
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;

	if (!shim->get_remote_hdcp_capability)
		return;

	shim->get_remote_hdcp_capability(connector, hdcp_capable,
					 hdcp2_capable);

	if (!intel_hdcp2_prerequisite(connector))
		*hdcp2_capable = false;
}
289 
intel_hdcp_in_use(struct drm_i915_private * i915,enum transcoder cpu_transcoder,enum port port)290 static bool intel_hdcp_in_use(struct drm_i915_private *i915,
291 			      enum transcoder cpu_transcoder, enum port port)
292 {
293 	return intel_de_read(i915,
294 			     HDCP_STATUS(i915, cpu_transcoder, port)) &
295 		HDCP_STATUS_ENC;
296 }
297 
intel_hdcp2_in_use(struct drm_i915_private * i915,enum transcoder cpu_transcoder,enum port port)298 static bool intel_hdcp2_in_use(struct drm_i915_private *i915,
299 			       enum transcoder cpu_transcoder, enum port port)
300 {
301 	return intel_de_read(i915,
302 			     HDCP2_STATUS(i915, cpu_transcoder, port)) &
303 		LINK_ENCRYPTION_STATUS;
304 }
305 
/*
 * Poll the sink until its KSV FIFO reports ready, or fail.
 * Returns 0 when ready, the shim's read error, the poll error, or
 * -ETIMEDOUT when the 5s budget expires without readiness.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* The poll loop ended because the read itself failed */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
326 
hdcp_key_loadable(struct drm_i915_private * i915)327 static bool hdcp_key_loadable(struct drm_i915_private *i915)
328 {
329 	enum i915_power_well_id id;
330 	intel_wakeref_t wakeref;
331 	bool enabled = false;
332 
333 	/*
334 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
335 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
336 	 */
337 	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
338 		id = HSW_DISP_PW_GLOBAL;
339 	else
340 		id = SKL_DISP_PW_1;
341 
342 	/* PG1 (power well #1) needs to be enabled */
343 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
344 		enabled = intel_display_power_well_is_enabled(i915, id);
345 
346 	/*
347 	 * Another req for hdcp key loadability is enabled state of pll for
348 	 * cdclk. Without active crtc we wont land here. So we are assuming that
349 	 * cdclk is already on.
350 	 */
351 
352 	return enabled;
353 }
354 
intel_hdcp_clear_keys(struct drm_i915_private * i915)355 static void intel_hdcp_clear_keys(struct drm_i915_private *i915)
356 {
357 	intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
358 	intel_de_write(i915, HDCP_KEY_STATUS,
359 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
360 }
361 
/*
 * Load the HDCP 1.4 keys into HW (platform-specific trigger), wait for
 * completion, then hand Aksv to the display for authentication.
 * Returns 0 on success, -ENXIO when the keys fail to load/validate, or
 * the pcode error on display-ver-9 (non-BXT) platforms.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *i915)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded and valid. */
	val = intel_de_read(i915, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) {
		ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS,
				   HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
				   10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		/* Load finished but HW flagged the keys as bad. */
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
413 
414 /* Returns updated SHA-1 index */
intel_write_sha_text(struct drm_i915_private * i915,u32 sha_text)415 static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text)
416 {
417 	intel_de_write(i915, HDCP_SHA_TEXT, sha_text);
418 	if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
419 		drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n");
420 		return -ETIMEDOUT;
421 	}
422 	return 0;
423 }
424 
/*
 * Return the HDCP_REP_CTL bits (repeater-present + SHA1 M0 select) for
 * the authenticating link: keyed by transcoder on display ver 12+, by
 * DDI port on older platforms. Returns 0 (with an error log) for an
 * unknown transcoder/port.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
				enum transcoder cpu_transcoder, enum port port)
{
	if (DISPLAY_VER(i915) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&i915->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return 0;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&i915->drm, "Unknown port %d\n", port);
		return 0;
	}
}
466 
/*
 * Validate the repeater's V' against a HW-computed SHA-1 over the
 * downstream KSV list, BINFO/BSTATUS and M0.
 *
 * V' parts read from the sink are loaded into HDCP_SHA_V_PRIME; the
 * message text is streamed 32 bits at a time through HDCP_SHA_TEXT,
 * with HDCP_REP_CTL selecting how many of those bits are text vs M0
 * padding. Returns 0 when HW reports HDCP_SHA1_V_MATCH, -ETIMEDOUT on
 * engine timeouts, -EINVAL on impossible leftover counts, -ENXIO on a
 * SHA-1 mismatch, or the shim's error on a failed sink read.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port);
	intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(i915, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(i915,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(i915, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(i915, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(i915, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(i915, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(i915, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(i915, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(i915, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(i915, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
709 
/* Implements Part 2 of the HDCP authorization procedure */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	/* Wait for the repeater to assemble its downstream KSV list. */
	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&i915->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies exceeding the spec's device/cascade limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&i915->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Fail authentication if any downstream KSV is on the SRM. */
	if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		/* ret still holds the last validation error here */
		drm_dbg_kms(&i915->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
793 
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions give byte access to the u32 register values for the shim. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_get_capability) {
		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&i915->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(i915,
			       HDCP_ANINIT(i915, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(i915,
				  HDCP_STATUS(i915, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&i915->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read back the HW-generated An and send it to the sink. */
	an.reg[0] = intel_de_read(i915,
				  HDCP_ANLO(i915, cpu_transcoder, port));
	an.reg[1] = intel_de_read(i915,
				  HDCP_ANHI(i915, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' wait below is measured from this Aksv write. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Refuse to authenticate a sink whose Bksv is on the SRM. */
	if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) {
		drm_err(&i915->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(i915, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&i915->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(i915,
			       HDCP_RPRIME(i915, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&i915->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(i915,
					  HDCP_STATUS(i915, cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(i915,
				  HDCP_STATUS(i915, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&i915->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	/* Repeaters additionally need Part 2 (downstream KSV validation). */
	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
966 
/*
 * Tear down HDCP 1.4 on @connector: stop any per-stream encryption first,
 * and only when no other MST stream on the port still uses HDCP, disable
 * port-level encryption, clear this transcoder/port's repeater-control
 * bits and drop HDCP signalling on the link.
 *
 * Returns 0 on success, -ETIMEDOUT if the hardware never reported the
 * encryption status as cleared, or the shim's negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	/* Request encryption off, then wait for the status register to clear. */
	intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(i915,
				    HDCP_STATUS(i915, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&i915->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear only this transcoder/port's bits in the shared HDCP_REP_CTL. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder,
						   port);
	intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&i915->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&i915->drm, "HDCP is disabled\n");
	return 0;
}
1021 
intel_hdcp1_enable(struct intel_connector * connector)1022 static int intel_hdcp1_enable(struct intel_connector *connector)
1023 {
1024 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1025 	struct intel_hdcp *hdcp = &connector->hdcp;
1026 	int i, ret, tries = 3;
1027 
1028 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
1029 		    connector->base.base.id, connector->base.name);
1030 
1031 	if (!hdcp_key_loadable(i915)) {
1032 		drm_err(&i915->drm, "HDCP key Load is not possible\n");
1033 		return -ENXIO;
1034 	}
1035 
1036 	for (i = 0; i < KEY_LOAD_TRIES; i++) {
1037 		ret = intel_hdcp_load_keys(i915);
1038 		if (!ret)
1039 			break;
1040 		intel_hdcp_clear_keys(i915);
1041 	}
1042 	if (ret) {
1043 		drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n",
1044 			ret);
1045 		return ret;
1046 	}
1047 
1048 	/* Incase of authentication failures, HDCP spec expects reauth. */
1049 	for (i = 0; i < tries; i++) {
1050 		ret = intel_hdcp_auth(connector);
1051 		if (!ret) {
1052 			hdcp->hdcp_encrypted = true;
1053 			return 0;
1054 		}
1055 
1056 		drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret);
1057 
1058 		/* Ensuring HDCP encryption and signalling are stopped. */
1059 		_intel_hdcp_disable(connector);
1060 	}
1061 
1062 	drm_dbg_kms(&i915->drm,
1063 		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
1064 	return ret;
1065 }
1066 
/* Map an embedded &struct intel_hdcp back to its owning connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
1071 
/*
 * Record a new content-protection value for @connector and keep the
 * per-port count of HDCP-enabled MST streams in sync.  When
 * @update_property is set, the property worker is queued to mirror the
 * value to userspace; the worker owns (and drops) a connector reference.
 *
 * hdcp->mutex must be held; dig_port->hdcp_mutex must also be held
 * whenever the value actually changes.
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *drm = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	/* No state change, nothing to account or report. */
	if (hdcp->value == value)
		return;

	drm_WARN_ON(drm, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Leaving ENABLED drops a stream from the port; entering it adds one. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(drm, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;

	if (!update_property)
		return;

	/* prop_work drops this reference; undo it if the work never queued. */
	drm_connector_get(&connector->base);
	if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
		drm_connector_put(&connector->base);
}
1101 
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Hardware no longer reports encryption active even though we think
	 * it is: flip the property back to DESIRED and report the failure.
	 */
	if (drm_WARN_ON(&i915->drm,
			!intel_hdcp_in_use(i915, cpu_transcoder, port))) {
		drm_err(&i915->drm,
			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
			connector->base.base.id, connector->base.name,
			intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Sink-side link check passed: reassert ENABLED and we are done. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
		    connector->base.base.id, connector->base.name);

	/* Link integrity failed: disable HDCP and fall back to DESIRED. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED,
				true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1166 
/*
 * Worker that mirrors hdcp->value into the connector's content-protection
 * property.  Queued by intel_hdcp_update_value(), which takes a connector
 * reference on our behalf; we drop it at the end.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	/* Property updates require connection_mutex; take hdcp->mutex inside. */
	drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	/* Pairs with the drm_connector_get() in intel_hdcp_update_value(). */
	drm_connector_put(&connector->base);
}
1191 
is_hdcp_supported(struct drm_i915_private * i915,enum port port)1192 bool is_hdcp_supported(struct drm_i915_private *i915, enum port port)
1193 {
1194 	return DISPLAY_RUNTIME_INFO(i915)->has_hdcp &&
1195 		(DISPLAY_VER(i915) >= 12 || port < PORT_E);
1196 }
1197 
1198 static int
hdcp2_prepare_ake_init(struct intel_connector * connector,struct hdcp2_ake_init * ake_data)1199 hdcp2_prepare_ake_init(struct intel_connector *connector,
1200 		       struct hdcp2_ake_init *ake_data)
1201 {
1202 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1203 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1204 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1205 	struct i915_hdcp_arbiter *arbiter;
1206 	int ret;
1207 
1208 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1209 	arbiter = i915->display.hdcp.arbiter;
1210 
1211 	if (!arbiter || !arbiter->ops) {
1212 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1213 		return -EINVAL;
1214 	}
1215 
1216 	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1217 	if (ret)
1218 		drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n",
1219 			    ret);
1220 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1221 
1222 	return ret;
1223 }
1224 
1225 static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector * connector,struct hdcp2_ake_send_cert * rx_cert,bool * paired,struct hdcp2_ake_no_stored_km * ek_pub_km,size_t * msg_sz)1226 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1227 				struct hdcp2_ake_send_cert *rx_cert,
1228 				bool *paired,
1229 				struct hdcp2_ake_no_stored_km *ek_pub_km,
1230 				size_t *msg_sz)
1231 {
1232 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1233 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1234 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1235 	struct i915_hdcp_arbiter *arbiter;
1236 	int ret;
1237 
1238 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1239 	arbiter = i915->display.hdcp.arbiter;
1240 
1241 	if (!arbiter || !arbiter->ops) {
1242 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1243 		return -EINVAL;
1244 	}
1245 
1246 	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1247 							 rx_cert, paired,
1248 							 ek_pub_km, msg_sz);
1249 	if (ret < 0)
1250 		drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n",
1251 			    ret);
1252 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1253 
1254 	return ret;
1255 }
1256 
hdcp2_verify_hprime(struct intel_connector * connector,struct hdcp2_ake_send_hprime * rx_hprime)1257 static int hdcp2_verify_hprime(struct intel_connector *connector,
1258 			       struct hdcp2_ake_send_hprime *rx_hprime)
1259 {
1260 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1261 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1262 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1263 	struct i915_hdcp_arbiter *arbiter;
1264 	int ret;
1265 
1266 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1267 	arbiter = i915->display.hdcp.arbiter;
1268 
1269 	if (!arbiter || !arbiter->ops) {
1270 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1271 		return -EINVAL;
1272 	}
1273 
1274 	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1275 	if (ret < 0)
1276 		drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret);
1277 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1278 
1279 	return ret;
1280 }
1281 
1282 static int
hdcp2_store_pairing_info(struct intel_connector * connector,struct hdcp2_ake_send_pairing_info * pairing_info)1283 hdcp2_store_pairing_info(struct intel_connector *connector,
1284 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1285 {
1286 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1287 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1288 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1289 	struct i915_hdcp_arbiter *arbiter;
1290 	int ret;
1291 
1292 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1293 	arbiter = i915->display.hdcp.arbiter;
1294 
1295 	if (!arbiter || !arbiter->ops) {
1296 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1297 		return -EINVAL;
1298 	}
1299 
1300 	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1301 	if (ret < 0)
1302 		drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n",
1303 			    ret);
1304 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1305 
1306 	return ret;
1307 }
1308 
1309 static int
hdcp2_prepare_lc_init(struct intel_connector * connector,struct hdcp2_lc_init * lc_init)1310 hdcp2_prepare_lc_init(struct intel_connector *connector,
1311 		      struct hdcp2_lc_init *lc_init)
1312 {
1313 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1314 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1315 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1316 	struct i915_hdcp_arbiter *arbiter;
1317 	int ret;
1318 
1319 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1320 	arbiter = i915->display.hdcp.arbiter;
1321 
1322 	if (!arbiter || !arbiter->ops) {
1323 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1324 		return -EINVAL;
1325 	}
1326 
1327 	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1328 	if (ret < 0)
1329 		drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n",
1330 			    ret);
1331 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1332 
1333 	return ret;
1334 }
1335 
1336 static int
hdcp2_verify_lprime(struct intel_connector * connector,struct hdcp2_lc_send_lprime * rx_lprime)1337 hdcp2_verify_lprime(struct intel_connector *connector,
1338 		    struct hdcp2_lc_send_lprime *rx_lprime)
1339 {
1340 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1341 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1342 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1343 	struct i915_hdcp_arbiter *arbiter;
1344 	int ret;
1345 
1346 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1347 	arbiter = i915->display.hdcp.arbiter;
1348 
1349 	if (!arbiter || !arbiter->ops) {
1350 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1351 		return -EINVAL;
1352 	}
1353 
1354 	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1355 	if (ret < 0)
1356 		drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n",
1357 			    ret);
1358 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1359 
1360 	return ret;
1361 }
1362 
hdcp2_prepare_skey(struct intel_connector * connector,struct hdcp2_ske_send_eks * ske_data)1363 static int hdcp2_prepare_skey(struct intel_connector *connector,
1364 			      struct hdcp2_ske_send_eks *ske_data)
1365 {
1366 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1367 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1368 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1369 	struct i915_hdcp_arbiter *arbiter;
1370 	int ret;
1371 
1372 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1373 	arbiter = i915->display.hdcp.arbiter;
1374 
1375 	if (!arbiter || !arbiter->ops) {
1376 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1377 		return -EINVAL;
1378 	}
1379 
1380 	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1381 	if (ret < 0)
1382 		drm_dbg_kms(&i915->drm, "Get session key failed. %d\n",
1383 			    ret);
1384 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1385 
1386 	return ret;
1387 }
1388 
1389 static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector * connector,struct hdcp2_rep_send_receiverid_list * rep_topology,struct hdcp2_rep_send_ack * rep_send_ack)1390 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1391 				      struct hdcp2_rep_send_receiverid_list
1392 								*rep_topology,
1393 				      struct hdcp2_rep_send_ack *rep_send_ack)
1394 {
1395 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1396 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1397 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1398 	struct i915_hdcp_arbiter *arbiter;
1399 	int ret;
1400 
1401 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1402 	arbiter = i915->display.hdcp.arbiter;
1403 
1404 	if (!arbiter || !arbiter->ops) {
1405 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1406 		return -EINVAL;
1407 	}
1408 
1409 	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1410 							    data,
1411 							    rep_topology,
1412 							    rep_send_ack);
1413 	if (ret < 0)
1414 		drm_dbg_kms(&i915->drm,
1415 			    "Verify rep topology failed. %d\n", ret);
1416 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1417 
1418 	return ret;
1419 }
1420 
1421 static int
hdcp2_verify_mprime(struct intel_connector * connector,struct hdcp2_rep_stream_ready * stream_ready)1422 hdcp2_verify_mprime(struct intel_connector *connector,
1423 		    struct hdcp2_rep_stream_ready *stream_ready)
1424 {
1425 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1426 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1427 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1428 	struct i915_hdcp_arbiter *arbiter;
1429 	int ret;
1430 
1431 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1432 	arbiter = i915->display.hdcp.arbiter;
1433 
1434 	if (!arbiter || !arbiter->ops) {
1435 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1436 		return -EINVAL;
1437 	}
1438 
1439 	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1440 	if (ret < 0)
1441 		drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret);
1442 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1443 
1444 	return ret;
1445 }
1446 
hdcp2_authenticate_port(struct intel_connector * connector)1447 static int hdcp2_authenticate_port(struct intel_connector *connector)
1448 {
1449 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1450 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1451 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1452 	struct i915_hdcp_arbiter *arbiter;
1453 	int ret;
1454 
1455 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1456 	arbiter = i915->display.hdcp.arbiter;
1457 
1458 	if (!arbiter || !arbiter->ops) {
1459 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1460 		return -EINVAL;
1461 	}
1462 
1463 	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1464 	if (ret < 0)
1465 		drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n",
1466 			    ret);
1467 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1468 
1469 	return ret;
1470 }
1471 
hdcp2_close_session(struct intel_connector * connector)1472 static int hdcp2_close_session(struct intel_connector *connector)
1473 {
1474 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1475 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1476 	struct i915_hdcp_arbiter *arbiter;
1477 	int ret;
1478 
1479 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
1480 	arbiter = i915->display.hdcp.arbiter;
1481 
1482 	if (!arbiter || !arbiter->ops) {
1483 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1484 		return -EINVAL;
1485 	}
1486 
1487 	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1488 					     &dig_port->hdcp_port_data);
1489 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1490 
1491 	return ret;
1492 }
1493 
/* Deauthentication amounts to closing the arbiter session for the port. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}
1498 
/*
 * Authentication flow starts from here.
 *
 * AKE (Authentication and Key Exchange): exchange AKE_Init,
 * AKE_Send_Cert, km, AKE_Send_H_prime and (if unpaired) pairing info
 * with the receiver, validating each step through the HDCP arbiter.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* Messages are exchanged strictly one at a time, so share storage. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	/* AKE_Init comes from the arbiter and is written to the receiver. */
	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(connector, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	/* AKE_Send_Cert: receiver certificate plus its rx_caps. */
	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers that appear on the revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&i915->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&i915->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	/* AKE_Send_H_prime: receiver proves knowledge of km. */
	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1587 
hdcp2_locality_check(struct intel_connector * connector)1588 static int hdcp2_locality_check(struct intel_connector *connector)
1589 {
1590 	struct intel_hdcp *hdcp = &connector->hdcp;
1591 	union {
1592 		struct hdcp2_lc_init lc_init;
1593 		struct hdcp2_lc_send_lprime send_lprime;
1594 	} msgs;
1595 	const struct intel_hdcp_shim *shim = hdcp->shim;
1596 	int tries = HDCP2_LC_RETRY_CNT, ret, i;
1597 
1598 	for (i = 0; i < tries; i++) {
1599 		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1600 		if (ret < 0)
1601 			continue;
1602 
1603 		ret = shim->write_2_2_msg(connector, &msgs.lc_init,
1604 				      sizeof(msgs.lc_init));
1605 		if (ret < 0)
1606 			continue;
1607 
1608 		ret = shim->read_2_2_msg(connector,
1609 					 HDCP_2_2_LC_SEND_LPRIME,
1610 					 &msgs.send_lprime,
1611 					 sizeof(msgs.send_lprime));
1612 		if (ret < 0)
1613 			continue;
1614 
1615 		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1616 		if (!ret)
1617 			break;
1618 	}
1619 
1620 	return ret;
1621 }
1622 
hdcp2_session_key_exchange(struct intel_connector * connector)1623 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1624 {
1625 	struct intel_hdcp *hdcp = &connector->hdcp;
1626 	struct hdcp2_ske_send_eks send_eks;
1627 	int ret;
1628 
1629 	ret = hdcp2_prepare_skey(connector, &send_eks);
1630 	if (ret < 0)
1631 		return ret;
1632 
1633 	ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1634 					sizeof(send_eks));
1635 	if (ret < 0)
1636 		return ret;
1637 
1638 	return 0;
1639 }
1640 
/*
 * Send RepeaterAuth_Stream_Manage for the currently active streams and
 * verify the repeater's RepeaterAuth_Stream_Ready (M') response.
 * seq_num_m is advanced on every attempt, success or failure.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	/* seq_num_m must never exceed the protocol maximum. */
	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	/* Copy the k active stream id/type pairs into the message. */
	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Transmit only the filled-in streams, not the full max-size array. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	/* Record the seq_num_m this exchange was performed with. */
	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	/* Advance seq_num_m regardless of the outcome. */
	hdcp->seq_num_m++;

	return ret;
}
1690 
/*
 * Read and validate the repeater's RepeaterAuth_Send_ReceiverID_List:
 * check topology limits, Type 1 capability, seq_num_v monotonicity and
 * receiver-ID revocation, then acknowledge via RepeaterAuth_Send_Ack.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* Reject topologies that exceed the allowed cascade/device limits. */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(&i915->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* The first ReceiverID_List of a session must carry seq_num_v == 0. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&i915->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&i915->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	/* Let the arbiter verify the list and build the Send_Ack message. */
	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1772 
/*
 * Run the full HDCP 2.2 sink authentication: AKE, locality check,
 * session key exchange, optional stream-type configuration and, for
 * repeaters, the downstream topology authentication.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	/* Step 1: Authentication and Key Exchange. */
	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Step 2: Locality Check. */
	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	/* Step 3: Session Key Exchange. */
	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optionally inform the sink of the stream type (shim specific). */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	/* Repeaters additionally require topology authentication. */
	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1818 
/*
 * Enable HDCP 2.2 per-stream encryption, which is only valid while the
 * link itself reports LINK_ENCRYPTION_STATUS.  If the link is no longer
 * encrypted, the arbiter session is closed and the port auth state is
 * reset so a full re-authentication can take place.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	/* The link must already be encrypted before any stream can be. */
	if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	/* Close the arbiter session and forget the port's auth state. */
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1859 
/*
 * Request HDCP 2.2 link encryption on an authenticated port and wait for
 * the hardware to report LINK_ENCRYPTION_STATUS. Returns 0 on success or
 * a negative error code (e.g. wait timeout).
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be enabled when we get here. */
	drm_WARN_ON(&i915->drm,
		    intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(i915,
				    HDCP2_STATUS(i915, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	/* Auth status is latched even if the encryption wait failed. */
	dig_port->hdcp_auth_status = true;

	return ret;
}
1898 
/*
 * Turn off HDCP 2.2 link encryption: clear the encryption request, wait
 * for the status bit to drop, then disable HDCP signalling via the shim.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be on when disabling it. */
	drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(i915,
				      HDCP2_STATUS(i915, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	/* Timeout is only logged; note a successful toggle_signalling()
	 * below overwrites ret, so -ETIMEDOUT is not propagated then. */
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&i915->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1935 
1936 static int
hdcp2_propagate_stream_management_info(struct intel_connector * connector)1937 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1938 {
1939 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1940 	int i, tries = 3, ret;
1941 
1942 	if (!connector->hdcp.is_repeater)
1943 		return 0;
1944 
1945 	for (i = 0; i < tries; i++) {
1946 		ret = _hdcp2_propagate_stream_management_info(connector);
1947 		if (!ret)
1948 			break;
1949 
1950 		/* Lets restart the auth incase of seq_num_m roll over */
1951 		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1952 			drm_dbg_kms(&i915->drm,
1953 				    "seq_num_m roll over.(%d)\n", ret);
1954 			break;
1955 		}
1956 
1957 		drm_dbg_kms(&i915->drm,
1958 			    "HDCP2 stream management %d of %d Failed.(%d)\n",
1959 			    i + 1, tries, ret);
1960 	}
1961 
1962 	return ret;
1963 }
1964 
/*
 * Authenticate the HDCP 2.2 sink (up to three attempts) and then enable
 * link and stream encryption. If the port is already authenticated (e.g.
 * by a sibling MST stream), authentication and link encryption are
 * skipped and only stream encryption is enabled.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
					  struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(state, connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Prepare stream failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	/* Stream encryption is enabled last, on top of the encrypted link. */
	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
2025 
_intel_hdcp2_enable(struct intel_atomic_state * state,struct intel_connector * connector)2026 static int _intel_hdcp2_enable(struct intel_atomic_state *state,
2027 			       struct intel_connector *connector)
2028 {
2029 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2030 	struct intel_hdcp *hdcp = &connector->hdcp;
2031 	int ret;
2032 
2033 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
2034 		    connector->base.base.id, connector->base.name,
2035 		    hdcp->content_type);
2036 
2037 	intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);
2038 
2039 	ret = hdcp2_authenticate_and_encrypt(state, connector);
2040 	if (ret) {
2041 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
2042 			    hdcp->content_type, ret);
2043 		return ret;
2044 	}
2045 
2046 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
2047 		    connector->base.base.id, connector->base.name,
2048 		    hdcp->content_type);
2049 
2050 	hdcp->hdcp2_encrypted = true;
2051 	return 0;
2052 }
2053 
/*
 * Tear down HDCP 2.2 on the connector: disable stream encryption first;
 * if other MST streams still use the link (and this is not link
 * recovery), stop there. Otherwise also disable link encryption and
 * deauthenticate the port.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/* Keep the link encrypted while sibling streams remain. */
		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	/* NOTE(review): k = 0 appears to reset cached stream info - confirm */
	data->k = 0;

	return ret;
}
2091 
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Returns 0 when the link is healthy (or was successfully put back into
 * the DESIRED state for re-authentication), a negative error code when
 * HDCP2.2 is not active or the hardware lost encryption.
 *
 * Locking: takes hdcp->mutex then dig_port->hdcp_mutex, in that order.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware dropped encryption behind our back: disable and retry. */
	if (drm_WARN_ON(&i915->drm,
			!intel_hdcp2_in_use(i915, cpu_transcoder, port))) {
		drm_err(&i915->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&i915->drm,
			    "HDCP2.2 Downstream topology change\n");
	} else {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
			    connector->base.base.id, connector->base.name);
	}

	/* Link failed or topology changed: disable and mark DESIRED so
	 * that a fresh authentication will be attempted. */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(&i915->drm,
			"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
			connector->base.base.id, connector->base.name, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2165 
intel_hdcp_check_work(struct work_struct * work)2166 static void intel_hdcp_check_work(struct work_struct *work)
2167 {
2168 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2169 					       struct intel_hdcp,
2170 					       check_work);
2171 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2172 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2173 
2174 	if (drm_connector_is_unregistered(&connector->base))
2175 		return;
2176 
2177 	if (!intel_hdcp2_check_link(connector))
2178 		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2179 				   DRM_HDCP2_CHECK_PERIOD_MS);
2180 	else if (!intel_hdcp_check_link(connector))
2181 		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2182 				   DRM_HDCP_CHECK_PERIOD_MS);
2183 }
2184 
i915_hdcp_component_bind(struct device * drv_kdev,struct device * mei_kdev,void * data)2185 static int i915_hdcp_component_bind(struct device *drv_kdev,
2186 				    struct device *mei_kdev, void *data)
2187 {
2188 	struct intel_display *display = to_intel_display(drv_kdev);
2189 	struct drm_i915_private *i915 = to_i915(display->drm);
2190 
2191 	drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
2192 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
2193 	i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2194 	i915->display.hdcp.arbiter->hdcp_dev = mei_kdev;
2195 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2196 
2197 	return 0;
2198 }
2199 
i915_hdcp_component_unbind(struct device * drv_kdev,struct device * mei_kdev,void * data)2200 static void i915_hdcp_component_unbind(struct device *drv_kdev,
2201 				       struct device *mei_kdev, void *data)
2202 {
2203 	struct intel_display *display = to_intel_display(drv_kdev);
2204 	struct drm_i915_private *i915 = to_i915(display->drm);
2205 
2206 	drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
2207 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
2208 	i915->display.hdcp.arbiter = NULL;
2209 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2210 }
2211 
/* Component ops tying the MEI HDCP firmware driver to i915. */
static const struct component_ops i915_hdcp_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2216 
intel_get_hdcp_ddi_index(enum port port)2217 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2218 {
2219 	switch (port) {
2220 	case PORT_A:
2221 		return HDCP_DDI_A;
2222 	case PORT_B ... PORT_F:
2223 		return (enum hdcp_ddi)port;
2224 	default:
2225 		return HDCP_DDI_INVALID_PORT;
2226 	}
2227 }
2228 
intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)2229 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2230 {
2231 	switch (cpu_transcoder) {
2232 	case TRANSCODER_A ... TRANSCODER_D:
2233 		return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2234 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2235 		return HDCP_INVALID_TRANSCODER;
2236 	}
2237 }
2238 
initialize_hdcp_port_data(struct intel_connector * connector,struct intel_digital_port * dig_port,const struct intel_hdcp_shim * shim)2239 static int initialize_hdcp_port_data(struct intel_connector *connector,
2240 				     struct intel_digital_port *dig_port,
2241 				     const struct intel_hdcp_shim *shim)
2242 {
2243 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2244 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2245 	enum port port = dig_port->base.port;
2246 
2247 	if (DISPLAY_VER(i915) < 12)
2248 		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
2249 	else
2250 		/*
2251 		 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
2252 		 * with zero(INVALID PORT index).
2253 		 */
2254 		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
2255 
2256 	/*
2257 	 * As associated transcoder is set and modified at modeset, here hdcp_transcoder
2258 	 * is initialized to zero (invalid transcoder index). This will be
2259 	 * retained for <Gen12 forever.
2260 	 */
2261 	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
2262 
2263 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2264 	data->protocol = (u8)shim->protocol;
2265 
2266 	if (!data->streams)
2267 		data->streams = kcalloc(INTEL_NUM_PIPES(i915),
2268 					sizeof(struct hdcp2_streamid_type),
2269 					GFP_KERNEL);
2270 	if (!data->streams) {
2271 		drm_err(&i915->drm, "Out of Memory\n");
2272 		return -ENOMEM;
2273 	}
2274 
2275 	return 0;
2276 }
2277 
is_hdcp2_supported(struct drm_i915_private * i915)2278 static bool is_hdcp2_supported(struct drm_i915_private *i915)
2279 {
2280 	if (intel_hdcp_gsc_cs_required(i915))
2281 		return true;
2282 
2283 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2284 		return false;
2285 
2286 	return (DISPLAY_VER(i915) >= 10 ||
2287 		IS_KABYLAKE(i915) ||
2288 		IS_COFFEELAKE(i915) ||
2289 		IS_COMETLAKE(i915));
2290 }
2291 
intel_hdcp_component_init(struct drm_i915_private * i915)2292 void intel_hdcp_component_init(struct drm_i915_private *i915)
2293 {
2294 	int ret;
2295 
2296 	if (!is_hdcp2_supported(i915))
2297 		return;
2298 
2299 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
2300 	drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added);
2301 
2302 	i915->display.hdcp.comp_added = true;
2303 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2304 	if (intel_hdcp_gsc_cs_required(i915))
2305 		ret = intel_hdcp_gsc_init(i915);
2306 	else
2307 		ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops,
2308 					  I915_COMPONENT_HDCP);
2309 
2310 	if (ret < 0) {
2311 		drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n",
2312 			    ret);
2313 		mutex_lock(&i915->display.hdcp.hdcp_mutex);
2314 		i915->display.hdcp.comp_added = false;
2315 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2316 		return;
2317 	}
2318 }
2319 
intel_hdcp2_init(struct intel_connector * connector,struct intel_digital_port * dig_port,const struct intel_hdcp_shim * shim)2320 static void intel_hdcp2_init(struct intel_connector *connector,
2321 			     struct intel_digital_port *dig_port,
2322 			     const struct intel_hdcp_shim *shim)
2323 {
2324 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2325 	struct intel_hdcp *hdcp = &connector->hdcp;
2326 	int ret;
2327 
2328 	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2329 	if (ret) {
2330 		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2331 		return;
2332 	}
2333 
2334 	hdcp->hdcp2_supported = true;
2335 }
2336 
intel_hdcp_init(struct intel_connector * connector,struct intel_digital_port * dig_port,const struct intel_hdcp_shim * shim)2337 int intel_hdcp_init(struct intel_connector *connector,
2338 		    struct intel_digital_port *dig_port,
2339 		    const struct intel_hdcp_shim *shim)
2340 {
2341 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2342 	struct intel_hdcp *hdcp = &connector->hdcp;
2343 	int ret;
2344 
2345 	if (!shim)
2346 		return -EINVAL;
2347 
2348 	if (is_hdcp2_supported(i915))
2349 		intel_hdcp2_init(connector, dig_port, shim);
2350 
2351 	ret =
2352 	drm_connector_attach_content_protection_property(&connector->base,
2353 							 hdcp->hdcp2_supported);
2354 	if (ret) {
2355 		hdcp->hdcp2_supported = false;
2356 		kfree(dig_port->hdcp_port_data.streams);
2357 		return ret;
2358 	}
2359 
2360 	hdcp->shim = shim;
2361 	mutex_init(&hdcp->mutex);
2362 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2363 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2364 	init_waitqueue_head(&hdcp->cp_irq_queue);
2365 
2366 	return 0;
2367 }
2368 
/*
 * Enable content protection on the connector: prefer HDCP2.2 and fall
 * back to HDCP1.4 when 2.2 fails and the requested content type allows
 * it. On success the periodic link check is scheduled and the uapi
 * property is moved to ENABLED.
 *
 * Locking: takes hdcp->mutex then dig_port->hdcp_mutex, in that order.
 */
static int _intel_hdcp_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(&i915->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
			connector->base.base.id, connector->base.name);
		return -ENODEV;
	}

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&i915->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = (u8)conn_state->hdcp_content_type;

	/* For MST, authentication runs on the master transcoder. */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	if (DISPLAY_VER(i915) >= 12)
		dig_port->hdcp_port_data.hdcp_transcoder =
			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_get_capability(connector)) {
		ret = _intel_hdcp2_enable(state, connector);
		if (!ret)
			check_link_interval =
				DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_get_capability(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = intel_hdcp1_enable(connector);
	}

	/* Schedule the link check and publish ENABLED to userspace. */
	if (!ret) {
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2441 
intel_hdcp_enable(struct intel_atomic_state * state,struct intel_encoder * encoder,const struct intel_crtc_state * crtc_state,const struct drm_connector_state * conn_state)2442 void intel_hdcp_enable(struct intel_atomic_state *state,
2443 		       struct intel_encoder *encoder,
2444 		       const struct intel_crtc_state *crtc_state,
2445 		       const struct drm_connector_state *conn_state)
2446 {
2447 	struct intel_connector *connector =
2448 		to_intel_connector(conn_state->connector);
2449 	struct intel_hdcp *hdcp = &connector->hdcp;
2450 
2451 	/*
2452 	 * Enable hdcp if it's desired or if userspace is enabled and
2453 	 * driver set its state to undesired
2454 	 */
2455 	if (conn_state->content_protection ==
2456 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2457 	    (conn_state->content_protection ==
2458 	    DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
2459 	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2460 		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2461 }
2462 
/*
 * Disable any active HDCP (2.2 takes precedence over 1.4) and mark the
 * uapi property UNDESIRED. Returns 0 when nothing was enabled or on
 * successful disable, negative error code otherwise.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	/* Cancel outside the locks: the check worker takes hdcp->mutex. */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2491 
/*
 * Re-evaluate HDCP across a modeset/commit: disable when userspace asked
 * for UNDESIRED or the requested content type changed, then re-enable as
 * needed and keep the uapi property in sync via prop_work.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (!connector->hdcp.shim)
		return;

	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* prop_work holds a connector ref; drop it if not queued. */
		drm_connector_get(&connector->base);
		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
			drm_connector_put(&connector->base);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
				drm_connector_put(&connector->base);

		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
2555 
intel_hdcp_component_fini(struct drm_i915_private * i915)2556 void intel_hdcp_component_fini(struct drm_i915_private *i915)
2557 {
2558 	mutex_lock(&i915->display.hdcp.hdcp_mutex);
2559 	if (!i915->display.hdcp.comp_added) {
2560 		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2561 		return;
2562 	}
2563 
2564 	i915->display.hdcp.comp_added = false;
2565 	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2566 
2567 	if (intel_hdcp_gsc_cs_required(i915))
2568 		intel_hdcp_gsc_fini(i915);
2569 	else
2570 		component_del(i915->drm.dev, &i915_hdcp_ops);
2571 }
2572 
/*
 * Final connector HDCP teardown, for the connector destroy path; the
 * connector must already be unregistered so no new work can be kicked off.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	/* Clearing shim marks the connector as HDCP-free from here on. */
	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2608 
/*
 * Atomic check hook: normalize the content protection uapi state in the
 * new connector state and force a modeset when (re)authentication will
 * be required by the requested change.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	/* Anything else needs a full modeset to (re)establish HDCP. */
	crtc_state->mode_changed = true;
}
2655 
2656 /* Handles the CP_IRQ raised from the DP HDCP sink */
intel_hdcp_handle_cp_irq(struct intel_connector * connector)2657 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2658 {
2659 	struct intel_hdcp *hdcp = &connector->hdcp;
2660 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2661 
2662 	if (!hdcp->shim)
2663 		return;
2664 
2665 	atomic_inc(&connector->hdcp.cp_irq_count);
2666 	wake_up_all(&connector->hdcp.cp_irq_queue);
2667 
2668 	queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
2669 }
2670