1 /* SPDX-License-Identifier: MIT */
2 /*
3 * Copyright (C) 2017 Google, Inc.
4 * Copyright (C) 2017-2019, Intel Corporation.
5 *
6 * Authors:
7 * Sean Paul <seanpaul@chromium.org>
8 * Ramalingam C <ramalingam.c@intel.com>
9 */
10
11 #include <linux/component.h>
12 #include <linux/debugfs.h>
13 #include <linux/i2c.h>
14 #include <linux/random.h>
15
16 #include <drm/display/drm_hdcp_helper.h>
17 #include <drm/intel/i915_component.h>
18
19 #include "i915_drv.h"
20 #include "i915_reg.h"
21 #include "intel_connector.h"
22 #include "intel_de.h"
23 #include "intel_display_power.h"
24 #include "intel_display_power_well.h"
25 #include "intel_display_types.h"
26 #include "intel_hdcp.h"
27 #include "intel_hdcp_gsc.h"
28 #include "intel_hdcp_regs.h"
29 #include "intel_hdcp_shim.h"
30 #include "intel_pcode.h"
31
32 #define KEY_LOAD_TRIES 5
33 #define HDCP2_LC_RETRY_CNT 3
34
35 static void
36 intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
37 struct intel_hdcp *hdcp,
38 bool enable)
39 {
40 struct intel_display *display = to_intel_display(encoder);
41 i915_reg_t rekey_reg;
42 u32 rekey_bit = 0;
43
44 /* Here we assume HDMI is in TMDS mode of operation */
45 if (!intel_encoder_is_hdmi(encoder))
46 return;
47
48 if (DISPLAY_VER(display) >= 30) {
49 rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
50 rekey_bit = XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
51 } else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
52 IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) {
53 rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
54 rekey_bit = TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
55 } else if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) {
56 rekey_reg = CHICKEN_TRANS(display, hdcp->cpu_transcoder);
57 rekey_bit = HDCP_LINE_REKEY_DISABLE;
58 }
59
60 if (rekey_bit)
61 intel_de_rmw(display, rekey_reg, rekey_bit, enable ? 0 : rekey_bit);
62 }
63
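/*
 * Return the DP MST VCPI used as the HDCP stream id for this connector,
 * or 0 for HDMI and DP SST.
 */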
64 static int intel_conn_to_vcpi(struct intel_atomic_state *state,
65 struct intel_connector *connector)
66 {
67 struct drm_dp_mst_topology_mgr *mgr;
68 struct drm_dp_mst_atomic_payload *payload;
69 struct drm_dp_mst_topology_state *mst_state;
70 int vcpi = 0;
71
72 /* For HDMI this is forced to 0x0. For DP SST, this is also 0x0. */
73 if (!connector->mst.port)
74 return 0;
75 mgr = connector->mst.port->mgr;
76
77 drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
78 mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
79 payload = drm_atomic_get_mst_payload_state(mst_state, connector->mst.port);
80 if (drm_WARN_ON(mgr->dev, !payload))
81 goto out;
82
83 vcpi = payload->vcpi;
84 if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
85 vcpi = 0;
86 goto out;
87 }
88 out:
89 return vcpi;
90 }
91
92 /*
93 * intel_hdcp_required_content_stream selects the highest common HDCP
94 * content_type possible for all streams in the DP MST topology, because the
95 * security f/w has no provision to mark a content_type for each stream
96 * separately; it marks all available streams with the content_type provided
97 * at the time of port authentication. This may prevent userspace from using
98 * type1 content on an HDCP 2.2 capable sink when other sinks in the DP MST
99 * topology are not HDCP 2.2 capable. Though it is not compulsory, the security
100 * f/w should change its policy to mark different content_types for different streams.
101 */
102 static int
103 intel_hdcp_required_content_stream(struct intel_atomic_state *state,
104 struct intel_digital_port *dig_port)
105 {
106 struct intel_display *display = to_intel_display(state);
107 struct drm_connector_list_iter conn_iter;
108 struct intel_digital_port *conn_dig_port;
109 struct intel_connector *connector;
110 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
111 bool enforce_type0 = false;
112 int k;
113
114 if (dig_port->hdcp.auth_status)
115 return 0;
116
117 data->k = 0;
118
119 if (!dig_port->hdcp.mst_type1_capable)
120 enforce_type0 = true;
121
122 drm_connector_list_iter_begin(display->drm, &conn_iter);
123 for_each_intel_connector_iter(connector, &conn_iter) {
124 if (connector->base.status == connector_status_disconnected)
125 continue;
126
127 if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
128 continue;
129
130 conn_dig_port = intel_attached_dig_port(connector);
131 if (conn_dig_port != dig_port)
132 continue;
133
134 data->streams[data->k].stream_id =
135 intel_conn_to_vcpi(state, connector);
136 data->k++;
137
138 /* if there is only one active stream */
139 if (dig_port->dp.mst.active_links <= 1)
140 break;
141 }
142 drm_connector_list_iter_end(&conn_iter);
143
144 if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0))
145 return -EINVAL;
146
147 /*
148 * Apply common protection level across all streams in DP MST Topology.
149 * Use highest supported content type for all streams in DP MST Topology.
150 */
151 for (k = 0; k < data->k; k++)
152 data->streams[k].stream_type =
153 enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
154
155 return 0;
156 }
157
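/*
 * Fill the hdcp_port_data stream array: all active streams on the port for
 * DP MST, or a single stream (id 0) carrying the requested content_type
 * otherwise.
 */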
158 static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
159 struct intel_connector *connector)
160 {
161 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
162 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
163 struct intel_hdcp *hdcp = &connector->hdcp;
164
165 if (intel_encoder_is_mst(intel_attached_encoder(connector)))
166 return intel_hdcp_required_content_stream(state, dig_port);
167
168 data->k = 1;
169 data->streams[0].stream_id = 0;
170 data->streams[0].stream_type = hdcp->content_type;
171
172 return 0;
173 }
174
175 static
176 bool intel_hdcp_is_ksv_valid(u8 *ksv)
177 {
178 int i, ones = 0;
179 /* KSV has 20 1's and 20 0's */
180 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
181 ones += hweight8(ksv[i]);
182 if (ones != 20)
183 return false;
184
185 return true;
186 }
187
188 static
189 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
190 const struct intel_hdcp_shim *shim, u8 *bksv)
191 {
192 struct intel_display *display = to_intel_display(dig_port);
193 int ret, i, tries = 2;
194
195 /* HDCP spec states that we must retry the bksv if it is invalid */
196 for (i = 0; i < tries; i++) {
197 ret = shim->read_bksv(dig_port, bksv);
198 if (ret)
199 return ret;
200 if (intel_hdcp_is_ksv_valid(bksv))
201 break;
202 }
203 if (i == tries) {
204 drm_dbg_kms(display->drm, "Bksv is invalid\n");
205 return -ENODEV;
206 }
207
208 return 0;
209 }
210
211 /* Is HDCP1.4 capable on Platform and Sink */
212 static bool intel_hdcp_get_capability(struct intel_connector *connector)
213 {
214 struct intel_digital_port *dig_port;
215 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
216 bool capable = false;
217 u8 bksv[5];
218
219 if (!intel_attached_encoder(connector))
220 return capable;
221
222 dig_port = intel_attached_dig_port(connector);
223
224 if (!shim)
225 return capable;
226
227 if (shim->hdcp_get_capability) {
228 shim->hdcp_get_capability(dig_port, &capable);
229 } else {
230 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
231 capable = true;
232 }
233
234 return capable;
235 }
236
237 /*
238 * Check if the source has all the building blocks ready to make
239 * HDCP 2.2 work
240 */
241 static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
242 {
243 struct intel_display *display = to_intel_display(connector);
244 struct intel_hdcp *hdcp = &connector->hdcp;
245
246 /* I915 support for HDCP2.2 */
247 if (!hdcp->hdcp2_supported)
248 return false;
249
250 /* For MTL+, make sure the GSC is loaded and the proxy is set up */
251 if (intel_hdcp_gsc_cs_required(display)) {
252 if (!intel_hdcp_gsc_check_status(display))
253 return false;
254 }
255
256 /* The MEI/GSC interface (whichever is used) must be up and registered */
257 mutex_lock(&display->hdcp.hdcp_mutex);
258 if (!display->hdcp.comp_added || !display->hdcp.arbiter) {
259 mutex_unlock(&display->hdcp.hdcp_mutex);
260 return false;
261 }
262 mutex_unlock(&display->hdcp.hdcp_mutex);
263
264 return true;
265 }
266
267 /* Is HDCP2.2 capable on Platform and Sink */
268 static bool intel_hdcp2_get_capability(struct intel_connector *connector)
269 {
270 struct intel_hdcp *hdcp = &connector->hdcp;
271 bool capable = false;
272
273 if (!intel_hdcp2_prerequisite(connector))
274 return false;
275
276 /* Sink's capability for HDCP2.2 */
277 hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
278
279 return capable;
280 }
281
282 static void intel_hdcp_get_remote_capability(struct intel_connector *connector,
283 bool *hdcp_capable,
284 bool *hdcp2_capable)
285 {
286 struct intel_hdcp *hdcp = &connector->hdcp;
287
288 if (!hdcp->shim->get_remote_hdcp_capability)
289 return;
290
291 hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
292 hdcp2_capable);
293
294 if (!intel_hdcp2_prerequisite(connector))
295 *hdcp2_capable = false;
296 }
297
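/* Read back whether HDCP 1.4 encryption is currently enabled on this transcoder/port */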
298 static bool intel_hdcp_in_use(struct intel_display *display,
299 enum transcoder cpu_transcoder, enum port port)
300 {
301 return intel_de_read(display,
302 HDCP_STATUS(display, cpu_transcoder, port)) &
303 HDCP_STATUS_ENC;
304 }
305
306 static bool intel_hdcp2_in_use(struct intel_display *display,
307 enum transcoder cpu_transcoder, enum port port)
308 {
309 return intel_de_read(display,
310 HDCP2_STATUS(display, cpu_transcoder, port)) &
311 LINK_ENCRYPTION_STATUS;
312 }
313
314 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
315 const struct intel_hdcp_shim *shim)
316 {
317 int ret, read_ret;
318 bool ksv_ready;
319
320 /* Poll for ksv list ready (spec says max time allowed is 5s) */
321 ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
322 &ksv_ready),
323 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
324 100 * 1000);
325 if (ret)
326 return ret;
327 if (read_ret)
328 return read_ret;
329 if (!ksv_ready)
330 return -ETIMEDOUT;
331
332 return 0;
333 }
334
335 static bool hdcp_key_loadable(struct intel_display *display)
336 {
337 struct drm_i915_private *i915 = to_i915(display->drm);
338 enum i915_power_well_id id;
339 intel_wakeref_t wakeref;
340 bool enabled = false;
341
342 /*
343 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
344 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
345 */
346 if (display->platform.haswell || display->platform.broadwell)
347 id = HSW_DISP_PW_GLOBAL;
348 else
349 id = SKL_DISP_PW_1;
350
351 /* PG1 (power well #1) needs to be enabled */
352 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
353 enabled = intel_display_power_well_is_enabled(display, id);
354
355 /*
356 * Another requirement for HDCP key loadability is that the PLL for
357 * cdclk is enabled. We won't land here without an active crtc, so we
358 * assume cdclk is already on.
359 */
360
361 return enabled;
362 }
363
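/* Clear the HDCP keys and any stale key/fuse status bits before retrying a key load */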
364 static void intel_hdcp_clear_keys(struct intel_display *display)
365 {
366 intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
367 intel_de_write(display, HDCP_KEY_STATUS,
368 HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
369 }
370
371 static int intel_hdcp_load_keys(struct intel_display *display)
372 {
373 struct drm_i915_private *i915 = to_i915(display->drm);
374 int ret;
375 u32 val;
376
377 val = intel_de_read(display, HDCP_KEY_STATUS);
378 if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
379 return 0;
380
381 /*
382 * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
383 * out of reset. So if the key is not already loaded, it's an error state.
384 */
385 if (display->platform.haswell || display->platform.broadwell)
386 if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
387 return -ENXIO;
388
389 /*
390 * Initiate loading the HDCP key from fuses.
391 *
392 * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only display
393 * version 9 platforms (minus BXT) differ in the key load trigger
394 * process from other platforms. These platforms use the GT Driver
395 * Mailbox interface.
396 */
397 if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
398 ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
399 if (ret) {
400 drm_err(display->drm,
401 "Failed to initiate HDCP key load (%d)\n",
402 ret);
403 return ret;
404 }
405 } else {
406 intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
407 }
408
409 /* Wait for the keys to load (500us) */
410 ret = intel_de_wait_custom(display, HDCP_KEY_STATUS,
411 HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
412 10, 1, &val);
413 if (ret)
414 return ret;
415 else if (!(val & HDCP_KEY_LOAD_STATUS))
416 return -ENXIO;
417
418 /* Send Aksv over to PCH display for use in authentication */
419 intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
420
421 return 0;
422 }
423
424 /* Returns updated SHA-1 index */
425 static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
426 {
427 intel_de_write(display, HDCP_SHA_TEXT, sha_text);
428 if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
429 drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
430 return -ETIMEDOUT;
431 }
432 return 0;
433 }
434
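/*
 * Select the REP_PRESENT | SHA1_M0 bits for the transcoder (display version
 * 12+) or for the DDI port (older platforms).
 */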
435 static
436 u32 intel_hdcp_get_repeater_ctl(struct intel_display *display,
437 enum transcoder cpu_transcoder, enum port port)
438 {
439 if (DISPLAY_VER(display) >= 12) {
440 switch (cpu_transcoder) {
441 case TRANSCODER_A:
442 return HDCP_TRANSA_REP_PRESENT |
443 HDCP_TRANSA_SHA1_M0;
444 case TRANSCODER_B:
445 return HDCP_TRANSB_REP_PRESENT |
446 HDCP_TRANSB_SHA1_M0;
447 case TRANSCODER_C:
448 return HDCP_TRANSC_REP_PRESENT |
449 HDCP_TRANSC_SHA1_M0;
450 case TRANSCODER_D:
451 return HDCP_TRANSD_REP_PRESENT |
452 HDCP_TRANSD_SHA1_M0;
453 default:
454 drm_err(display->drm, "Unknown transcoder %d\n",
455 cpu_transcoder);
456 return 0;
457 }
458 }
459
460 switch (port) {
461 case PORT_A:
462 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
463 case PORT_B:
464 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
465 case PORT_C:
466 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
467 case PORT_D:
468 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
469 case PORT_E:
470 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
471 default:
472 drm_err(display->drm, "Unknown port %d\n", port);
473 return 0;
474 }
475 }
476
477 static
478 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
479 const struct intel_hdcp_shim *shim,
480 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
481 {
482 struct intel_display *display = to_intel_display(connector);
483 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
484 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
485 enum port port = dig_port->base.port;
486 u32 vprime, sha_text, sha_leftovers, rep_ctl;
487 int ret, i, j, sha_idx;
488
489 /* Process V' values from the receiver */
490 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
491 ret = shim->read_v_prime_part(dig_port, i, &vprime);
492 if (ret)
493 return ret;
494 intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime);
495 }
496
497 /*
498 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
499 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
500 * stream is written via the HDCP_SHA_TEXT register in 32-bit
501 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
502 * index will keep track of our progress through the 64 bytes as well as
503 * helping us work the 40-bit KSVs through our 32-bit register.
504 *
505 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
506 */
507 sha_idx = 0;
508 sha_text = 0;
509 sha_leftovers = 0;
510 rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port);
511 intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
512 for (i = 0; i < num_downstream; i++) {
513 unsigned int sha_empty;
514 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
515
516 /* Fill up the empty slots in sha_text and write it out */
517 sha_empty = sizeof(sha_text) - sha_leftovers;
518 for (j = 0; j < sha_empty; j++) {
519 u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
520 sha_text |= ksv[j] << off;
521 }
522
523 ret = intel_write_sha_text(display, sha_text);
524 if (ret < 0)
525 return ret;
526
527 /* Programming guide writes this every 64 bytes */
528 sha_idx += sizeof(sha_text);
529 if (!(sha_idx % 64))
530 intel_de_write(display, HDCP_REP_CTL,
531 rep_ctl | HDCP_SHA1_TEXT_32);
532
533 /* Store the leftover bytes from the ksv in sha_text */
534 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
535 sha_text = 0;
536 for (j = 0; j < sha_leftovers; j++)
537 sha_text |= ksv[sha_empty + j] <<
538 ((sizeof(sha_text) - j - 1) * 8);
539
540 /*
541 * If we still have room in sha_text for more data, continue.
542 * Otherwise, write it out immediately.
543 */
544 if (sizeof(sha_text) > sha_leftovers)
545 continue;
546
547 ret = intel_write_sha_text(display, sha_text);
548 if (ret < 0)
549 return ret;
550 sha_leftovers = 0;
551 sha_text = 0;
552 sha_idx += sizeof(sha_text);
553 }
554
555 /*
556 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
557 * bytes are leftover from the last ksv, we might be able to fit them
558 * all in sha_text (first 2 cases), or we might need to split them up
559 * into 2 writes (last 2 cases).
560 */
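/*
 * Note: each KSV is 5 bytes and each write consumes 4, so after the loop
 * above sha_leftovers == num_downstream % 4. E.g. one device leaves 1
 * leftover byte, four devices come out even (0 leftovers); the four cases
 * below cover exactly those possibilities.
 */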
561 if (sha_leftovers == 0) {
562 /* Write 16 bits of text, 16 bits of M0 */
563 intel_de_write(display, HDCP_REP_CTL,
564 rep_ctl | HDCP_SHA1_TEXT_16);
565 ret = intel_write_sha_text(display,
566 bstatus[0] << 8 | bstatus[1]);
567 if (ret < 0)
568 return ret;
569 sha_idx += sizeof(sha_text);
570
571 /* Write 32 bits of M0 */
572 intel_de_write(display, HDCP_REP_CTL,
573 rep_ctl | HDCP_SHA1_TEXT_0);
574 ret = intel_write_sha_text(display, 0);
575 if (ret < 0)
576 return ret;
577 sha_idx += sizeof(sha_text);
578
579 /* Write 16 bits of M0 */
580 intel_de_write(display, HDCP_REP_CTL,
581 rep_ctl | HDCP_SHA1_TEXT_16);
582 ret = intel_write_sha_text(display, 0);
583 if (ret < 0)
584 return ret;
585 sha_idx += sizeof(sha_text);
586
587 } else if (sha_leftovers == 1) {
588 /* Write 24 bits of text, 8 bits of M0 */
589 intel_de_write(display, HDCP_REP_CTL,
590 rep_ctl | HDCP_SHA1_TEXT_24);
591 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
592 /* Only 24-bits of data, must be in the LSB */
593 sha_text = (sha_text & 0xffffff00) >> 8;
594 ret = intel_write_sha_text(display, sha_text);
595 if (ret < 0)
596 return ret;
597 sha_idx += sizeof(sha_text);
598
599 /* Write 32 bits of M0 */
600 intel_de_write(display, HDCP_REP_CTL,
601 rep_ctl | HDCP_SHA1_TEXT_0);
602 ret = intel_write_sha_text(display, 0);
603 if (ret < 0)
604 return ret;
605 sha_idx += sizeof(sha_text);
606
607 /* Write 24 bits of M0 */
608 intel_de_write(display, HDCP_REP_CTL,
609 rep_ctl | HDCP_SHA1_TEXT_8);
610 ret = intel_write_sha_text(display, 0);
611 if (ret < 0)
612 return ret;
613 sha_idx += sizeof(sha_text);
614
615 } else if (sha_leftovers == 2) {
616 /* Write 32 bits of text */
617 intel_de_write(display, HDCP_REP_CTL,
618 rep_ctl | HDCP_SHA1_TEXT_32);
619 sha_text |= bstatus[0] << 8 | bstatus[1];
620 ret = intel_write_sha_text(display, sha_text);
621 if (ret < 0)
622 return ret;
623 sha_idx += sizeof(sha_text);
624
625 /* Write 64 bits of M0 */
626 intel_de_write(display, HDCP_REP_CTL,
627 rep_ctl | HDCP_SHA1_TEXT_0);
628 for (i = 0; i < 2; i++) {
629 ret = intel_write_sha_text(display, 0);
630 if (ret < 0)
631 return ret;
632 sha_idx += sizeof(sha_text);
633 }
634
635 /*
636 * Terminate the SHA-1 stream by hand. For the other leftover
637 * cases this is appended by the hardware.
638 */
639 intel_de_write(display, HDCP_REP_CTL,
640 rep_ctl | HDCP_SHA1_TEXT_32);
641 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
642 ret = intel_write_sha_text(display, sha_text);
643 if (ret < 0)
644 return ret;
645 sha_idx += sizeof(sha_text);
646 } else if (sha_leftovers == 3) {
647 /* Write 32 bits of text (filled from LSB) */
648 intel_de_write(display, HDCP_REP_CTL,
649 rep_ctl | HDCP_SHA1_TEXT_32);
650 sha_text |= bstatus[0];
651 ret = intel_write_sha_text(display, sha_text);
652 if (ret < 0)
653 return ret;
654 sha_idx += sizeof(sha_text);
655
656 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
657 intel_de_write(display, HDCP_REP_CTL,
658 rep_ctl | HDCP_SHA1_TEXT_8);
659 ret = intel_write_sha_text(display, bstatus[1]);
660 if (ret < 0)
661 return ret;
662 sha_idx += sizeof(sha_text);
663
664 /* Write 32 bits of M0 */
665 intel_de_write(display, HDCP_REP_CTL,
666 rep_ctl | HDCP_SHA1_TEXT_0);
667 ret = intel_write_sha_text(display, 0);
668 if (ret < 0)
669 return ret;
670 sha_idx += sizeof(sha_text);
671
672 /* Write 8 bits of M0 */
673 intel_de_write(display, HDCP_REP_CTL,
674 rep_ctl | HDCP_SHA1_TEXT_24);
675 ret = intel_write_sha_text(display, 0);
676 if (ret < 0)
677 return ret;
678 sha_idx += sizeof(sha_text);
679 } else {
680 drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n",
681 sha_leftovers);
682 return -EINVAL;
683 }
684
685 intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
686 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
687 while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
688 ret = intel_write_sha_text(display, 0);
689 if (ret < 0)
690 return ret;
691 sha_idx += sizeof(sha_text);
692 }
693
694 /*
695 * Last write gets the length of the concatenation in bits. That is:
696 * - 5 bytes per device
697 * - 10 bytes for BINFO/BSTATUS(2), M0(8)
698 */
699 sha_text = (num_downstream * 5 + 10) * 8;
700 ret = intel_write_sha_text(display, sha_text);
701 if (ret < 0)
702 return ret;
703
704 /* Tell the HW we're done with the hash and wait for it to ACK */
705 intel_de_write(display, HDCP_REP_CTL,
706 rep_ctl | HDCP_SHA1_COMPLETE_HASH);
707 if (intel_de_wait_for_set(display, HDCP_REP_CTL,
708 HDCP_SHA1_COMPLETE, 1)) {
709 drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
710 return -ETIMEDOUT;
711 }
712 if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
713 drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n");
714 return -ENXIO;
715 }
716
717 return 0;
718 }
719
720 /* Implements Part 2 of the HDCP authorization procedure */
721 static
722 int intel_hdcp_auth_downstream(struct intel_connector *connector)
723 {
724 struct intel_display *display = to_intel_display(connector);
725 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
726 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
727 u8 bstatus[2], num_downstream, *ksv_fifo;
728 int ret, i, tries = 3;
729
730 ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
731 if (ret) {
732 drm_dbg_kms(display->drm,
733 "KSV list failed to become ready (%d)\n", ret);
734 return ret;
735 }
736
737 ret = shim->read_bstatus(dig_port, bstatus);
738 if (ret)
739 return ret;
740
741 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
742 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
743 drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n");
744 return -EPERM;
745 }
746
747 /*
748 * When a repeater reports a device count of 0, the HDCP1.4 spec allows
749 * disabling HDCP encryption. That implies the repeater can't have a
750 * display of its own. As there is no consumption of encrypted content
751 * in a repeater with 0 downstream devices, we fail the
752 * authentication.
753 */
754 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
755 if (num_downstream == 0) {
756 drm_dbg_kms(display->drm,
757 "Repeater with zero downstream devices\n");
758 return -EINVAL;
759 }
760
761 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
762 if (!ksv_fifo) {
763 drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n");
764 return -ENOMEM;
765 }
766
767 ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
768 if (ret)
769 goto err;
770
771 if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo,
772 num_downstream) > 0) {
773 drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n");
774 ret = -EPERM;
775 goto err;
776 }
777
778 /*
779 * When V prime mismatches, the DP spec mandates re-reading
780 * V prime at least twice.
781 */
782 for (i = 0; i < tries; i++) {
783 ret = intel_hdcp_validate_v_prime(connector, shim,
784 ksv_fifo, num_downstream,
785 bstatus);
786 if (!ret)
787 break;
788 }
789
790 if (i == tries) {
791 drm_dbg_kms(display->drm,
792 "V Prime validation failed.(%d)\n", ret);
793 goto err;
794 }
795
796 drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n",
797 num_downstream);
798 ret = 0;
799 err:
800 kfree(ksv_fifo);
801 return ret;
802 }
803
804 /* Implements Part 1 of the HDCP authorization procedure */
805 static int intel_hdcp_auth(struct intel_connector *connector)
806 {
807 struct intel_display *display = to_intel_display(connector);
808 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
809 struct intel_hdcp *hdcp = &connector->hdcp;
810 const struct intel_hdcp_shim *shim = hdcp->shim;
811 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
812 enum port port = dig_port->base.port;
813 unsigned long r0_prime_gen_start;
814 int ret, i, tries = 2;
815 union {
816 u32 reg[2];
817 u8 shim[DRM_HDCP_AN_LEN];
818 } an;
819 union {
820 u32 reg[2];
821 u8 shim[DRM_HDCP_KSV_LEN];
822 } bksv;
823 union {
824 u32 reg;
825 u8 shim[DRM_HDCP_RI_LEN];
826 } ri;
827 bool repeater_present, hdcp_capable;
828
829 /*
830 * Detects whether the display is HDCP capable. Although we check for
831 * valid Bksv below, the HDCP over DP spec requires that we check
832 * whether the display supports HDCP before we write An. For HDMI
833 * displays, this is not necessary.
834 */
835 if (shim->hdcp_get_capability) {
836 ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
837 if (ret)
838 return ret;
839 if (!hdcp_capable) {
840 drm_dbg_kms(display->drm,
841 "Panel is not HDCP capable\n");
842 return -EINVAL;
843 }
844 }
845
846 /* Initialize An with 2 random values and acquire it */
847 for (i = 0; i < 2; i++)
848 intel_de_write(display,
849 HDCP_ANINIT(display, cpu_transcoder, port),
850 get_random_u32());
851 intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
852 HDCP_CONF_CAPTURE_AN);
853
854 /* Wait for An to be acquired */
855 if (intel_de_wait_for_set(display,
856 HDCP_STATUS(display, cpu_transcoder, port),
857 HDCP_STATUS_AN_READY, 1)) {
858 drm_err(display->drm, "Timed out waiting for An\n");
859 return -ETIMEDOUT;
860 }
861
862 an.reg[0] = intel_de_read(display,
863 HDCP_ANLO(display, cpu_transcoder, port));
864 an.reg[1] = intel_de_read(display,
865 HDCP_ANHI(display, cpu_transcoder, port));
866 ret = shim->write_an_aksv(dig_port, an.shim);
867 if (ret)
868 return ret;
869
870 r0_prime_gen_start = jiffies;
871
872 memset(&bksv, 0, sizeof(bksv));
873
874 ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
875 if (ret < 0)
876 return ret;
877
878 if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) {
879 drm_err(display->drm, "BKSV is revoked\n");
880 return -EPERM;
881 }
882
883 intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port),
884 bksv.reg[0]);
885 intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port),
886 bksv.reg[1]);
887
888 ret = shim->repeater_present(dig_port, &repeater_present);
889 if (ret)
890 return ret;
891 if (repeater_present)
892 intel_de_write(display, HDCP_REP_CTL,
893 intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port));
894
895 ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
896 if (ret)
897 return ret;
898
899 intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
900 HDCP_CONF_AUTH_AND_ENC);
901
902 /* Wait for R0 ready */
903 if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
904 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
905 drm_err(display->drm, "Timed out waiting for R0 ready\n");
906 return -ETIMEDOUT;
907 }
908
909 /*
910 * Wait for R0' to become available. The spec says 100ms from Aksv, but
911 * some monitors can take longer than this. We'll set the timeout at
912 * 300ms just to be sure.
913 *
914 * On DP, there's an R0_READY bit available but no such bit
915 * exists on HDMI. Since the upper-bound is the same, we'll just do
916 * the stupid thing instead of polling on one and not the other.
917 */
918 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
919
920 tries = 3;
921
922 /*
923 * The DP HDCP spec mandates two more attempts to read R0 in case
924 * of an R0 mismatch.
925 */
926 for (i = 0; i < tries; i++) {
927 ri.reg = 0;
928 ret = shim->read_ri_prime(dig_port, ri.shim);
929 if (ret)
930 return ret;
931 intel_de_write(display,
932 HDCP_RPRIME(display, cpu_transcoder, port),
933 ri.reg);
934
935 /* Wait for Ri prime match */
936 if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
937 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
938 break;
939 }
940
941 if (i == tries) {
942 drm_dbg_kms(display->drm,
943 "Timed out waiting for Ri prime match (%x)\n",
944 intel_de_read(display,
945 HDCP_STATUS(display, cpu_transcoder, port)));
946 return -ETIMEDOUT;
947 }
948
949 /* Wait for encryption confirmation */
950 if (intel_de_wait_for_set(display,
951 HDCP_STATUS(display, cpu_transcoder, port),
952 HDCP_STATUS_ENC,
953 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
954 drm_err(display->drm, "Timed out waiting for encryption\n");
955 return -ETIMEDOUT;
956 }
957
958 /* DP MST Auth Part 1 Step 2.a and Step 2.b */
959 if (shim->stream_encryption) {
960 ret = shim->stream_encryption(connector, true);
961 if (ret) {
962 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
963 connector->base.base.id, connector->base.name);
964 return ret;
965 }
966 drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
967 transcoder_name(hdcp->stream_transcoder));
968 }
969
970 if (repeater_present)
971 return intel_hdcp_auth_downstream(connector);
972
973 drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
974 return 0;
975 }
976
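/*
 * Tear down HDCP 1.4: disable stream encryption (MST), then port encryption,
 * the repeater control bits and HDCP signalling.
 */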
977 static int _intel_hdcp_disable(struct intel_connector *connector)
978 {
979 struct intel_display *display = to_intel_display(connector);
980 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
981 struct intel_hdcp *hdcp = &connector->hdcp;
982 enum port port = dig_port->base.port;
983 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
984 u32 repeater_ctl;
985 int ret;
986
987 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
988 connector->base.base.id, connector->base.name);
989
990 if (hdcp->shim->stream_encryption) {
991 ret = hdcp->shim->stream_encryption(connector, false);
992 if (ret) {
993 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
994 connector->base.base.id, connector->base.name);
995 return ret;
996 }
997 drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
998 transcoder_name(hdcp->stream_transcoder));
999 /*
1000 * If there are other connectors on this port using HDCP,
1001 * don't disable it until HDCP encryption has been disabled
1002 * for all connectors in the MST topology.
1003 */
1004 if (dig_port->hdcp.num_streams > 0)
1005 return 0;
1006 }
1007
1008 hdcp->hdcp_encrypted = false;
1009 intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
1010 if (intel_de_wait_for_clear(display,
1011 HDCP_STATUS(display, cpu_transcoder, port),
1012 ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
1013 drm_err(display->drm,
1014 "Failed to disable HDCP, timeout clearing status\n");
1015 return -ETIMEDOUT;
1016 }
1017
1018 repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
1019 port);
1020 intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);
1021
1022 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
1023 if (ret) {
1024 drm_err(display->drm, "Failed to disable HDCP signalling\n");
1025 return ret;
1026 }
1027
1028 drm_dbg_kms(display->drm, "HDCP is disabled\n");
1029 return 0;
1030 }
1031
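/*
 * Enable HDCP 1.4: load the keys (up to KEY_LOAD_TRIES attempts), then run
 * authentication, retrying up to 3 times on failure.
 */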
1032 static int intel_hdcp1_enable(struct intel_connector *connector)
1033 {
1034 struct intel_display *display = to_intel_display(connector);
1035 struct intel_hdcp *hdcp = &connector->hdcp;
1036 int i, ret, tries = 3;
1037
1038 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
1039 connector->base.base.id, connector->base.name);
1040
1041 if (!hdcp_key_loadable(display)) {
1042 drm_err(display->drm, "HDCP key Load is not possible\n");
1043 return -ENXIO;
1044 }
1045
1046 for (i = 0; i < KEY_LOAD_TRIES; i++) {
1047 ret = intel_hdcp_load_keys(display);
1048 if (!ret)
1049 break;
1050 intel_hdcp_clear_keys(display);
1051 }
1052 if (ret) {
1053 drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
1054 ret);
1055 return ret;
1056 }
1057
1058 intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, true);
1059
1060 /* In case of authentication failures, the HDCP spec expects reauth. */
1061 for (i = 0; i < tries; i++) {
1062 ret = intel_hdcp_auth(connector);
1063 if (!ret) {
1064 hdcp->hdcp_encrypted = true;
1065 return 0;
1066 }
1067
1068 drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);
1069
1070 /* Ensuring HDCP encryption and signalling are stopped. */
1071 _intel_hdcp_disable(connector);
1072 }
1073
1074 drm_dbg_kms(display->drm,
1075 "HDCP authentication failed (%d tries/%d)\n", tries, ret);
1076 return ret;
1077 }
1078
1079 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
1080 {
1081 return container_of(hdcp, struct intel_connector, hdcp);
1082 }
1083
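/*
 * Update the connector's content protection value, keep the per-port stream
 * count in sync and, if requested, queue the worker that updates the property.
 */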
1084 static void intel_hdcp_update_value(struct intel_connector *connector,
1085 u64 value, bool update_property)
1086 {
1087 struct intel_display *display = to_intel_display(connector);
1088 struct drm_i915_private *i915 = to_i915(display->drm);
1089 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1090 struct intel_hdcp *hdcp = &connector->hdcp;
1091
1092 drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));
1093
1094 if (hdcp->value == value)
1095 return;
1096
1097 drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp.mutex));
1098
1099 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1100 if (!drm_WARN_ON(display->drm, dig_port->hdcp.num_streams == 0))
1101 dig_port->hdcp.num_streams--;
1102 } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1103 dig_port->hdcp.num_streams++;
1104 }
1105
1106 hdcp->value = value;
1107 if (update_property) {
1108 drm_connector_get(&connector->base);
1109 if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
1110 drm_connector_put(&connector->base);
1111 }
1112 }
1113
1114 /* Implements Part 3 of the HDCP authorization procedure */
1115 static int intel_hdcp_check_link(struct intel_connector *connector)
1116 {
1117 struct intel_display *display = to_intel_display(connector);
1118 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1119 struct intel_hdcp *hdcp = &connector->hdcp;
1120 enum port port = dig_port->base.port;
1121 enum transcoder cpu_transcoder;
1122 int ret = 0;
1123
1124 mutex_lock(&hdcp->mutex);
1125 mutex_lock(&dig_port->hdcp.mutex);
1126
1127 cpu_transcoder = hdcp->cpu_transcoder;
1128
1129 /* Check_link valid only when HDCP1.4 is enabled */
1130 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1131 !hdcp->hdcp_encrypted) {
1132 ret = -EINVAL;
1133 goto out;
1134 }
1135
1136 if (drm_WARN_ON(display->drm,
1137 !intel_hdcp_in_use(display, cpu_transcoder, port))) {
1138 drm_err(display->drm,
1139 "[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
1140 connector->base.base.id, connector->base.name,
1141 intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)));
1142 ret = -ENXIO;
1143 intel_hdcp_update_value(connector,
1144 DRM_MODE_CONTENT_PROTECTION_DESIRED,
1145 true);
1146 goto out;
1147 }
1148
1149 if (hdcp->shim->check_link(dig_port, connector)) {
1150 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1151 intel_hdcp_update_value(connector,
1152 DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
1153 }
1154 goto out;
1155 }
1156
1157 drm_dbg_kms(display->drm,
1158 "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
1159 connector->base.base.id, connector->base.name);
1160
1161 ret = _intel_hdcp_disable(connector);
1162 if (ret) {
1163 drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret);
1164 intel_hdcp_update_value(connector,
1165 DRM_MODE_CONTENT_PROTECTION_DESIRED,
1166 true);
1167 goto out;
1168 }
1169
1170 ret = intel_hdcp1_enable(connector);
1171 if (ret) {
1172 drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
1173 intel_hdcp_update_value(connector,
1174 DRM_MODE_CONTENT_PROTECTION_DESIRED,
1175 true);
1176 goto out;
1177 }
1178
1179 out:
1180 mutex_unlock(&dig_port->hdcp.mutex);
1181 mutex_unlock(&hdcp->mutex);
1182 return ret;
1183 }
1184
1185 static void intel_hdcp_prop_work(struct work_struct *work)
1186 {
1187 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
1188 prop_work);
1189 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1190 struct intel_display *display = to_intel_display(connector);
1191
1192 drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
1193 mutex_lock(&hdcp->mutex);
1194
1195 /*
1196 * This worker is only used to flip between ENABLED/DESIRED. Either of
1197 * those to UNDESIRED is handled by core. If value == UNDESIRED,
1198 * we're running just after hdcp has been disabled, so just exit
1199 */
1200 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1201 drm_hdcp_update_content_protection(&connector->base,
1202 hdcp->value);
1203
1204 mutex_unlock(&hdcp->mutex);
1205 drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1206
1207 drm_connector_put(&connector->base);
1208 }
1209
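/*
 * HDCP is supported on ports A-D on pre-display-12 platforms, and on all
 * ports from display version 12 onward, provided the platform has HDCP.
 */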
1210 bool is_hdcp_supported(struct intel_display *display, enum port port)
1211 {
1212 return DISPLAY_RUNTIME_INFO(display)->has_hdcp &&
1213 (DISPLAY_VER(display) >= 12 || port < PORT_E);
1214 }
1215
1216 static int
1217 hdcp2_prepare_ake_init(struct intel_connector *connector,
1218 struct hdcp2_ake_init *ake_data)
1219 {
1220 struct intel_display *display = to_intel_display(connector);
1221 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1222 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1223 struct i915_hdcp_arbiter *arbiter;
1224 int ret;
1225
1226 mutex_lock(&display->hdcp.hdcp_mutex);
1227 arbiter = display->hdcp.arbiter;
1228
1229 if (!arbiter || !arbiter->ops) {
1230 mutex_unlock(&display->hdcp.hdcp_mutex);
1231 return -EINVAL;
1232 }
1233
1234 ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1235 if (ret)
1236 drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n",
1237 ret);
1238 mutex_unlock(&display->hdcp.hdcp_mutex);
1239
1240 return ret;
1241 }
1242
1243 static int
1244 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1245 struct hdcp2_ake_send_cert *rx_cert,
1246 bool *paired,
1247 struct hdcp2_ake_no_stored_km *ek_pub_km,
1248 size_t *msg_sz)
1249 {
1250 struct intel_display *display = to_intel_display(connector);
1251 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1252 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1253 struct i915_hdcp_arbiter *arbiter;
1254 int ret;
1255
1256 mutex_lock(&display->hdcp.hdcp_mutex);
1257 arbiter = display->hdcp.arbiter;
1258
1259 if (!arbiter || !arbiter->ops) {
1260 mutex_unlock(&display->hdcp.hdcp_mutex);
1261 return -EINVAL;
1262 }
1263
1264 ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1265 rx_cert, paired,
1266 ek_pub_km, msg_sz);
1267 if (ret < 0)
1268 drm_dbg_kms(display->drm, "Verify rx_cert failed. %d\n",
1269 ret);
1270 mutex_unlock(&display->hdcp.hdcp_mutex);
1271
1272 return ret;
1273 }
1274
1275 static int hdcp2_verify_hprime(struct intel_connector *connector,
1276 struct hdcp2_ake_send_hprime *rx_hprime)
1277 {
1278 struct intel_display *display = to_intel_display(connector);
1279 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1280 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1281 struct i915_hdcp_arbiter *arbiter;
1282 int ret;
1283
1284 mutex_lock(&display->hdcp.hdcp_mutex);
1285 arbiter = display->hdcp.arbiter;
1286
1287 if (!arbiter || !arbiter->ops) {
1288 mutex_unlock(&display->hdcp.hdcp_mutex);
1289 return -EINVAL;
1290 }
1291
1292 ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1293 if (ret < 0)
1294 drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret);
1295 mutex_unlock(&display->hdcp.hdcp_mutex);
1296
1297 return ret;
1298 }
1299
1300 static int
1301 hdcp2_store_pairing_info(struct intel_connector *connector,
1302 struct hdcp2_ake_send_pairing_info *pairing_info)
1303 {
1304 struct intel_display *display = to_intel_display(connector);
1305 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1306 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1307 struct i915_hdcp_arbiter *arbiter;
1308 int ret;
1309
1310 mutex_lock(&display->hdcp.hdcp_mutex);
1311 arbiter = display->hdcp.arbiter;
1312
1313 if (!arbiter || !arbiter->ops) {
1314 mutex_unlock(&display->hdcp.hdcp_mutex);
1315 return -EINVAL;
1316 }
1317
1318 ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1319 if (ret < 0)
1320 drm_dbg_kms(display->drm, "Store pairing info failed. %d\n",
1321 ret);
1322 mutex_unlock(&display->hdcp.hdcp_mutex);
1323
1324 return ret;
1325 }
1326
1327 static int
1328 hdcp2_prepare_lc_init(struct intel_connector *connector,
1329 struct hdcp2_lc_init *lc_init)
1330 {
1331 struct intel_display *display = to_intel_display(connector);
1332 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1333 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1334 struct i915_hdcp_arbiter *arbiter;
1335 int ret;
1336
1337 mutex_lock(&display->hdcp.hdcp_mutex);
1338 arbiter = display->hdcp.arbiter;
1339
1340 if (!arbiter || !arbiter->ops) {
1341 mutex_unlock(&display->hdcp.hdcp_mutex);
1342 return -EINVAL;
1343 }
1344
1345 ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1346 if (ret < 0)
1347 drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n",
1348 ret);
1349 mutex_unlock(&display->hdcp.hdcp_mutex);
1350
1351 return ret;
1352 }
1353
1354 static int
1355 hdcp2_verify_lprime(struct intel_connector *connector,
1356 struct hdcp2_lc_send_lprime *rx_lprime)
1357 {
1358 struct intel_display *display = to_intel_display(connector);
1359 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1360 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1361 struct i915_hdcp_arbiter *arbiter;
1362 int ret;
1363
1364 mutex_lock(&display->hdcp.hdcp_mutex);
1365 arbiter = display->hdcp.arbiter;
1366
1367 if (!arbiter || !arbiter->ops) {
1368 mutex_unlock(&display->hdcp.hdcp_mutex);
1369 return -EINVAL;
1370 }
1371
1372 ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1373 if (ret < 0)
1374 drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n",
1375 ret);
1376 mutex_unlock(&display->hdcp.hdcp_mutex);
1377
1378 return ret;
1379 }
1380
1381 static int hdcp2_prepare_skey(struct intel_connector *connector,
1382 struct hdcp2_ske_send_eks *ske_data)
1383 {
1384 struct intel_display *display = to_intel_display(connector);
1385 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1386 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1387 struct i915_hdcp_arbiter *arbiter;
1388 int ret;
1389
1390 mutex_lock(&display->hdcp.hdcp_mutex);
1391 arbiter = display->hdcp.arbiter;
1392
1393 if (!arbiter || !arbiter->ops) {
1394 mutex_unlock(&display->hdcp.hdcp_mutex);
1395 return -EINVAL;
1396 }
1397
1398 ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1399 if (ret < 0)
1400 drm_dbg_kms(display->drm, "Get session key failed. %d\n",
1401 ret);
1402 mutex_unlock(&display->hdcp.hdcp_mutex);
1403
1404 return ret;
1405 }
1406
1407 static int
1408 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1409 struct hdcp2_rep_send_receiverid_list
1410 *rep_topology,
1411 struct hdcp2_rep_send_ack *rep_send_ack)
1412 {
1413 struct intel_display *display = to_intel_display(connector);
1414 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1415 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1416 struct i915_hdcp_arbiter *arbiter;
1417 int ret;
1418
1419 mutex_lock(&display->hdcp.hdcp_mutex);
1420 arbiter = display->hdcp.arbiter;
1421
1422 if (!arbiter || !arbiter->ops) {
1423 mutex_unlock(&display->hdcp.hdcp_mutex);
1424 return -EINVAL;
1425 }
1426
1427 ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1428 data,
1429 rep_topology,
1430 rep_send_ack);
1431 if (ret < 0)
1432 drm_dbg_kms(display->drm,
1433 "Verify rep topology failed. %d\n", ret);
1434 mutex_unlock(&display->hdcp.hdcp_mutex);
1435
1436 return ret;
1437 }
1438
1439 static int
1440 hdcp2_verify_mprime(struct intel_connector *connector,
1441 struct hdcp2_rep_stream_ready *stream_ready)
1442 {
1443 struct intel_display *display = to_intel_display(connector);
1444 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1445 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1446 struct i915_hdcp_arbiter *arbiter;
1447 int ret;
1448
1449 mutex_lock(&display->hdcp.hdcp_mutex);
1450 arbiter = display->hdcp.arbiter;
1451
1452 if (!arbiter || !arbiter->ops) {
1453 mutex_unlock(&display->hdcp.hdcp_mutex);
1454 return -EINVAL;
1455 }
1456
1457 ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1458 if (ret < 0)
1459 drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret);
1460 mutex_unlock(&display->hdcp.hdcp_mutex);
1461
1462 return ret;
1463 }
1464
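/* Ask the arbiter (MEI/GSC) to enable HDCP 2.2 authentication for the prepared port data */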
1465 static int hdcp2_authenticate_port(struct intel_connector *connector)
1466 {
1467 struct intel_display *display = to_intel_display(connector);
1468 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1469 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1470 struct i915_hdcp_arbiter *arbiter;
1471 int ret;
1472
1473 mutex_lock(&display->hdcp.hdcp_mutex);
1474 arbiter = display->hdcp.arbiter;
1475
1476 if (!arbiter || !arbiter->ops) {
1477 mutex_unlock(&display->hdcp.hdcp_mutex);
1478 return -EINVAL;
1479 }
1480
1481 ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1482 if (ret < 0)
1483 drm_dbg_kms(display->drm, "Enable hdcp auth failed. %d\n",
1484 ret);
1485 mutex_unlock(&display->hdcp.hdcp_mutex);
1486
1487 return ret;
1488 }
1489
1490 static int hdcp2_close_session(struct intel_connector *connector)
1491 {
1492 struct intel_display *display = to_intel_display(connector);
1493 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1494 struct i915_hdcp_arbiter *arbiter;
1495 int ret;
1496
1497 mutex_lock(&display->hdcp.hdcp_mutex);
1498 arbiter = display->hdcp.arbiter;
1499
1500 if (!arbiter || !arbiter->ops) {
1501 mutex_unlock(&display->hdcp.hdcp_mutex);
1502 return -EINVAL;
1503 }
1504
1505 ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1506 &dig_port->hdcp.port_data);
1507 mutex_unlock(&display->hdcp.hdcp_mutex);
1508
1509 return ret;
1510 }
1511
1512 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1513 {
1514 return hdcp2_close_session(connector);
1515 }
1516
1517 /* Authentication flow starts from here */
1518 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1519 {
1520 struct intel_display *display = to_intel_display(connector);
1521 struct intel_digital_port *dig_port =
1522 intel_attached_dig_port(connector);
1523 struct intel_hdcp *hdcp = &connector->hdcp;
1524 union {
1525 struct hdcp2_ake_init ake_init;
1526 struct hdcp2_ake_send_cert send_cert;
1527 struct hdcp2_ake_no_stored_km no_stored_km;
1528 struct hdcp2_ake_send_hprime send_hprime;
1529 struct hdcp2_ake_send_pairing_info pairing_info;
1530 } msgs;
1531 const struct intel_hdcp_shim *shim = hdcp->shim;
1532 size_t size;
1533 int ret, i, max_retries;
1534
1535 /* Init for seq_num */
1536 hdcp->seq_num_v = 0;
1537 hdcp->seq_num_m = 0;
1538
1539 if (intel_encoder_is_dp(&dig_port->base) ||
1540 intel_encoder_is_mst(&dig_port->base))
1541 max_retries = 10;
1542 else
1543 max_retries = 1;
1544
1545 ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1546 if (ret < 0)
1547 return ret;
1548
1549 /*
1550 * For DP/DP-MST encoders, retry the first read and write to the
1551 * downstream device at least 10 times with a 50ms delay if it does not
1552 * report HDCP2 capability (a dock may stop advertising hdcp2 capability
1553 * for some reason). During suspend/resume a dock usually keeps the
1554 * HDCP2 registers inaccessible, causing AUX errors. This wouldn't be a
1555 * big problem if userspace just kept retrying with some delay while
1556 * continuing to play low value content, but most userspace applications
1557 * end up throwing an error when they receive one from KMD. This makes
1558 * sure we give the dock and the sink devices time to complete their
1559 * power cycle and then try HDCP authentication. The value of 10 and the
1560 * delay of 50ms were decided based on trial and error.
1561 */
1562 for (i = 0; i < max_retries; i++) {
1563 if (!intel_hdcp2_get_capability(connector)) {
1564 msleep(50);
1565 continue;
1566 }
1567
1568 ret = shim->write_2_2_msg(connector, &msgs.ake_init,
1569 sizeof(msgs.ake_init));
1570 if (ret < 0)
1571 continue;
1572
1573 ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
1574 &msgs.send_cert, sizeof(msgs.send_cert));
1575 if (ret > 0)
1576 break;
1577 }
1578
1579 if (ret < 0)
1580 return ret;
1581
1582 if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1583 drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n");
1584 return -EINVAL;
1585 }
1586
1587 hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1588
1589 if (drm_hdcp_check_ksvs_revoked(display->drm,
1590 msgs.send_cert.cert_rx.receiver_id,
1591 1) > 0) {
1592 drm_err(display->drm, "Receiver ID is revoked\n");
1593 return -EPERM;
1594 }
1595
1596 /*
1597 * Here msgs.no_stored_km will also hold the message corresponding
1598 * to a stored km.
1599 */
1600 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1601 &hdcp->is_paired,
1602 &msgs.no_stored_km, &size);
1603 if (ret < 0)
1604 return ret;
1605
1606 ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
1607 if (ret < 0)
1608 return ret;
1609
1610 ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
1611 &msgs.send_hprime, sizeof(msgs.send_hprime));
1612 if (ret < 0)
1613 return ret;
1614
1615 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1616 if (ret < 0)
1617 return ret;
1618
1619 if (!hdcp->is_paired) {
1620 /* Pairing is required */
1621 ret = shim->read_2_2_msg(connector,
1622 HDCP_2_2_AKE_SEND_PAIRING_INFO,
1623 &msgs.pairing_info,
1624 sizeof(msgs.pairing_info));
1625 if (ret < 0)
1626 return ret;
1627
1628 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1629 if (ret < 0)
1630 return ret;
1631 hdcp->is_paired = true;
1632 }
1633
1634 return 0;
1635 }
1636
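/* Run the HDCP 2.2 locality check, retrying up to HDCP2_LC_RETRY_CNT times */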
1637 static int hdcp2_locality_check(struct intel_connector *connector)
1638 {
1639 struct intel_hdcp *hdcp = &connector->hdcp;
1640 union {
1641 struct hdcp2_lc_init lc_init;
1642 struct hdcp2_lc_send_lprime send_lprime;
1643 } msgs;
1644 const struct intel_hdcp_shim *shim = hdcp->shim;
1645 int tries = HDCP2_LC_RETRY_CNT, ret, i;
1646
1647 for (i = 0; i < tries; i++) {
1648 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1649 if (ret < 0)
1650 continue;
1651
1652 ret = shim->write_2_2_msg(connector, &msgs.lc_init,
1653 sizeof(msgs.lc_init));
1654 if (ret < 0)
1655 continue;
1656
1657 ret = shim->read_2_2_msg(connector,
1658 HDCP_2_2_LC_SEND_LPRIME,
1659 &msgs.send_lprime,
1660 sizeof(msgs.send_lprime));
1661 if (ret < 0)
1662 continue;
1663
1664 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1665 if (!ret)
1666 break;
1667 }
1668
1669 return ret;
1670 }
1671
1672 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1673 {
1674 struct intel_hdcp *hdcp = &connector->hdcp;
1675 struct hdcp2_ske_send_eks send_eks;
1676 int ret;
1677
1678 ret = hdcp2_prepare_skey(connector, &send_eks);
1679 if (ret < 0)
1680 return ret;
1681
1682 ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1683 sizeof(send_eks));
1684 if (ret < 0)
1685 return ret;
1686
1687 return 0;
1688 }
1689
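/*
 * Send RepeaterAuth_Stream_Manage to the repeater and verify the M' in its
 * Stream_Ready reply; seq_num_m is incremented even on failure.
 */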
1690 static
1691 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1692 {
1693 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1694 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1695 struct intel_hdcp *hdcp = &connector->hdcp;
1696 union {
1697 struct hdcp2_rep_stream_manage stream_manage;
1698 struct hdcp2_rep_stream_ready stream_ready;
1699 } msgs;
1700 const struct intel_hdcp_shim *shim = hdcp->shim;
1701 int ret, streams_size_delta, i;
1702
1703 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1704 return -ERANGE;
1705
1706 /* Prepare RepeaterAuth_Stream_Manage msg */
1707 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1708 drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1709
1710 msgs.stream_manage.k = cpu_to_be16(data->k);
1711
1712 for (i = 0; i < data->k; i++) {
1713 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1714 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
1715 }
1716
1717 streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1718 sizeof(struct hdcp2_streamid_type);
1719 /* Send it to Repeater */
1720 ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
1721 sizeof(msgs.stream_manage) - streams_size_delta);
1722 if (ret < 0)
1723 goto out;
1724
1725 ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
1726 &msgs.stream_ready, sizeof(msgs.stream_ready));
1727 if (ret < 0)
1728 goto out;
1729
1730 data->seq_num_m = hdcp->seq_num_m;
1731
1732 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1733
1734 out:
1735 hdcp->seq_num_m++;
1736
1737 return ret;
1738 }
1739
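/*
 * Second stage of repeater authentication: read the ReceiverID_List, validate
 * topology limits and Type 1 capability, check seq_num_v and receiver ID
 * revocation, then verify the topology and send RepeaterAuth_Send_Ack.
 */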
1740 static
1741 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1742 {
1743 struct intel_display *display = to_intel_display(connector);
1744 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1745 struct intel_hdcp *hdcp = &connector->hdcp;
1746 union {
1747 struct hdcp2_rep_send_receiverid_list recvid_list;
1748 struct hdcp2_rep_send_ack rep_ack;
1749 } msgs;
1750 const struct intel_hdcp_shim *shim = hdcp->shim;
1751 u32 seq_num_v, device_cnt;
1752 u8 *rx_info;
1753 int ret;
1754
1755 ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
1756 &msgs.recvid_list, sizeof(msgs.recvid_list));
1757 if (ret < 0)
1758 return ret;
1759
1760 rx_info = msgs.recvid_list.rx_info;
1761
1762 if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1763 HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1764 drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
1765 return -EINVAL;
1766 }
1767
1768 /*
1769 * MST topology is not Type 1 capable if it contains a downstream
1770 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
1771 */
1772 dig_port->hdcp.mst_type1_capable =
1773 !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
1774 !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
1775
1776 if (!dig_port->hdcp.mst_type1_capable && hdcp->content_type) {
1777 drm_dbg_kms(display->drm,
1778 "HDCP1.x or 2.0 Legacy Device Downstream\n");
1779 return -EINVAL;
1780 }
1781
1782 	/* Convert the big-endian seq_num_v and store it in a local u32 */
1783 seq_num_v =
1784 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
1785
1786 if (!hdcp->hdcp2_encrypted && seq_num_v) {
1787 drm_dbg_kms(display->drm,
1788 "Non zero Seq_num_v at first RecvId_List msg\n");
1789 return -EINVAL;
1790 }
1791
1792 if (seq_num_v < hdcp->seq_num_v) {
1793 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1794 drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
1795 return -EINVAL;
1796 }
1797
1798 device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1799 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1800 if (drm_hdcp_check_ksvs_revoked(display->drm,
1801 msgs.recvid_list.receiver_ids,
1802 device_cnt) > 0) {
1803 		drm_err(display->drm, "Revoked receiver ID(s) present in the list\n");
1804 return -EPERM;
1805 }
1806
1807 ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1808 &msgs.recvid_list,
1809 &msgs.rep_ack);
1810 if (ret < 0)
1811 return ret;
1812
1813 hdcp->seq_num_v = seq_num_v;
1814 ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
1815 sizeof(msgs.rep_ack));
1816 if (ret < 0)
1817 return ret;
1818
1819 return 0;
1820 }
1821
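/*
 * Run the HDCP 2.2 authentication sequence with the sink: AKE, locality
 * check, SKE, optional stream type configuration and, for repeaters, the
 * downstream topology authentication.
 */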
1822 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1823 {
1824 struct intel_display *display = to_intel_display(connector);
1825 struct intel_hdcp *hdcp = &connector->hdcp;
1826 const struct intel_hdcp_shim *shim = hdcp->shim;
1827 int ret;
1828
1829 ret = hdcp2_authentication_key_exchange(connector);
1830 if (ret < 0) {
1831 drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
1832 return ret;
1833 }
1834
1835 ret = hdcp2_locality_check(connector);
1836 if (ret < 0) {
1837 drm_dbg_kms(display->drm,
1838 "Locality Check failed. Err : %d\n", ret);
1839 return ret;
1840 }
1841
1842 ret = hdcp2_session_key_exchange(connector);
1843 if (ret < 0) {
1844 drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
1845 return ret;
1846 }
1847
1848 if (shim->config_stream_type) {
1849 ret = shim->config_stream_type(connector,
1850 hdcp->is_repeater,
1851 hdcp->content_type);
1852 if (ret < 0)
1853 return ret;
1854 }
1855
1856 if (hdcp->is_repeater) {
1857 ret = hdcp2_authenticate_repeater_topology(connector);
1858 if (ret < 0) {
1859 drm_dbg_kms(display->drm,
1860 "Repeater Auth Failed. Err: %d\n", ret);
1861 return ret;
1862 }
1863 }
1864
1865 return ret;
1866 }
1867
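/*
 * Enable stream encryption once the port reports the link as encrypted; if
 * the link is not encrypted, deauthenticate the port and bail out.
 */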
1868 static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1869 {
1870 struct intel_display *display = to_intel_display(connector);
1871 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1872 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1873 struct intel_hdcp *hdcp = &connector->hdcp;
1874 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1875 enum port port = dig_port->base.port;
1876 int ret = 0;
1877
1878 if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1879 LINK_ENCRYPTION_STATUS)) {
1880 drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
1881 connector->base.base.id, connector->base.name);
1882 ret = -EPERM;
1883 goto link_recover;
1884 }
1885
1886 if (hdcp->shim->stream_2_2_encryption) {
1887 ret = hdcp->shim->stream_2_2_encryption(connector, true);
1888 if (ret) {
1889 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
1890 connector->base.base.id, connector->base.name);
1891 return ret;
1892 }
1893 drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1894 transcoder_name(hdcp->stream_transcoder));
1895 }
1896
1897 return 0;
1898
1899 link_recover:
1900 if (hdcp2_deauthenticate_port(connector) < 0)
1901 drm_dbg_kms(display->drm, "Port deauth failed.\n");
1902
1903 dig_port->hdcp.auth_status = false;
1904 data->k = 0;
1905
1906 return ret;
1907 }
1908
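/*
 * Enable port-level HDCP 2.2 encryption: toggle HDCP signalling if the shim
 * requires it, request encryption once the link reports authenticated and
 * wait for LINK_ENCRYPTION_STATUS to be set.
 */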
1909 static int hdcp2_enable_encryption(struct intel_connector *connector)
1910 {
1911 struct intel_display *display = to_intel_display(connector);
1912 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1913 struct intel_hdcp *hdcp = &connector->hdcp;
1914 enum port port = dig_port->base.port;
1915 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1916 int ret;
1917
1918 drm_WARN_ON(display->drm,
1919 intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1920 LINK_ENCRYPTION_STATUS);
1921 if (hdcp->shim->toggle_signalling) {
1922 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1923 true);
1924 if (ret) {
1925 drm_err(display->drm,
1926 "Failed to enable HDCP signalling. %d\n",
1927 ret);
1928 return ret;
1929 }
1930 }
1931
1932 if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1933 LINK_AUTH_STATUS)
1934 /* Link is Authenticated. Now set for Encryption */
1935 intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
1936 0, CTL_LINK_ENCRYPTION_REQ);
1937
1938 ret = intel_de_wait_for_set(display,
1939 HDCP2_STATUS(display, cpu_transcoder,
1940 port),
1941 LINK_ENCRYPTION_STATUS,
1942 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1943 dig_port->hdcp.auth_status = true;
1944
1945 return ret;
1946 }
1947
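/*
 * Disable port-level HDCP 2.2 encryption and, if the shim requires it, HDCP
 * signalling, waiting for LINK_ENCRYPTION_STATUS to clear.
 */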
1948 static int hdcp2_disable_encryption(struct intel_connector *connector)
1949 {
1950 struct intel_display *display = to_intel_display(connector);
1951 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1952 struct intel_hdcp *hdcp = &connector->hdcp;
1953 enum port port = dig_port->base.port;
1954 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1955 int ret;
1956
1957 drm_WARN_ON(display->drm,
1958 !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1959 LINK_ENCRYPTION_STATUS));
1960
1961 intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
1962 CTL_LINK_ENCRYPTION_REQ, 0);
1963
1964 ret = intel_de_wait_for_clear(display,
1965 HDCP2_STATUS(display, cpu_transcoder,
1966 port),
1967 LINK_ENCRYPTION_STATUS,
1968 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1969 if (ret == -ETIMEDOUT)
1970 		drm_dbg_kms(display->drm, "Disable encryption timed out");
1971
1972 if (hdcp->shim->toggle_signalling) {
1973 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1974 false);
1975 if (ret) {
1976 drm_err(display->drm,
1977 "Failed to disable HDCP signalling. %d\n",
1978 ret);
1979 return ret;
1980 }
1981 }
1982
1983 return ret;
1984 }
1985
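/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(). Returns 0
 * for non-repeaters and gives up early on a seq_num_m roll over so that
 * authentication can be restarted.
 */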
1986 static int
1987 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1988 {
1989 struct intel_display *display = to_intel_display(connector);
1990 int i, tries = 3, ret;
1991
1992 if (!connector->hdcp.is_repeater)
1993 return 0;
1994
1995 for (i = 0; i < tries; i++) {
1996 ret = _hdcp2_propagate_stream_management_info(connector);
1997 if (!ret)
1998 break;
1999
2000 		/* Let's restart the auth in case of seq_num_m roll over */
2001 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
2002 drm_dbg_kms(display->drm,
2003 "seq_num_m roll over.(%d)\n", ret);
2004 break;
2005 }
2006
2007 drm_dbg_kms(display->drm,
2008 "HDCP2 stream management %d of %d Failed.(%d)\n",
2009 i + 1, tries, ret);
2010 }
2011
2012 return ret;
2013 }
2014
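/*
 * Authenticate the sink (up to three attempts while the port is not yet
 * authenticated), prepare and propagate the stream management info and
 * authenticate the port, then enable link encryption after the mandated
 * post-SKE delay and finally stream encryption.
 */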
2015 static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
2016 struct intel_connector *connector)
2017 {
2018 struct intel_display *display = to_intel_display(connector);
2019 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2020 int ret = 0, i, tries = 3;
2021
2022 for (i = 0; i < tries && !dig_port->hdcp.auth_status; i++) {
2023 ret = hdcp2_authenticate_sink(connector);
2024 if (!ret) {
2025 ret = intel_hdcp_prepare_streams(state, connector);
2026 if (ret) {
2027 drm_dbg_kms(display->drm,
2028 "Prepare stream failed.(%d)\n",
2029 ret);
2030 break;
2031 }
2032
2033 ret = hdcp2_propagate_stream_management_info(connector);
2034 if (ret) {
2035 drm_dbg_kms(display->drm,
2036 "Stream management failed.(%d)\n",
2037 ret);
2038 break;
2039 }
2040
2041 ret = hdcp2_authenticate_port(connector);
2042 if (!ret)
2043 break;
2044 drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
2045 ret);
2046 }
2047
2048 		/* Clear the HDCP session in the security firmware */
2049 drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
2050 i + 1, tries, ret);
2051 if (hdcp2_deauthenticate_port(connector) < 0)
2052 drm_dbg_kms(display->drm, "Port deauth failed.\n");
2053 }
2054
2055 if (!ret && !dig_port->hdcp.auth_status) {
2056 		/*
2057 		 * Ensure the required minimum 200 ms interval between
2058 		 * Session Key Exchange and enabling encryption.
2059 		 */
2060 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
2061 ret = hdcp2_enable_encryption(connector);
2062 if (ret < 0) {
2063 drm_dbg_kms(display->drm,
2064 "Encryption Enable Failed.(%d)\n", ret);
2065 if (hdcp2_deauthenticate_port(connector) < 0)
2066 drm_dbg_kms(display->drm, "Port deauth failed.\n");
2067 }
2068 }
2069
2070 if (!ret)
2071 ret = hdcp2_enable_stream_encryption(connector);
2072
2073 return ret;
2074 }
2075
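/*
 * Top-level HDCP 2.2 enable for a connector: adjust HDCP line rekeying for
 * the encoder, then authenticate and enable encryption, marking the connector
 * as HDCP 2.2 encrypted on success.
 */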
2076 static int _intel_hdcp2_enable(struct intel_atomic_state *state,
2077 struct intel_connector *connector)
2078 {
2079 struct intel_display *display = to_intel_display(connector);
2080 struct intel_hdcp *hdcp = &connector->hdcp;
2081 int ret;
2082
2083 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
2084 connector->base.base.id, connector->base.name,
2085 hdcp->content_type);
2086
2087 intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, false);
2088
2089 ret = hdcp2_authenticate_and_encrypt(state, connector);
2090 if (ret) {
2091 drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
2092 hdcp->content_type, ret);
2093 return ret;
2094 }
2095
2096 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
2097 connector->base.base.id, connector->base.name,
2098 hdcp->content_type);
2099
2100 hdcp->hdcp2_encrypted = true;
2101 return 0;
2102 }
2103
2104 static int
2105 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
2106 {
2107 struct intel_display *display = to_intel_display(connector);
2108 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2109 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
2110 struct intel_hdcp *hdcp = &connector->hdcp;
2111 int ret;
2112
2113 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
2114 connector->base.base.id, connector->base.name);
2115
2116 if (hdcp->shim->stream_2_2_encryption) {
2117 ret = hdcp->shim->stream_2_2_encryption(connector, false);
2118 if (ret) {
2119 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
2120 connector->base.base.id, connector->base.name);
2121 return ret;
2122 }
2123 drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
2124 transcoder_name(hdcp->stream_transcoder));
2125
2126 if (dig_port->hdcp.num_streams > 0 && !hdcp2_link_recovery)
2127 return 0;
2128 }
2129
2130 ret = hdcp2_disable_encryption(connector);
2131
2132 if (hdcp2_deauthenticate_port(connector) < 0)
2133 drm_dbg_kms(display->drm, "Port deauth failed.\n");
2134
2135 connector->hdcp.hdcp2_encrypted = false;
2136 dig_port->hdcp.auth_status = false;
2137 data->k = 0;
2138
2139 return ret;
2140 }
2141
2142 /* Implements the Link Integrity Check for HDCP2.2 */
2143 static int intel_hdcp2_check_link(struct intel_connector *connector)
2144 {
2145 struct intel_display *display = to_intel_display(connector);
2146 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2147 struct intel_hdcp *hdcp = &connector->hdcp;
2148 enum port port = dig_port->base.port;
2149 enum transcoder cpu_transcoder;
2150 int ret = 0;
2151
2152 mutex_lock(&hdcp->mutex);
2153 mutex_lock(&dig_port->hdcp.mutex);
2154 cpu_transcoder = hdcp->cpu_transcoder;
2155
2156 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2157 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2158 !hdcp->hdcp2_encrypted) {
2159 ret = -EINVAL;
2160 goto out;
2161 }
2162
2163 if (drm_WARN_ON(display->drm,
2164 !intel_hdcp2_in_use(display, cpu_transcoder, port))) {
2165 drm_err(display->drm,
2166 "HDCP2.2 link stopped the encryption, %x\n",
2167 intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)));
2168 ret = -ENXIO;
2169 _intel_hdcp2_disable(connector, true);
2170 intel_hdcp_update_value(connector,
2171 DRM_MODE_CONTENT_PROTECTION_DESIRED,
2172 true);
2173 goto out;
2174 }
2175
2176 ret = hdcp->shim->check_2_2_link(dig_port, connector);
2177 if (ret == HDCP_LINK_PROTECTED) {
2178 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2179 intel_hdcp_update_value(connector,
2180 DRM_MODE_CONTENT_PROTECTION_ENABLED,
2181 true);
2182 }
2183 goto out;
2184 }
2185
2186 if (ret == HDCP_TOPOLOGY_CHANGE) {
2187 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2188 goto out;
2189
2190 drm_dbg_kms(display->drm,
2191 "HDCP2.2 Downstream topology change\n");
2192
2193 ret = hdcp2_authenticate_repeater_topology(connector);
2194 if (!ret) {
2195 intel_hdcp_update_value(connector,
2196 DRM_MODE_CONTENT_PROTECTION_ENABLED,
2197 true);
2198 goto out;
2199 }
2200
2201 drm_dbg_kms(display->drm,
2202 "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
2203 connector->base.base.id, connector->base.name,
2204 ret);
2205 } else {
2206 drm_dbg_kms(display->drm,
2207 "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
2208 connector->base.base.id, connector->base.name);
2209 }
2210
2211 ret = _intel_hdcp2_disable(connector, true);
2212 if (ret) {
2213 drm_err(display->drm,
2214 "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
2215 connector->base.base.id, connector->base.name, ret);
2216 intel_hdcp_update_value(connector,
2217 DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2218 goto out;
2219 }
2220
2221 intel_hdcp_update_value(connector,
2222 DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2223 out:
2224 mutex_unlock(&dig_port->hdcp.mutex);
2225 mutex_unlock(&hdcp->mutex);
2226 return ret;
2227 }
2228
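/*
 * Periodic link check worker: does nothing once the connector is
 * unregistered, otherwise checks the HDCP 2.2 link first and re-queues itself
 * with the HDCP 2.2 or HDCP 1.4 check period depending on which check passes.
 */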
2229 static void intel_hdcp_check_work(struct work_struct *work)
2230 {
2231 struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2232 struct intel_hdcp,
2233 check_work);
2234 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2235 struct intel_display *display = to_intel_display(connector);
2236 struct drm_i915_private *i915 = to_i915(display->drm);
2237
2238 if (drm_connector_is_unregistered(&connector->base))
2239 return;
2240
2241 if (!intel_hdcp2_check_link(connector))
2242 queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2243 DRM_HDCP2_CHECK_PERIOD_MS);
2244 else if (!intel_hdcp_check_link(connector))
2245 queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2246 DRM_HDCP_CHECK_PERIOD_MS);
2247 }
2248
2249 static int i915_hdcp_component_bind(struct device *drv_kdev,
2250 struct device *mei_kdev, void *data)
2251 {
2252 struct intel_display *display = to_intel_display(drv_kdev);
2253
2254 drm_dbg(display->drm, "I915 HDCP comp bind\n");
2255 mutex_lock(&display->hdcp.hdcp_mutex);
2256 display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2257 display->hdcp.arbiter->hdcp_dev = mei_kdev;
2258 mutex_unlock(&display->hdcp.hdcp_mutex);
2259
2260 return 0;
2261 }
2262
2263 static void i915_hdcp_component_unbind(struct device *drv_kdev,
2264 struct device *mei_kdev, void *data)
2265 {
2266 struct intel_display *display = to_intel_display(drv_kdev);
2267
2268 drm_dbg(display->drm, "I915 HDCP comp unbind\n");
2269 mutex_lock(&display->hdcp.hdcp_mutex);
2270 display->hdcp.arbiter = NULL;
2271 mutex_unlock(&display->hdcp.hdcp_mutex);
2272 }
2273
2274 static const struct component_ops i915_hdcp_ops = {
2275 .bind = i915_hdcp_component_bind,
2276 .unbind = i915_hdcp_component_unbind,
2277 };
2278
2279 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2280 {
2281 switch (port) {
2282 case PORT_A:
2283 return HDCP_DDI_A;
2284 case PORT_B ... PORT_F:
2285 return (enum hdcp_ddi)port;
2286 default:
2287 return HDCP_DDI_INVALID_PORT;
2288 }
2289 }
2290
2291 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2292 {
2293 switch (cpu_transcoder) {
2294 case TRANSCODER_A ... TRANSCODER_D:
2295 return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2296 default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2297 return HDCP_INVALID_TRANSCODER;
2298 }
2299 }
2300
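/*
 * Initialize the hdcp_port_data handed to the security firmware: DDI index
 * (pre-Gen12 only), port type, shim protocol and a stream array sized for the
 * number of pipes.
 */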
2301 static int initialize_hdcp_port_data(struct intel_connector *connector,
2302 struct intel_digital_port *dig_port,
2303 const struct intel_hdcp_shim *shim)
2304 {
2305 struct intel_display *display = to_intel_display(connector);
2306 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
2307 enum port port = dig_port->base.port;
2308
2309 if (DISPLAY_VER(display) < 12)
2310 data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
2311 else
2312 		/*
2313 		 * As per the ME FW API expectation, for GEN 12+, hdcp_ddi is
2314 		 * filled with zero (the invalid port index).
2315 		 */
2316 data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
2317
2318 	/*
2319 	 * The associated transcoder is set and updated at modeset time, so
2320 	 * hdcp_transcoder is initialized here to zero (the invalid transcoder
2321 	 * index). On pre-Gen12 platforms it stays invalid forever.
2322 	 */
2323 data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
2324
2325 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2326 data->protocol = (u8)shim->protocol;
2327
2328 if (!data->streams)
2329 data->streams = kcalloc(INTEL_NUM_PIPES(display),
2330 sizeof(struct hdcp2_streamid_type),
2331 GFP_KERNEL);
2332 if (!data->streams) {
2333 drm_err(display->drm, "Out of Memory\n");
2334 return -ENOMEM;
2335 }
2336
2337 return 0;
2338 }
2339
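/*
 * HDCP 2.2 needs either the GSC command streamer path or the MEI HDCP
 * component, and is limited to Gen10+ or KBL/CFL/CML platforms.
 */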
2340 static bool is_hdcp2_supported(struct intel_display *display)
2341 {
2342 if (intel_hdcp_gsc_cs_required(display))
2343 return true;
2344
2345 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2346 return false;
2347
2348 return DISPLAY_VER(display) >= 10 ||
2349 display->platform.kabylake ||
2350 display->platform.coffeelake ||
2351 display->platform.cometlake;
2352 }
2353
2354 void intel_hdcp_component_init(struct intel_display *display)
2355 {
2356 int ret;
2357
2358 if (!is_hdcp2_supported(display))
2359 return;
2360
2361 mutex_lock(&display->hdcp.hdcp_mutex);
2362 drm_WARN_ON(display->drm, display->hdcp.comp_added);
2363
2364 display->hdcp.comp_added = true;
2365 mutex_unlock(&display->hdcp.hdcp_mutex);
2366 if (intel_hdcp_gsc_cs_required(display))
2367 ret = intel_hdcp_gsc_init(display);
2368 else
2369 ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
2370 I915_COMPONENT_HDCP);
2371
2372 if (ret < 0) {
2373 drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n",
2374 ret);
2375 mutex_lock(&display->hdcp.hdcp_mutex);
2376 display->hdcp.comp_added = false;
2377 mutex_unlock(&display->hdcp.hdcp_mutex);
2378 return;
2379 }
2380 }
2381
2382 static void intel_hdcp2_init(struct intel_connector *connector,
2383 struct intel_digital_port *dig_port,
2384 const struct intel_hdcp_shim *shim)
2385 {
2386 struct intel_display *display = to_intel_display(connector);
2387 struct intel_hdcp *hdcp = &connector->hdcp;
2388 int ret;
2389
2390 ret = initialize_hdcp_port_data(connector, dig_port, shim);
2391 if (ret) {
2392 drm_dbg_kms(display->drm, "Mei hdcp data init failed\n");
2393 return;
2394 }
2395
2396 hdcp->hdcp2_supported = true;
2397 }
2398
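/*
 * Per-connector HDCP setup: initialize HDCP 2.2 support when the platform
 * allows it, attach the content protection property, and set up the mutex,
 * the check/prop workers and the CP_IRQ wait queue.
 */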
2399 int intel_hdcp_init(struct intel_connector *connector,
2400 struct intel_digital_port *dig_port,
2401 const struct intel_hdcp_shim *shim)
2402 {
2403 struct intel_display *display = to_intel_display(connector);
2404 struct intel_hdcp *hdcp = &connector->hdcp;
2405 int ret;
2406
2407 if (!shim)
2408 return -EINVAL;
2409
2410 if (is_hdcp2_supported(display))
2411 intel_hdcp2_init(connector, dig_port, shim);
2412
2413 ret = drm_connector_attach_content_protection_property(&connector->base,
2414 hdcp->hdcp2_supported);
2415 if (ret) {
2416 hdcp->hdcp2_supported = false;
2417 kfree(dig_port->hdcp.port_data.streams);
2418 return ret;
2419 }
2420
2421 hdcp->shim = shim;
2422 mutex_init(&hdcp->mutex);
2423 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2424 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2425 init_waitqueue_head(&hdcp->cp_irq_queue);
2426
2427 return 0;
2428 }
2429
2430 static int _intel_hdcp_enable(struct intel_atomic_state *state,
2431 struct intel_encoder *encoder,
2432 const struct intel_crtc_state *pipe_config,
2433 const struct drm_connector_state *conn_state)
2434 {
2435 struct intel_display *display = to_intel_display(encoder);
2436 struct drm_i915_private *i915 = to_i915(display->drm);
2437 struct intel_connector *connector =
2438 to_intel_connector(conn_state->connector);
2439 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2440 struct intel_hdcp *hdcp = &connector->hdcp;
2441 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2442 int ret = -EINVAL;
2443
2444 if (!hdcp->shim)
2445 return -ENOENT;
2446
2447 if (!connector->encoder) {
2448 drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
2449 connector->base.base.id, connector->base.name);
2450 return -ENODEV;
2451 }
2452
2453 mutex_lock(&hdcp->mutex);
2454 mutex_lock(&dig_port->hdcp.mutex);
2455 drm_WARN_ON(display->drm,
2456 hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2457 hdcp->content_type = (u8)conn_state->hdcp_content_type;
2458
2459 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2460 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2461 hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2462 } else {
2463 hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2464 hdcp->stream_transcoder = INVALID_TRANSCODER;
2465 }
2466
2467 if (DISPLAY_VER(display) >= 12)
2468 dig_port->hdcp.port_data.hdcp_transcoder =
2469 intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
2470
2471 	/*
2472 	 * HDCP 2.2 is more secure than HDCP 1.4, so if the setup is capable
2473 	 * of HDCP 2.2, it is preferred.
2474 	 */
2475 if (!hdcp->force_hdcp14 && intel_hdcp2_get_capability(connector)) {
2476 ret = _intel_hdcp2_enable(state, connector);
2477 if (!ret)
2478 check_link_interval =
2479 DRM_HDCP2_CHECK_PERIOD_MS;
2480 }
2481
2482 if (hdcp->force_hdcp14)
2483 drm_dbg_kms(display->drm, "Forcing HDCP 1.4\n");
2484
2485 	/*
2486 	 * When HDCP 2.2 fails and the content type is not Type 1, fall back
2487 	 * to HDCP 1.4.
2488 	 */
2489 if (ret && intel_hdcp_get_capability(connector) &&
2490 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2491 ret = intel_hdcp1_enable(connector);
2492 }
2493
2494 if (!ret) {
2495 queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2496 check_link_interval);
2497 intel_hdcp_update_value(connector,
2498 DRM_MODE_CONTENT_PROTECTION_ENABLED,
2499 true);
2500 }
2501
2502 mutex_unlock(&dig_port->hdcp.mutex);
2503 mutex_unlock(&hdcp->mutex);
2504 return ret;
2505 }
2506
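/*
 * Atomic-commit entry point for enabling HDCP: calls _intel_hdcp_enable()
 * when the content protection property is DESIRED, or when it is ENABLED but
 * the driver internally dropped the state to UNDESIRED.
 */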
2507 void intel_hdcp_enable(struct intel_atomic_state *state,
2508 struct intel_encoder *encoder,
2509 const struct intel_crtc_state *crtc_state,
2510 const struct drm_connector_state *conn_state)
2511 {
2512 struct intel_connector *connector =
2513 to_intel_connector(conn_state->connector);
2514 struct intel_hdcp *hdcp = &connector->hdcp;
2515
2516 	/*
2517 	 * Enable HDCP if userspace requested DESIRED, or if userspace has it
2518 	 * ENABLED while the driver internally set its state to UNDESIRED.
2519 	 */
2520 if (conn_state->content_protection ==
2521 DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2522 (conn_state->content_protection ==
2523 DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
2524 DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2525 _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2526 }
2527
2528 int intel_hdcp_disable(struct intel_connector *connector)
2529 {
2530 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2531 struct intel_hdcp *hdcp = &connector->hdcp;
2532 int ret = 0;
2533
2534 if (!hdcp->shim)
2535 return -ENOENT;
2536
2537 mutex_lock(&hdcp->mutex);
2538 mutex_lock(&dig_port->hdcp.mutex);
2539
2540 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2541 goto out;
2542
2543 intel_hdcp_update_value(connector,
2544 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2545 if (hdcp->hdcp2_encrypted)
2546 ret = _intel_hdcp2_disable(connector, false);
2547 else if (hdcp->hdcp_encrypted)
2548 ret = _intel_hdcp_disable(connector);
2549
2550 out:
2551 mutex_unlock(&dig_port->hdcp.mutex);
2552 mutex_unlock(&hdcp->mutex);
2553 cancel_delayed_work_sync(&hdcp->check_work);
2554 return ret;
2555 }
2556
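/*
 * Re-evaluate HDCP on a pipe update: disable HDCP on an UNDESIRED request or
 * a content type change, mark the state DESIRED after a type change, schedule
 * prop_work when only the property needs syncing to userspace, and re-enable
 * HDCP when it is desired but not yet enabled.
 */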
2557 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2558 struct intel_encoder *encoder,
2559 const struct intel_crtc_state *crtc_state,
2560 const struct drm_connector_state *conn_state)
2561 {
2562 struct intel_connector *connector =
2563 to_intel_connector(conn_state->connector);
2564 struct intel_hdcp *hdcp = &connector->hdcp;
2565 bool content_protection_type_changed, desired_and_not_enabled = false;
2566 struct drm_i915_private *i915 = to_i915(connector->base.dev);
2567
2568 if (!connector->hdcp.shim)
2569 return;
2570
2571 content_protection_type_changed =
2572 (conn_state->hdcp_content_type != hdcp->content_type &&
2573 conn_state->content_protection !=
2574 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2575
2576 	/*
2577 	 * If a content type change is requested during an active HDCP
2578 	 * encryption session, disable HDCP and re-enable it with the new type.
2579 	 */
2580 if (conn_state->content_protection ==
2581 DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2582 content_protection_type_changed)
2583 intel_hdcp_disable(connector);
2584
2585 	/*
2586 	 * Mark the HDCP state as DESIRED after disabling HDCP as part of the
2587 	 * type change procedure.
2588 	 */
2589 if (content_protection_type_changed) {
2590 mutex_lock(&hdcp->mutex);
2591 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2592 drm_connector_get(&connector->base);
2593 if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
2594 drm_connector_put(&connector->base);
2595 mutex_unlock(&hdcp->mutex);
2596 }
2597
2598 if (conn_state->content_protection ==
2599 DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2600 mutex_lock(&hdcp->mutex);
2601 		/* Avoid enabling HDCP if it is already ENABLED */
2602 desired_and_not_enabled =
2603 hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2604 mutex_unlock(&hdcp->mutex);
2605 		/*
2606 		 * If HDCP is already ENABLED and the CP property is DESIRED,
2607 		 * schedule prop_work to report the correct CP property to user space.
2608 		 */
2609 if (!desired_and_not_enabled && !content_protection_type_changed) {
2610 drm_connector_get(&connector->base);
2611 if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
2612 drm_connector_put(&connector->base);
2613
2614 }
2615 }
2616
2617 if (desired_and_not_enabled || content_protection_type_changed)
2618 _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2619 }
2620
2621 void intel_hdcp_cancel_works(struct intel_connector *connector)
2622 {
2623 if (!connector->hdcp.shim)
2624 return;
2625
2626 cancel_delayed_work_sync(&connector->hdcp.check_work);
2627 cancel_work_sync(&connector->hdcp.prop_work);
2628 }
2629
2630 void intel_hdcp_component_fini(struct intel_display *display)
2631 {
2632 mutex_lock(&display->hdcp.hdcp_mutex);
2633 if (!display->hdcp.comp_added) {
2634 mutex_unlock(&display->hdcp.hdcp_mutex);
2635 return;
2636 }
2637
2638 display->hdcp.comp_added = false;
2639 mutex_unlock(&display->hdcp.hdcp_mutex);
2640
2641 if (intel_hdcp_gsc_cs_required(display))
2642 intel_hdcp_gsc_fini(display);
2643 else
2644 component_del(display->drm->dev, &i915_hdcp_ops);
2645 }
2646
2647 void intel_hdcp_cleanup(struct intel_connector *connector)
2648 {
2649 struct intel_hdcp *hdcp = &connector->hdcp;
2650
2651 if (!hdcp->shim)
2652 return;
2653
2654 /*
2655 * If the connector is registered, it's possible userspace could kick
2656 * off another HDCP enable, which would re-spawn the workers.
2657 */
2658 drm_WARN_ON(connector->base.dev,
2659 connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2660
2661 /*
2662 * Now that the connector is not registered, check_work won't be run,
2663 * but cancel any outstanding instances of it
2664 */
2665 cancel_delayed_work_sync(&hdcp->check_work);
2666
2667 /*
2668 * We don't cancel prop_work in the same way as check_work since it
2669 * requires connection_mutex which could be held while calling this
2670 * function. Instead, we rely on the connector references grabbed before
2671 * scheduling prop_work to ensure the connector is alive when prop_work
2672 * is run. So if we're in the destroy path (which is where this
2673 * function should be called), we're "guaranteed" that prop_work is not
2674 * active (tl;dr This Should Never Happen).
2675 */
2676 drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2677
2678 mutex_lock(&hdcp->mutex);
2679 hdcp->shim = NULL;
2680 mutex_unlock(&hdcp->mutex);
2681 }
2682
2683 void intel_hdcp_atomic_check(struct drm_connector *connector,
2684 struct drm_connector_state *old_state,
2685 struct drm_connector_state *new_state)
2686 {
2687 u64 old_cp = old_state->content_protection;
2688 u64 new_cp = new_state->content_protection;
2689 struct drm_crtc_state *crtc_state;
2690
2691 if (!new_state->crtc) {
2692 /*
2693 * If the connector is being disabled with CP enabled, mark it
2694 * desired so it's re-enabled when the connector is brought back
2695 */
2696 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2697 new_state->content_protection =
2698 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2699 return;
2700 }
2701
2702 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2703 new_state->crtc);
2704 	/*
2705 	 * Fix the HDCP uapi content protection state in case of a modeset.
2706 	 * FIXME: As per the HDCP content protection property uapi doc, a
2707 	 * uevent() needs to be sent if there is a transition from ENABLED->DESIRED.
2708 	 */
2709 if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2710 (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2711 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2712 new_state->content_protection =
2713 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2714
2715 	/*
2716 	 * Nothing to do if the state didn't change, or if HDCP was activated
2717 	 * since the last commit, as long as the HDCP content type is also
2718 	 * unchanged.
2719 	 */
2719 if (old_cp == new_cp ||
2720 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2721 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2722 if (old_state->hdcp_content_type ==
2723 new_state->hdcp_content_type)
2724 return;
2725 }
2726
2727 crtc_state->mode_changed = true;
2728 }
2729
2730 /* Handles the CP_IRQ raised from the DP HDCP sink */
2731 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2732 {
2733 struct intel_hdcp *hdcp = &connector->hdcp;
2734 struct intel_display *display = to_intel_display(connector);
2735 struct drm_i915_private *i915 = to_i915(display->drm);
2736
2737 if (!hdcp->shim)
2738 return;
2739
2740 atomic_inc(&connector->hdcp.cp_irq_count);
2741 wake_up_all(&connector->hdcp.cp_irq_queue);
2742
2743 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
2744 }
2745
2746 static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector,
2747 bool remote_req)
2748 {
2749 bool hdcp_cap = false, hdcp2_cap = false;
2750
2751 if (!connector->hdcp.shim) {
2752 seq_puts(m, "No Connector Support");
2753 goto out;
2754 }
2755
2756 if (remote_req) {
2757 intel_hdcp_get_remote_capability(connector, &hdcp_cap, &hdcp2_cap);
2758 } else {
2759 hdcp_cap = intel_hdcp_get_capability(connector);
2760 hdcp2_cap = intel_hdcp2_get_capability(connector);
2761 }
2762
2763 if (hdcp_cap)
2764 seq_puts(m, "HDCP1.4 ");
2765 if (hdcp2_cap)
2766 seq_puts(m, "HDCP2.2 ");
2767
2768 if (!hdcp_cap && !hdcp2_cap)
2769 seq_puts(m, "None");
2770
2771 out:
2772 seq_puts(m, "\n");
2773 }
2774
2775 void intel_hdcp_info(struct seq_file *m, struct intel_connector *connector)
2776 {
2777 seq_puts(m, "\tHDCP version: ");
2778 if (connector->mst.dp) {
2779 __intel_hdcp_info(m, connector, true);
2780 seq_puts(m, "\tMST Hub HDCP version: ");
2781 }
2782 __intel_hdcp_info(m, connector, false);
2783 }
2784
2785 static int intel_hdcp_sink_capability_show(struct seq_file *m, void *data)
2786 {
2787 struct intel_connector *connector = m->private;
2788 struct intel_display *display = to_intel_display(connector);
2789 int ret;
2790
2791 ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
2792 if (ret)
2793 return ret;
2794
2795 if (!connector->base.encoder ||
2796 connector->base.status != connector_status_connected) {
2797 ret = -ENODEV;
2798 goto out;
2799 }
2800
2801 seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
2802 connector->base.base.id);
2803 __intel_hdcp_info(m, connector, false);
2804
2805 out:
2806 drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2807
2808 return ret;
2809 }
2810 DEFINE_SHOW_ATTRIBUTE(intel_hdcp_sink_capability);
2811
2812 static ssize_t intel_hdcp_force_14_write(struct file *file,
2813 const char __user *ubuf,
2814 size_t len, loff_t *offp)
2815 {
2816 struct seq_file *m = file->private_data;
2817 struct intel_connector *connector = m->private;
2818 struct intel_hdcp *hdcp = &connector->hdcp;
2819 bool force_hdcp14 = false;
2820 int ret;
2821
2822 if (len == 0)
2823 return 0;
2824
2825 ret = kstrtobool_from_user(ubuf, len, &force_hdcp14);
2826 if (ret < 0)
2827 return ret;
2828
2829 hdcp->force_hdcp14 = force_hdcp14;
2830 *offp += len;
2831
2832 return len;
2833 }
2834
2835 static int intel_hdcp_force_14_show(struct seq_file *m, void *data)
2836 {
2837 struct intel_connector *connector = m->private;
2838 struct intel_display *display = to_intel_display(connector);
2839 struct intel_encoder *encoder = intel_attached_encoder(connector);
2840 struct intel_hdcp *hdcp = &connector->hdcp;
2841 struct drm_crtc *crtc;
2842 int ret;
2843
2844 if (!encoder)
2845 return -ENODEV;
2846
2847 ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
2848 if (ret)
2849 return ret;
2850
2851 crtc = connector->base.state->crtc;
2852 if (connector->base.status != connector_status_connected || !crtc) {
2853 ret = -ENODEV;
2854 goto out;
2855 }
2856
2857 seq_printf(m, "%s\n",
2858 str_yes_no(hdcp->force_hdcp14));
2859 out:
2860 drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2861
2862 return ret;
2863 }
2864
2865 static int intel_hdcp_force_14_open(struct inode *inode,
2866 struct file *file)
2867 {
2868 return single_open(file, intel_hdcp_force_14_show,
2869 inode->i_private);
2870 }
2871
2872 static const struct file_operations intel_hdcp_force_14_fops = {
2873 .owner = THIS_MODULE,
2874 .open = intel_hdcp_force_14_open,
2875 .read = seq_read,
2876 .llseek = seq_lseek,
2877 .release = single_release,
2878 .write = intel_hdcp_force_14_write
2879 };
2880
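/*
 * Expose per-connector debugfs entries for DP and HDMI connectors:
 * i915_hdcp_sink_capability (read-only) and i915_force_hdcp14, which accepts
 * a boolean value (e.g. "echo 1 > i915_force_hdcp14") to skip HDCP 2.2 and
 * force HDCP 1.4 on the next enable.
 */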
2881 void intel_hdcp_connector_debugfs_add(struct intel_connector *connector)
2882 {
2883 struct dentry *root = connector->base.debugfs_entry;
2884 int connector_type = connector->base.connector_type;
2885
2886 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2887 connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2888 connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2889 debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
2890 connector, &intel_hdcp_sink_capability_fops);
2891 debugfs_create_file("i915_force_hdcp14", 0644, root,
2892 connector, &intel_hdcp_force_14_fops);
2893 }
2894 }
2895