1 /* SPDX-License-Identifier: MIT */
2 /*
3 * Copyright (C) 2017 Google, Inc.
 * Copyright © 2017-2019, Intel Corporation.
5 *
6 * Authors:
7 * Sean Paul <seanpaul@chromium.org>
8 * Ramalingam C <ramalingam.c@intel.com>
9 */
10
11 #include <linux/component.h>
12 #include <linux/debugfs.h>
13 #include <linux/i2c.h>
14 #include <linux/iopoll.h>
15 #include <linux/random.h>
16
17 #include <drm/display/drm_hdcp_helper.h>
18 #include <drm/drm_print.h>
19 #include <drm/intel/i915_component.h>
20
21 #include "i915_reg.h"
22 #include "intel_connector.h"
23 #include "intel_de.h"
24 #include "intel_display_jiffies.h"
25 #include "intel_display_power.h"
26 #include "intel_display_power_well.h"
27 #include "intel_display_regs.h"
28 #include "intel_display_rpm.h"
29 #include "intel_display_types.h"
30 #include "intel_dp_mst.h"
31 #include "intel_hdcp.h"
32 #include "intel_hdcp_gsc_message.h"
33 #include "intel_hdcp_regs.h"
34 #include "intel_hdcp_shim.h"
35 #include "intel_parent.h"
36 #include "intel_pcode.h"
37 #include "intel_step.h"
38
39 #define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
40
41 #define KEY_LOAD_TRIES 5
42 #define HDCP2_LC_RETRY_CNT 3
43
/*
 * Enable/disable HDCP line rekeying for an HDMI (TMDS) encoder.
 *
 * The register and bit controlling rekeying differ per display version and
 * stepping; rekey_bit stays 0 on platforms with no matching register, making
 * this a no-op there. Note the bits are "disable" bits: enable == true
 * clears the bit (rekeying active), enable == false sets it.
 */
static void
intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
				     struct intel_hdcp *hdcp,
				     bool enable)
{
	struct intel_display *display = to_intel_display(encoder);
	i915_reg_t rekey_reg;
	u32 rekey_bit = 0;

	/* Here we assume HDMI is in TMDS mode of operation */
	if (!intel_encoder_is_hdmi(encoder))
		return;

	if (DISPLAY_VER(display) >= 30) {
		rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
		rekey_bit = XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
	} else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
		   IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) {
		rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
		rekey_bit = TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
	} else if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) {
		rekey_reg = CHICKEN_TRANS(display, hdcp->cpu_transcoder);
		rekey_bit = HDCP_LINE_REKEY_DISABLE;
	}

	if (rekey_bit)
		intel_de_rmw(display, rekey_reg, rekey_bit, enable ? 0 : rekey_bit);
}
72
/*
 * Return the VCPI (MST stream id) of the connector's atomic payload.
 * Returns 0 for non-MST connectors and on any unexpected payload state.
 */
static int intel_conn_to_vcpi(struct intel_atomic_state *state,
			      struct intel_connector *connector)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_atomic_payload *payload;
	struct drm_dp_mst_topology_state *mst_state;
	int vcpi = 0;

	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
	if (!connector->mst.port)
		return 0;
	mgr = connector->mst.port->mgr;

	/*
	 * No explicit unlock here: the lock is taken with the atomic state's
	 * acquire_ctx, so it is dropped with the rest of the atomic state's
	 * locks when the state is finalized.
	 */
	drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
	payload = drm_atomic_get_mst_payload_state(mst_state, connector->mst.port);
	if (drm_WARN_ON(mgr->dev, !payload))
		goto out;

	vcpi = payload->vcpi;
	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
		/* Negative VCPI is unexpected; report stream id 0 instead. */
		vcpi = 0;
		goto out;
	}
out:
	return vcpi;
}
100
/*
 * intel_hdcp_required_content_stream selects the highest common HDCP
 * content_type possible for all streams in the DP MST topology, because the
 * security f/w doesn't have any provision to mark the content_type for each
 * stream separately; it marks all available streams with the content_type
 * provided at the time of port authentication. This may prohibit userspace
 * from using type1 content on an HDCP 2.2 capable sink when other sinks in
 * the DP MST topology are not capable of HDCP 2.2. Though it is not
 * compulsory, the security fw should change its policy to mark different
 * content_types for different streams.
 */
static int
intel_hdcp_required_content_stream(struct intel_atomic_state *state,
				   struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_list_iter conn_iter;
	struct intel_digital_port *conn_dig_port;
	struct intel_connector *connector;
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	bool enforce_type0 = false;
	int k;

	/* Port already authenticated: stream list is already in place. */
	if (dig_port->hdcp.auth_status)
		return 0;

	data->k = 0;

	/* Fall back to type0 when the MST topology isn't type1 capable. */
	if (!dig_port->hdcp.mst_type1_capable)
		enforce_type0 = true;

	/* Collect the stream id of every connected MST stream on this port. */
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.status == connector_status_disconnected)
			continue;

		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
			continue;

		conn_dig_port = intel_attached_dig_port(connector);
		if (conn_dig_port != dig_port)
			continue;

		data->streams[data->k].stream_id =
			intel_conn_to_vcpi(state, connector);
		data->k++;

		/* if there is only one active stream */
		if (intel_dp_mst_active_streams(&dig_port->dp) <= 1)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Need at least one stream, and never more streams than pipes. */
	if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0))
		return -EINVAL;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;

	return 0;
}
166
intel_hdcp_prepare_streams(struct intel_atomic_state * state,struct intel_connector * connector)167 static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
168 struct intel_connector *connector)
169 {
170 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
171 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
172 struct intel_hdcp *hdcp = &connector->hdcp;
173
174 if (intel_encoder_is_mst(intel_attached_encoder(connector)))
175 return intel_hdcp_required_content_stream(state, dig_port);
176
177 data->k = 1;
178 data->streams[0].stream_id = 0;
179 data->streams[0].stream_type = hdcp->content_type;
180
181 return 0;
182 }
183
184 static
intel_hdcp_is_ksv_valid(u8 * ksv)185 bool intel_hdcp_is_ksv_valid(u8 *ksv)
186 {
187 int i, ones = 0;
188 /* KSV has 20 1's and 20 0's */
189 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
190 ones += hweight8(ksv[i]);
191 if (ones != 20)
192 return false;
193
194 return true;
195 }
196
197 static
intel_hdcp_read_valid_bksv(struct intel_digital_port * dig_port,const struct intel_hdcp_shim * shim,u8 * bksv)198 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
199 const struct intel_hdcp_shim *shim, u8 *bksv)
200 {
201 struct intel_display *display = to_intel_display(dig_port);
202 int ret, i, tries = 2;
203
204 /* HDCP spec states that we must retry the bksv if it is invalid */
205 for (i = 0; i < tries; i++) {
206 ret = shim->read_bksv(dig_port, bksv);
207 if (ret)
208 return ret;
209 if (intel_hdcp_is_ksv_valid(bksv))
210 break;
211 }
212 if (i == tries) {
213 drm_dbg_kms(display->drm, "Bksv is invalid\n");
214 return -ENODEV;
215 }
216
217 return 0;
218 }
219
220 /* Is HDCP1.4 capable on Platform and Sink */
intel_hdcp_get_capability(struct intel_connector * connector)221 static bool intel_hdcp_get_capability(struct intel_connector *connector)
222 {
223 struct intel_digital_port *dig_port;
224 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
225 bool capable = false;
226 u8 bksv[5];
227
228 if (!intel_attached_encoder(connector))
229 return capable;
230
231 dig_port = intel_attached_dig_port(connector);
232
233 if (!shim)
234 return capable;
235
236 if (shim->hdcp_get_capability) {
237 shim->hdcp_get_capability(dig_port, &capable);
238 } else {
239 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
240 capable = true;
241 }
242
243 return capable;
244 }
245
/*
 * Check if the source has all the building blocks ready to make
 * HDCP 2.2 work
 */
static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* If MTL+ make sure gsc is loaded and proxy is setup */
	if (USE_HDCP_GSC(display)) {
		if (!intel_parent_hdcp_gsc_check_status(display))
			return false;
	}

	/*
	 * MEI/GSC interface is solid depending on which is used.
	 * Check under the mutex that the component is bound and an
	 * arbiter is registered.
	 */
	mutex_lock(&display->hdcp.hdcp_mutex);
	if (!display->hdcp.comp_added || !display->hdcp.arbiter) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return false;
	}
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return true;
}
275
276 /* Is HDCP2.2 capable on Platform and Sink */
intel_hdcp2_get_capability(struct intel_connector * connector)277 static bool intel_hdcp2_get_capability(struct intel_connector *connector)
278 {
279 struct intel_hdcp *hdcp = &connector->hdcp;
280 bool capable = false;
281
282 if (!intel_hdcp2_prerequisite(connector))
283 return false;
284
285 /* Sink's capability for HDCP2.2 */
286 hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
287
288 return capable;
289 }
290
/*
 * Query the remote (downstream) HDCP 1.4/2.2 capability via the shim.
 * The out-params are left untouched when the shim lacks the hook; the
 * HDCP 2.2 result is additionally gated on the source-side prerequisites.
 */
static void intel_hdcp_get_remote_capability(struct intel_connector *connector,
					     bool *hdcp_capable,
					     bool *hdcp2_capable)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim->get_remote_hdcp_capability)
		return;

	hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
					       hdcp2_capable);

	/* Sink capability alone isn't enough; the source must be ready too. */
	if (!intel_hdcp2_prerequisite(connector))
		*hdcp2_capable = false;
}
306
intel_hdcp_in_use(struct intel_display * display,enum transcoder cpu_transcoder,enum port port)307 static bool intel_hdcp_in_use(struct intel_display *display,
308 enum transcoder cpu_transcoder, enum port port)
309 {
310 return intel_de_read(display,
311 HDCP_STATUS(display, cpu_transcoder, port)) &
312 HDCP_STATUS_ENC;
313 }
314
intel_hdcp2_in_use(struct intel_display * display,enum transcoder cpu_transcoder,enum port port)315 static bool intel_hdcp2_in_use(struct intel_display *display,
316 enum transcoder cpu_transcoder, enum port port)
317 {
318 return intel_de_read(display,
319 HDCP2_STATUS(display, cpu_transcoder, port)) &
320 LINK_ENCRYPTION_STATUS;
321 }
322
/*
 * Poll the receiver's KSV-list-ready indication every 100ms for up to 5s
 * (the HDCP 1.4 spec's maximum). Returns 0 once ready, the poll helper's
 * error on timeout, or the shim read error if the readiness query itself
 * failed.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = poll_timeout_us(read_ret = shim->read_ksv_ready(dig_port, &ksv_ready),
			      read_ret || ksv_ready,
			      100 * 1000, 5 * 1000 * 1000, false);
	if (ret)
		return ret;
	/* The loop also exits on a failed shim read; propagate that error. */
	if (read_ret)
		return read_ret;

	return 0;
}
340
hdcp_key_loadable(struct intel_display * display)341 static bool hdcp_key_loadable(struct intel_display *display)
342 {
343 enum i915_power_well_id id;
344 bool enabled = false;
345
346 /*
347 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
348 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
349 */
350 if (display->platform.haswell || display->platform.broadwell)
351 id = HSW_DISP_PW_GLOBAL;
352 else
353 id = SKL_DISP_PW_1;
354
355 /* PG1 (power well #1) needs to be enabled */
356 with_intel_display_rpm(display)
357 enabled = intel_display_power_well_is_enabled(display, id);
358
359 /*
360 * Another req for hdcp key loadability is enabled state of pll for
361 * cdclk. Without active crtc we won't land here. So we are assuming that
362 * cdclk is already on.
363 */
364
365 return enabled;
366 }
367
intel_hdcp_clear_keys(struct intel_display * display)368 static void intel_hdcp_clear_keys(struct intel_display *display)
369 {
370 intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
371 intel_de_write(display, HDCP_KEY_STATUS,
372 HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
373 }
374
/*
 * Load the HDCP 1.4 keys into the hardware (if not already loaded) and
 * push the Aksv to the PCH display for use in authentication.
 *
 * Returns 0 on success, -ENXIO when the keys fail to load (or were
 * expected to be pre-loaded on HSW/BDW), or a pcode/wait error.
 */
static int intel_hdcp_load_keys(struct intel_display *display)
{
	int ret;
	u32 val;

	/* Already loaded: nothing to do. */
	val = intel_de_read(display, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (display->platform.haswell || display->platform.broadwell)
		if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
		ret = intel_pcode_write(display->drm, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(display->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (spec says 500us; poll up to 1ms) */
	ret = intel_de_wait_ms(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE,
			       HDCP_KEY_LOAD_DONE, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
425
/*
 * Write one 32-bit word of the SHA-1 input stream and wait for the HW to
 * be ready for the next word. Returns 0 on success, -ETIMEDOUT if
 * HDCP_SHA1_READY never asserts. (Despite historical comments, this does
 * not return an index.)
 */
static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
{
	intel_de_write(display, HDCP_SHA_TEXT, sha_text);
	if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
		drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}
436
/*
 * Return the HDCP_REP_CTL bits (repeater-present + SHA1 M0 select) for
 * the given transcoder (display version >= 12) or DDI port (older
 * platforms). Returns 0 and logs an error for an unknown transcoder/port.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_display *display,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Display 12+ selects by transcoder rather than port. */
	if (DISPLAY_VER(display) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(display->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return 0;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(display->drm, "Unknown port %d\n", port);
		return 0;
	}
}
478
/*
 * Validate the repeater's V' against a SHA-1 computed by the source HW
 * over the downstream KSV list, BINFO/BSTATUS and M0 (HDCP 1.4 repeater
 * authentication, part 2).
 *
 * Returns 0 when V matches V', -ENXIO on mismatch, -ETIMEDOUT on HW
 * timeouts, -EINVAL on internal inconsistency, or a shim read error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port);
	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(display, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(display, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(display, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(display, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL,
				     HDCP_SHA1_COMPLETE, 1)) {
		drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
721
/* Implements Part 2 of the HDCP authorization procedure */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	/* Wait (up to 5s) for the repeater to assemble its KSV list. */
	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies that exceed the HDCP 1.4 device/cascade limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(display->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Any revoked downstream KSV fails the whole authentication. */
	if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		/* ret still holds the last validation error here. */
		drm_dbg_kms(display->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
805
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	u32 val;
	/* Unions give byte views of the 64-bit An/Bksv and 16-bit Ri'
	 * register values for the shim's sink-side transfers. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_get_capability) {
		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(display->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(display,
			       HDCP_ANINIT(display, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set_ms(display,
				     HDCP_STATUS(display, cpu_transcoder, port),
				     HDCP_STATUS_AN_READY, 1)) {
		drm_err(display->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read back the HW-generated An and send it to the sink. */
	an.reg[0] = intel_de_read(display,
				  HDCP_ANLO(display, cpu_transcoder, port));
	an.reg[1] = intel_de_read(display,
				  HDCP_ANHI(display, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The sink's R0' generation window starts once Aksv is written. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* A revoked Bksv fails authentication outright. */
	if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) {
		drm_err(display->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(display, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	/* Kick off HW authentication + encryption. */
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
			      val & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC),
			      100, 1000, false);
	if (ret) {
		drm_err(display->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(display,
			       HDCP_RPRIME(display, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
				      val & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC),
				      100, 1000, false);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "Timed out waiting for Ri prime match (%x)\n", val);
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set_ms(display,
				     HDCP_STATUS(display, cpu_transcoder, port),
				     HDCP_STATUS_ENC,
				     HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	/* Repeaters require Part 2 (downstream KSV/V' validation) as well. */
	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
981
/*
 * Tear down HDCP 1.4: disable stream encryption (if supported), then
 * port encryption, clear the repeater control bits and drop signalling.
 * Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->hdcp.num_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
	/* Wait for the whole status register to clear after disabling. */
	if (intel_de_wait_for_clear_ms(display,
				       HDCP_STATUS(display, cpu_transcoder, port),
				       ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear this port's repeater-present / SHA1 M0 selection bits. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
						   port);
	intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(display->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(display->drm, "HDCP is disabled\n");
	return 0;
}
1036
intel_hdcp1_enable(struct intel_connector * connector)1037 static int intel_hdcp1_enable(struct intel_connector *connector)
1038 {
1039 struct intel_display *display = to_intel_display(connector);
1040 struct intel_hdcp *hdcp = &connector->hdcp;
1041 int i, ret, tries = 3;
1042
1043 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
1044 connector->base.base.id, connector->base.name);
1045
1046 if (!hdcp_key_loadable(display)) {
1047 drm_err(display->drm, "HDCP key Load is not possible\n");
1048 return -ENXIO;
1049 }
1050
1051 for (i = 0; i < KEY_LOAD_TRIES; i++) {
1052 ret = intel_hdcp_load_keys(display);
1053 if (!ret)
1054 break;
1055 intel_hdcp_clear_keys(display);
1056 }
1057 if (ret) {
1058 drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
1059 ret);
1060 return ret;
1061 }
1062
1063 intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, true);
1064
1065 /* Incase of authentication failures, HDCP spec expects reauth. */
1066 for (i = 0; i < tries; i++) {
1067 ret = intel_hdcp_auth(connector);
1068 if (!ret) {
1069 hdcp->hdcp_encrypted = true;
1070 return 0;
1071 }
1072
1073 drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);
1074
1075 /* Ensuring HDCP encryption and signalling are stopped. */
1076 _intel_hdcp_disable(connector);
1077 }
1078
1079 drm_dbg_kms(display->drm,
1080 "HDCP authentication failed (%d tries/%d)\n", tries, ret);
1081 return ret;
1082 }
1083
/* Resolve the intel_connector that embeds the given intel_hdcp state. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
1088
/*
 * Transition the connector's content protection value and keep the per-port
 * stream count in sync.
 *
 * @value: new DRM_MODE_CONTENT_PROTECTION_* value
 * @update_property: also propagate the value to the connector property via
 *	the prop_work worker (property updates need connection_mutex, which
 *	cannot be taken here under hdcp->mutex)
 *
 * Caller must hold hdcp->mutex, and dig_port->hdcp.mutex whenever the value
 * actually changes (both are asserted below).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp.mutex));

	/* num_streams tracks how many connectors on this port are ENABLED */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(display->drm, dig_port->hdcp.num_streams == 0))
			dig_port->hdcp.num_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->hdcp.num_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/*
		 * The worker owns a connector reference; drop it here if the
		 * work was already queued and we didn't hand one over.
		 */
		drm_connector_get(&connector->base);
		if (!queue_work(display->wq.unordered, &hdcp->prop_work))
			drm_connector_put(&connector->base);
	}
}
1117
/*
 * Implements Part 3 of the HDCP authorization procedure: the periodic HDCP
 * 1.4 link integrity check. On link failure, HDCP is disabled and full
 * authentication is retried; on unrecoverable errors the content protection
 * property is moved back to DESIRED. Returns 0 or a negative error code.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp.mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware unexpectedly stopped encrypting: fall back to DESIRED */
	if (drm_WARN_ON(display->drm,
			!intel_hdcp_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
			connector->base.base.id, connector->base.name,
			intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Sink-side link check passed: (re)assert ENABLED and we are done */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
		    connector->base.base.id, connector->base.name);

	/* Link integrity lost: tear down, then re-run full authentication */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = intel_hdcp1_enable(connector);
	if (ret) {
		drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp.mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1188
/*
 * Deferred worker that pushes hdcp->value to the connector's content
 * protection property. Runs outside hdcp->mutex so it can take
 * connection_mutex, and drops the connector reference taken when the work
 * was queued (see intel_hdcp_update_value()).
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct intel_display *display = to_intel_display(connector);

	drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	drm_connector_put(&connector->base);
}
1213
is_hdcp_supported(struct intel_display * display,enum port port)1214 bool is_hdcp_supported(struct intel_display *display, enum port port)
1215 {
1216 return DISPLAY_RUNTIME_INFO(display)->has_hdcp &&
1217 (DISPLAY_VER(display) >= 12 || port < PORT_E);
1218 }
1219
1220 static int
hdcp2_prepare_ake_init(struct intel_connector * connector,struct hdcp2_ake_init * ake_data)1221 hdcp2_prepare_ake_init(struct intel_connector *connector,
1222 struct hdcp2_ake_init *ake_data)
1223 {
1224 struct intel_display *display = to_intel_display(connector);
1225 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1226 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1227 struct i915_hdcp_arbiter *arbiter;
1228 int ret;
1229
1230 mutex_lock(&display->hdcp.hdcp_mutex);
1231 arbiter = display->hdcp.arbiter;
1232
1233 if (!arbiter || !arbiter->ops) {
1234 mutex_unlock(&display->hdcp.hdcp_mutex);
1235 return -EINVAL;
1236 }
1237
1238 ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1239 if (ret)
1240 drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n",
1241 ret);
1242 mutex_unlock(&display->hdcp.hdcp_mutex);
1243
1244 return ret;
1245 }
1246
1247 static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector * connector,struct hdcp2_ake_send_cert * rx_cert,bool * paired,struct hdcp2_ake_no_stored_km * ek_pub_km,size_t * msg_sz)1248 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1249 struct hdcp2_ake_send_cert *rx_cert,
1250 bool *paired,
1251 struct hdcp2_ake_no_stored_km *ek_pub_km,
1252 size_t *msg_sz)
1253 {
1254 struct intel_display *display = to_intel_display(connector);
1255 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1256 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1257 struct i915_hdcp_arbiter *arbiter;
1258 int ret;
1259
1260 mutex_lock(&display->hdcp.hdcp_mutex);
1261 arbiter = display->hdcp.arbiter;
1262
1263 if (!arbiter || !arbiter->ops) {
1264 mutex_unlock(&display->hdcp.hdcp_mutex);
1265 return -EINVAL;
1266 }
1267
1268 ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1269 rx_cert, paired,
1270 ek_pub_km, msg_sz);
1271 if (ret < 0)
1272 drm_dbg_kms(display->drm, "Verify rx_cert failed. %d\n",
1273 ret);
1274 mutex_unlock(&display->hdcp.hdcp_mutex);
1275
1276 return ret;
1277 }
1278
hdcp2_verify_hprime(struct intel_connector * connector,struct hdcp2_ake_send_hprime * rx_hprime)1279 static int hdcp2_verify_hprime(struct intel_connector *connector,
1280 struct hdcp2_ake_send_hprime *rx_hprime)
1281 {
1282 struct intel_display *display = to_intel_display(connector);
1283 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1284 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1285 struct i915_hdcp_arbiter *arbiter;
1286 int ret;
1287
1288 mutex_lock(&display->hdcp.hdcp_mutex);
1289 arbiter = display->hdcp.arbiter;
1290
1291 if (!arbiter || !arbiter->ops) {
1292 mutex_unlock(&display->hdcp.hdcp_mutex);
1293 return -EINVAL;
1294 }
1295
1296 ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1297 if (ret < 0)
1298 drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret);
1299 mutex_unlock(&display->hdcp.hdcp_mutex);
1300
1301 return ret;
1302 }
1303
1304 static int
hdcp2_store_pairing_info(struct intel_connector * connector,struct hdcp2_ake_send_pairing_info * pairing_info)1305 hdcp2_store_pairing_info(struct intel_connector *connector,
1306 struct hdcp2_ake_send_pairing_info *pairing_info)
1307 {
1308 struct intel_display *display = to_intel_display(connector);
1309 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1310 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1311 struct i915_hdcp_arbiter *arbiter;
1312 int ret;
1313
1314 mutex_lock(&display->hdcp.hdcp_mutex);
1315 arbiter = display->hdcp.arbiter;
1316
1317 if (!arbiter || !arbiter->ops) {
1318 mutex_unlock(&display->hdcp.hdcp_mutex);
1319 return -EINVAL;
1320 }
1321
1322 ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1323 if (ret < 0)
1324 drm_dbg_kms(display->drm, "Store pairing info failed. %d\n",
1325 ret);
1326 mutex_unlock(&display->hdcp.hdcp_mutex);
1327
1328 return ret;
1329 }
1330
1331 static int
hdcp2_prepare_lc_init(struct intel_connector * connector,struct hdcp2_lc_init * lc_init)1332 hdcp2_prepare_lc_init(struct intel_connector *connector,
1333 struct hdcp2_lc_init *lc_init)
1334 {
1335 struct intel_display *display = to_intel_display(connector);
1336 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1337 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1338 struct i915_hdcp_arbiter *arbiter;
1339 int ret;
1340
1341 mutex_lock(&display->hdcp.hdcp_mutex);
1342 arbiter = display->hdcp.arbiter;
1343
1344 if (!arbiter || !arbiter->ops) {
1345 mutex_unlock(&display->hdcp.hdcp_mutex);
1346 return -EINVAL;
1347 }
1348
1349 ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1350 if (ret < 0)
1351 drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n",
1352 ret);
1353 mutex_unlock(&display->hdcp.hdcp_mutex);
1354
1355 return ret;
1356 }
1357
1358 static int
hdcp2_verify_lprime(struct intel_connector * connector,struct hdcp2_lc_send_lprime * rx_lprime)1359 hdcp2_verify_lprime(struct intel_connector *connector,
1360 struct hdcp2_lc_send_lprime *rx_lprime)
1361 {
1362 struct intel_display *display = to_intel_display(connector);
1363 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1364 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1365 struct i915_hdcp_arbiter *arbiter;
1366 int ret;
1367
1368 mutex_lock(&display->hdcp.hdcp_mutex);
1369 arbiter = display->hdcp.arbiter;
1370
1371 if (!arbiter || !arbiter->ops) {
1372 mutex_unlock(&display->hdcp.hdcp_mutex);
1373 return -EINVAL;
1374 }
1375
1376 ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1377 if (ret < 0)
1378 drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n",
1379 ret);
1380 mutex_unlock(&display->hdcp.hdcp_mutex);
1381
1382 return ret;
1383 }
1384
hdcp2_prepare_skey(struct intel_connector * connector,struct hdcp2_ske_send_eks * ske_data)1385 static int hdcp2_prepare_skey(struct intel_connector *connector,
1386 struct hdcp2_ske_send_eks *ske_data)
1387 {
1388 struct intel_display *display = to_intel_display(connector);
1389 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1390 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1391 struct i915_hdcp_arbiter *arbiter;
1392 int ret;
1393
1394 mutex_lock(&display->hdcp.hdcp_mutex);
1395 arbiter = display->hdcp.arbiter;
1396
1397 if (!arbiter || !arbiter->ops) {
1398 mutex_unlock(&display->hdcp.hdcp_mutex);
1399 return -EINVAL;
1400 }
1401
1402 ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1403 if (ret < 0)
1404 drm_dbg_kms(display->drm, "Get session key failed. %d\n",
1405 ret);
1406 mutex_unlock(&display->hdcp.hdcp_mutex);
1407
1408 return ret;
1409 }
1410
1411 static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector * connector,struct hdcp2_rep_send_receiverid_list * rep_topology,struct hdcp2_rep_send_ack * rep_send_ack)1412 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1413 struct hdcp2_rep_send_receiverid_list
1414 *rep_topology,
1415 struct hdcp2_rep_send_ack *rep_send_ack)
1416 {
1417 struct intel_display *display = to_intel_display(connector);
1418 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1419 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1420 struct i915_hdcp_arbiter *arbiter;
1421 int ret;
1422
1423 mutex_lock(&display->hdcp.hdcp_mutex);
1424 arbiter = display->hdcp.arbiter;
1425
1426 if (!arbiter || !arbiter->ops) {
1427 mutex_unlock(&display->hdcp.hdcp_mutex);
1428 return -EINVAL;
1429 }
1430
1431 ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1432 data,
1433 rep_topology,
1434 rep_send_ack);
1435 if (ret < 0)
1436 drm_dbg_kms(display->drm,
1437 "Verify rep topology failed. %d\n", ret);
1438 mutex_unlock(&display->hdcp.hdcp_mutex);
1439
1440 return ret;
1441 }
1442
1443 static int
hdcp2_verify_mprime(struct intel_connector * connector,struct hdcp2_rep_stream_ready * stream_ready)1444 hdcp2_verify_mprime(struct intel_connector *connector,
1445 struct hdcp2_rep_stream_ready *stream_ready)
1446 {
1447 struct intel_display *display = to_intel_display(connector);
1448 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1449 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1450 struct i915_hdcp_arbiter *arbiter;
1451 int ret;
1452
1453 mutex_lock(&display->hdcp.hdcp_mutex);
1454 arbiter = display->hdcp.arbiter;
1455
1456 if (!arbiter || !arbiter->ops) {
1457 mutex_unlock(&display->hdcp.hdcp_mutex);
1458 return -EINVAL;
1459 }
1460
1461 ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1462 if (ret < 0)
1463 drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret);
1464 mutex_unlock(&display->hdcp.hdcp_mutex);
1465
1466 return ret;
1467 }
1468
hdcp2_authenticate_port(struct intel_connector * connector)1469 static int hdcp2_authenticate_port(struct intel_connector *connector)
1470 {
1471 struct intel_display *display = to_intel_display(connector);
1472 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1473 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
1474 struct i915_hdcp_arbiter *arbiter;
1475 int ret;
1476
1477 mutex_lock(&display->hdcp.hdcp_mutex);
1478 arbiter = display->hdcp.arbiter;
1479
1480 if (!arbiter || !arbiter->ops) {
1481 mutex_unlock(&display->hdcp.hdcp_mutex);
1482 return -EINVAL;
1483 }
1484
1485 ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1486 if (ret < 0)
1487 drm_dbg_kms(display->drm, "Enable hdcp auth failed. %d\n",
1488 ret);
1489 mutex_unlock(&display->hdcp.hdcp_mutex);
1490
1491 return ret;
1492 }
1493
hdcp2_close_session(struct intel_connector * connector)1494 static int hdcp2_close_session(struct intel_connector *connector)
1495 {
1496 struct intel_display *display = to_intel_display(connector);
1497 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1498 struct i915_hdcp_arbiter *arbiter;
1499 int ret;
1500
1501 mutex_lock(&display->hdcp.hdcp_mutex);
1502 arbiter = display->hdcp.arbiter;
1503
1504 if (!arbiter || !arbiter->ops) {
1505 mutex_unlock(&display->hdcp.hdcp_mutex);
1506 return -EINVAL;
1507 }
1508
1509 ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1510 &dig_port->hdcp.port_data);
1511 mutex_unlock(&display->hdcp.hdcp_mutex);
1512
1513 return ret;
1514 }
1515
/* Deauthentication amounts to closing the HDCP 2.2 arbiter session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}
1520
1521 /* Authentication flow starts from here */
hdcp2_authentication_key_exchange(struct intel_connector * connector)1522 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1523 {
1524 struct intel_display *display = to_intel_display(connector);
1525 struct intel_digital_port *dig_port =
1526 intel_attached_dig_port(connector);
1527 struct intel_hdcp *hdcp = &connector->hdcp;
1528 union {
1529 struct hdcp2_ake_init ake_init;
1530 struct hdcp2_ake_send_cert send_cert;
1531 struct hdcp2_ake_no_stored_km no_stored_km;
1532 struct hdcp2_ake_send_hprime send_hprime;
1533 struct hdcp2_ake_send_pairing_info pairing_info;
1534 } msgs;
1535 const struct intel_hdcp_shim *shim = hdcp->shim;
1536 size_t size;
1537 int ret, i, max_retries;
1538
1539 /* Init for seq_num */
1540 hdcp->seq_num_v = 0;
1541 hdcp->seq_num_m = 0;
1542
1543 if (intel_encoder_is_dp(&dig_port->base) ||
1544 intel_encoder_is_mst(&dig_port->base))
1545 max_retries = 10;
1546 else
1547 max_retries = 1;
1548
1549 ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1550 if (ret < 0)
1551 return ret;
1552
1553 /*
1554 * Retry the first read and write to downstream at least 10 times
1555 * with a 50ms delay if not hdcp2 capable for DP/DPMST encoders
1556 * (dock decides to stop advertising hdcp2 capability for some reason).
1557 * The reason being that during suspend resume dock usually keeps the
1558 * HDCP2 registers inaccessible causing AUX error. This wouldn't be a
1559 * big problem if the userspace just kept retrying with some delay while
1560 * it continues to play low value content but most userspace applications
1561 * end up throwing an error when it receives one from KMD. This makes
1562 * sure we give the dock and the sink devices to complete its power cycle
1563 * and then try HDCP authentication. The values of 10 and delay of 50ms
1564 * was decided based on multiple trial and errors.
1565 */
1566 for (i = 0; i < max_retries; i++) {
1567 if (!intel_hdcp2_get_capability(connector)) {
1568 msleep(50);
1569 continue;
1570 }
1571
1572 ret = shim->write_2_2_msg(connector, &msgs.ake_init,
1573 sizeof(msgs.ake_init));
1574 if (ret < 0)
1575 continue;
1576
1577 ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
1578 &msgs.send_cert, sizeof(msgs.send_cert));
1579 if (ret > 0)
1580 break;
1581 }
1582
1583 if (ret < 0)
1584 return ret;
1585
1586 if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1587 drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n");
1588 return -EINVAL;
1589 }
1590
1591 hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1592
1593 if (drm_hdcp_check_ksvs_revoked(display->drm,
1594 msgs.send_cert.cert_rx.receiver_id,
1595 1) > 0) {
1596 drm_err(display->drm, "Receiver ID is revoked\n");
1597 return -EPERM;
1598 }
1599
1600 /*
1601 * Here msgs.no_stored_km will hold msgs corresponding to the km
1602 * stored also.
1603 */
1604 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1605 &hdcp->is_paired,
1606 &msgs.no_stored_km, &size);
1607 if (ret < 0)
1608 return ret;
1609
1610 ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
1611 if (ret < 0)
1612 return ret;
1613
1614 ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
1615 &msgs.send_hprime, sizeof(msgs.send_hprime));
1616 if (ret < 0)
1617 return ret;
1618
1619 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1620 if (ret < 0)
1621 return ret;
1622
1623 if (!hdcp->is_paired) {
1624 /* Pairing is required */
1625 ret = shim->read_2_2_msg(connector,
1626 HDCP_2_2_AKE_SEND_PAIRING_INFO,
1627 &msgs.pairing_info,
1628 sizeof(msgs.pairing_info));
1629 if (ret < 0)
1630 return ret;
1631
1632 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1633 if (ret < 0)
1634 return ret;
1635 hdcp->is_paired = true;
1636 }
1637
1638 return 0;
1639 }
1640
hdcp2_locality_check(struct intel_connector * connector)1641 static int hdcp2_locality_check(struct intel_connector *connector)
1642 {
1643 struct intel_hdcp *hdcp = &connector->hdcp;
1644 union {
1645 struct hdcp2_lc_init lc_init;
1646 struct hdcp2_lc_send_lprime send_lprime;
1647 } msgs;
1648 const struct intel_hdcp_shim *shim = hdcp->shim;
1649 int tries = HDCP2_LC_RETRY_CNT, ret, i;
1650
1651 for (i = 0; i < tries; i++) {
1652 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1653 if (ret < 0)
1654 continue;
1655
1656 ret = shim->write_2_2_msg(connector, &msgs.lc_init,
1657 sizeof(msgs.lc_init));
1658 if (ret < 0)
1659 continue;
1660
1661 ret = shim->read_2_2_msg(connector,
1662 HDCP_2_2_LC_SEND_LPRIME,
1663 &msgs.send_lprime,
1664 sizeof(msgs.send_lprime));
1665 if (ret < 0)
1666 continue;
1667
1668 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1669 if (!ret)
1670 break;
1671 }
1672
1673 return ret;
1674 }
1675
hdcp2_session_key_exchange(struct intel_connector * connector)1676 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1677 {
1678 struct intel_hdcp *hdcp = &connector->hdcp;
1679 struct hdcp2_ske_send_eks send_eks;
1680 int ret;
1681
1682 ret = hdcp2_prepare_skey(connector, &send_eks);
1683 if (ret < 0)
1684 return ret;
1685
1686 ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1687 sizeof(send_eks));
1688 if (ret < 0)
1689 return ret;
1690
1691 return 0;
1692 }
1693
/*
 * Send RepeaterAuth_Stream_Manage for the current set of streams and verify
 * the repeater's M' response (RepeaterAuth_Stream_Ready).
 *
 * seq_num_m is incremented on every attempt, success or failure; once it
 * exceeds HDCP_2_2_SEQ_NUM_MAX the function refuses with -ERANGE so the
 * counter cannot roll over.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	/* Copy the active stream ids/types into the message */
	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Trim the unused tail of the streams array off the wire message */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	/* Record the seq_num_m this M' verification corresponds to */
	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	hdcp->seq_num_m++;

	return ret;
}
1743
/*
 * HDCP 2.2 repeater authentication: read the receiver ID list, validate
 * topology limits, Type 1 capability, revocation and seq_num_V
 * monotonicity, then send back the ack prepared by the HDCP arbiter.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp.mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	if (!dig_port->hdcp.mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(display->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* The very first receiver ID list of a session must carry seq_num_V 0 */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(display->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(display->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1825
/*
 * Run the full HDCP 2.2 sink authentication sequence: AKE, locality check,
 * session key exchange, optional stream type configuration, and — for
 * repeaters — topology authentication. Returns 0 on success or the first
 * failing step's negative error code.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook to program the stream type on the sink */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	/* is_repeater was learned from the AKE certificate exchange */
	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1871
/*
 * Enable HDCP 2.2 stream encryption for @connector.
 *
 * Requires the link to already report LINK_ENCRYPTION_STATUS; if it does
 * not, the arbiter session is closed and the port's auth state is cleared
 * (the link_recover path) before returning -EPERM.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	      LINK_ENCRYPTION_STATUS)) {
		drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	/* Optional shim hook for per-stream (MST) encryption */
	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	/* Drop port auth state and the stream count handed to the arbiter */
	dig_port->hdcp.auth_status = false;
	data->k = 0;

	return ret;
}
1912
/*
 * Enable HDCP 2.2 link encryption on the port: enable signalling through
 * the shim (if it provides the hook), request encryption once the link
 * reports LINK_AUTH_STATUS, then wait for LINK_ENCRYPTION_STATUS.
 *
 * Note: auth_status is set even when the final wait times out; callers see
 * the failure through the returned error code.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here */
	drm_WARN_ON(display->drm,
		    intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(display->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set_ms(display,
				       HDCP2_STATUS(display, cpu_transcoder, port),
				       LINK_ENCRYPTION_STATUS,
				       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp.auth_status = true;

	return ret;
}
1950
/*
 * Drop the link-encryption request, wait for the hardware to clear
 * LINK_ENCRYPTION_STATUS and turn off HDCP signalling if the shim uses it.
 * A wait timeout is only logged; signalling is still torn down afterwards.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be on when disabling it. */
	drm_WARN_ON(display->drm,
		    !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS));

	intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear_ms(display,
					 HDCP2_STATUS(display, cpu_transcoder, port),
					 LINK_ENCRYPTION_STATUS,
					 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(display->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(display->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1987
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info() for
 * repeaters. Gives up early when seq_num_m rolls over, as the spec then
 * requires a full re-authentication. Returns the result of the last
 * attempt; 0 immediately for non-repeaters.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(display->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(display->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
2016
/*
 * Full HDCP 2.2 bring-up: authenticate the sink (with retries), propagate
 * stream management info for repeaters, authenticate the port, then enable
 * link and stream encryption. Skips re-authentication if the port is
 * already authenticated (dig_port->hdcp.auth_status).
 */
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
					  struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp.auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(state, connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Prepare stream failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(display->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp.auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(display->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
2077
/*
 * Enable HDCP 2.2 on a connector: adjust HDMI line rekeying for the HDCP
 * 2.2 protocol, run the authenticate-and-encrypt sequence and mark the
 * connector as HDCP 2.2 encrypted on success.
 */
static int _intel_hdcp2_enable(struct intel_atomic_state *state,
			       struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	/* Rekeying is disabled for HDCP 2.2 (enable == false here). */
	intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, false);

	ret = hdcp2_authenticate_and_encrypt(state, connector);
	if (ret) {
		drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}
2105
2106 static int
_intel_hdcp2_disable(struct intel_connector * connector,bool hdcp2_link_recovery)2107 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
2108 {
2109 struct intel_display *display = to_intel_display(connector);
2110 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2111 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
2112 struct intel_hdcp *hdcp = &connector->hdcp;
2113 int ret;
2114
2115 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
2116 connector->base.base.id, connector->base.name);
2117
2118 if (hdcp->shim->stream_2_2_encryption) {
2119 ret = hdcp->shim->stream_2_2_encryption(connector, false);
2120 if (ret) {
2121 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
2122 connector->base.base.id, connector->base.name);
2123 return ret;
2124 }
2125 drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
2126 transcoder_name(hdcp->stream_transcoder));
2127
2128 if (dig_port->hdcp.num_streams > 0 && !hdcp2_link_recovery)
2129 return 0;
2130 }
2131
2132 ret = hdcp2_disable_encryption(connector);
2133
2134 if (hdcp2_deauthenticate_port(connector) < 0)
2135 drm_dbg_kms(display->drm, "Port deauth failed.\n");
2136
2137 connector->hdcp.hdcp2_encrypted = false;
2138 dig_port->hdcp.auth_status = false;
2139 data->k = 0;
2140
2141 return ret;
2142 }
2143
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Verifies the encrypted link is still healthy; on failure or downstream
 * topology change it disables HDCP 2.2 and moves the content-protection
 * property back to DESIRED so the check worker re-attempts authentication.
 * Returns -EINVAL when HDCP 2.2 is not currently enabled on this connector.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	/* Lock order: connector hdcp mutex, then dig_port hdcp mutex. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp.mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(display->drm,
			!intel_hdcp2_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED,
						true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(display->drm,
			    "HDCP2.2 Downstream topology change\n");

		/* A topology change alone only needs repeater re-auth. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED,
						true);
			goto out;
		}

		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
			    connector->base.base.id, connector->base.name,
			    ret);
	} else {
		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
			    connector->base.base.id, connector->base.name);
	}

	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
			connector->base.base.id, connector->base.name, ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
	mutex_unlock(&dig_port->hdcp.mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2230
/*
 * Periodic link-integrity worker. Checks the HDCP 2.2 link first, falling
 * back to the HDCP 1.4 check, and re-arms itself with the matching check
 * period while the link stays protected. Stops once the connector is
 * unregistered.
 */
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct intel_display *display = to_intel_display(connector);

	if (drm_connector_is_unregistered(&connector->base))
		return;

	if (!intel_hdcp2_check_link(connector))
		queue_delayed_work(display->wq.unordered, &hdcp->check_work,
				   DRM_HDCP2_CHECK_PERIOD_MS);
	else if (!intel_hdcp_check_link(connector))
		queue_delayed_work(display->wq.unordered, &hdcp->check_work,
				   DRM_HDCP_CHECK_PERIOD_MS);
}
2249
i915_hdcp_component_bind(struct device * drv_kdev,struct device * mei_kdev,void * data)2250 static int i915_hdcp_component_bind(struct device *drv_kdev,
2251 struct device *mei_kdev, void *data)
2252 {
2253 struct intel_display *display = to_intel_display(drv_kdev);
2254
2255 drm_dbg(display->drm, "I915 HDCP comp bind\n");
2256 mutex_lock(&display->hdcp.hdcp_mutex);
2257 display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2258 display->hdcp.arbiter->hdcp_dev = mei_kdev;
2259 mutex_unlock(&display->hdcp.hdcp_mutex);
2260
2261 return 0;
2262 }
2263
i915_hdcp_component_unbind(struct device * drv_kdev,struct device * mei_kdev,void * data)2264 static void i915_hdcp_component_unbind(struct device *drv_kdev,
2265 struct device *mei_kdev, void *data)
2266 {
2267 struct intel_display *display = to_intel_display(drv_kdev);
2268
2269 drm_dbg(display->drm, "I915 HDCP comp unbind\n");
2270 mutex_lock(&display->hdcp.hdcp_mutex);
2271 display->hdcp.arbiter = NULL;
2272 mutex_unlock(&display->hdcp.hdcp_mutex);
2273 }
2274
/* Component ops for pairing with the external MEI HDCP firmware component. */
static const struct component_ops i915_hdcp_ops = {
	.bind = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2279
intel_get_hdcp_ddi_index(enum port port)2280 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2281 {
2282 switch (port) {
2283 case PORT_A:
2284 return HDCP_DDI_A;
2285 case PORT_B ... PORT_F:
2286 return (enum hdcp_ddi)port;
2287 default:
2288 return HDCP_DDI_INVALID_PORT;
2289 }
2290 }
2291
intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)2292 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2293 {
2294 switch (cpu_transcoder) {
2295 case TRANSCODER_A ... TRANSCODER_D:
2296 return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2297 default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2298 return HDCP_INVALID_TRANSCODER;
2299 }
2300 }
2301
/*
 * Initialize the per-port data handed to the HDCP firmware: DDI index
 * (pre-gen12 only), transcoder placeholder, port type, protocol and the
 * per-pipe stream array. Returns -ENOMEM if the stream array cannot be
 * allocated.
 */
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     struct intel_digital_port *dig_port,
				     const struct intel_hdcp_shim *shim)
{
	struct intel_display *display = to_intel_display(connector);
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	enum port port = dig_port->base.port;

	if (DISPLAY_VER(display) < 12)
		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here hdcp_transcoder
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* One stream slot per pipe; allocated once and reused across enables. */
	if (!data->streams)
		data->streams = kzalloc_objs(struct hdcp2_streamid_type,
					     INTEL_NUM_PIPES(display));
	if (!data->streams) {
		drm_err(display->drm, "Out of Memory\n");
		return -ENOMEM;
	}

	return 0;
}
2339
is_hdcp2_supported(struct intel_display * display)2340 static bool is_hdcp2_supported(struct intel_display *display)
2341 {
2342 if (USE_HDCP_GSC(display))
2343 return true;
2344
2345 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2346 return false;
2347
2348 return DISPLAY_VER(display) >= 10 ||
2349 display->platform.kabylake ||
2350 display->platform.coffeelake ||
2351 display->platform.cometlake;
2352 }
2353
/*
 * Register the HDCP firmware backend for the device: GSC init on newer
 * platforms, otherwise the MEI HDCP component. comp_added is set before
 * registration and rolled back if registration fails.
 */
void intel_hdcp_component_init(struct intel_display *display)
{
	int ret;

	if (!is_hdcp2_supported(display))
		return;

	mutex_lock(&display->hdcp.hdcp_mutex);
	drm_WARN_ON(display->drm, display->hdcp.comp_added);

	display->hdcp.comp_added = true;
	mutex_unlock(&display->hdcp.hdcp_mutex);
	if (USE_HDCP_GSC(display))
		ret = intel_hdcp_gsc_init(display);
	else
		ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
					  I915_COMPONENT_HDCP);

	if (ret < 0) {
		drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n",
			    ret);
		mutex_lock(&display->hdcp.hdcp_mutex);
		display->hdcp.comp_added = false;
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return;
	}
}
2381
/*
 * Per-connector HDCP 2.2 setup: initialize the firmware port data and, on
 * success, flag the connector as HDCP 2.2 capable.
 */
static void intel_hdcp2_init(struct intel_connector *connector,
			     struct intel_digital_port *dig_port,
			     const struct intel_hdcp_shim *shim)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	ret = initialize_hdcp_port_data(connector, dig_port, shim);
	if (ret) {
		drm_dbg_kms(display->drm, "Mei hdcp data init failed\n");
		return;
	}

	hdcp->hdcp2_supported = true;
}
2398
/*
 * Initialize HDCP state for a connector: optional HDCP 2.2 setup, the
 * content-protection property, locking and the check/prop work items.
 * Returns -EINVAL without a shim, or the property-attach error (after
 * rolling back the HDCP 2.2 stream allocation).
 */
int intel_hdcp_init(struct intel_connector *connector,
		    struct intel_digital_port *dig_port,
		    const struct intel_hdcp_shim *shim)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	if (!shim)
		return -EINVAL;

	if (is_hdcp2_supported(display))
		intel_hdcp2_init(connector, dig_port, shim);

	ret = drm_connector_attach_content_protection_property(&connector->base,
							       hdcp->hdcp2_supported);
	if (ret) {
		hdcp->hdcp2_supported = false;
		kfree(dig_port->hdcp.port_data.streams);
		return ret;
	}

	hdcp->shim = shim;
	mutex_init(&hdcp->mutex);
	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
	init_waitqueue_head(&hdcp->cp_irq_queue);

	return 0;
}
2429
/*
 * Enable content protection on a connector: records the transcoders in
 * use, prefers HDCP 2.2 when the sink is capable (unless force_hdcp14 is
 * set via debugfs), falls back to HDCP 1.4 for non-Type1 content, and on
 * success arms the periodic link check and flips the property to ENABLED.
 */
static int _intel_hdcp_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	/* Lock order: connector hdcp mutex, then dig_port hdcp mutex. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp.mutex);
	drm_WARN_ON(display->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = (u8)conn_state->hdcp_content_type;

	/* MST encrypts on the master transcoder, streams ride their own. */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	if (DISPLAY_VER(display) >= 12)
		dig_port->hdcp.port_data.hdcp_transcoder =
			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (!hdcp->force_hdcp14 && intel_hdcp2_get_capability(connector)) {
		ret = _intel_hdcp2_enable(state, connector);
		if (!ret)
			check_link_interval =
				DRM_HDCP2_CHECK_PERIOD_MS;
	}

	if (hdcp->force_hdcp14)
		drm_dbg_kms(display->drm, "Forcing HDCP 1.4\n");

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_get_capability(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = intel_hdcp1_enable(connector);
	}

	if (!ret) {
		queue_delayed_work(display->wq.unordered, &hdcp->check_work,
				   check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp.mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2499
intel_hdcp_enable(struct intel_atomic_state * state,struct intel_encoder * encoder,const struct intel_crtc_state * crtc_state,const struct drm_connector_state * conn_state)2500 void intel_hdcp_enable(struct intel_atomic_state *state,
2501 struct intel_encoder *encoder,
2502 const struct intel_crtc_state *crtc_state,
2503 const struct drm_connector_state *conn_state)
2504 {
2505 struct intel_connector *connector =
2506 to_intel_connector(conn_state->connector);
2507 struct intel_hdcp *hdcp = &connector->hdcp;
2508
2509 /*
2510 * Enable hdcp if it's desired or if userspace is enabled and
2511 * driver set its state to undesired
2512 */
2513 if (conn_state->content_protection ==
2514 DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2515 (conn_state->content_protection ==
2516 DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
2517 DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2518 _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2519 }
2520
/*
 * Disable whichever HDCP protocol is currently encrypting the connector
 * and mark the property UNDESIRED. The link-check work is cancelled after
 * dropping the locks so a running instance can finish first.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp.mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp.mutex);
	mutex_unlock(&hdcp->mutex);
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2549
/*
 * Re-evaluate HDCP across a modeset/property update: disable when
 * userspace turned protection off or switched content type, then
 * re-enable as needed. prop_work is scheduled (with a connector reference)
 * whenever the property value must be reported back to userspace.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;
	struct intel_display *display = to_intel_display(connector);

	if (!connector->hdcp.shim)
		return;

	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and re-enable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* Reference dropped by prop_work, or here if it never runs. */
		drm_connector_get(&connector->base);
		if (!queue_work(display->wq.unordered, &hdcp->prop_work))
			drm_connector_put(&connector->base);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			if (!queue_work(display->wq.unordered, &hdcp->prop_work))
				drm_connector_put(&connector->base);

		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
2613
intel_hdcp_cancel_works(struct intel_connector * connector)2614 void intel_hdcp_cancel_works(struct intel_connector *connector)
2615 {
2616 if (!connector->hdcp.shim)
2617 return;
2618
2619 cancel_delayed_work_sync(&connector->hdcp.check_work);
2620 cancel_work_sync(&connector->hdcp.prop_work);
2621 }
2622
/*
 * Unregister the HDCP firmware backend registered by
 * intel_hdcp_component_init(). Safe to call when nothing was registered.
 */
void intel_hdcp_component_fini(struct intel_display *display)
{
	mutex_lock(&display->hdcp.hdcp_mutex);
	if (!display->hdcp.comp_added) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return;
	}

	display->hdcp.comp_added = false;
	mutex_unlock(&display->hdcp.hdcp_mutex);

	if (USE_HDCP_GSC(display))
		intel_hdcp_gsc_fini(display);
	else
		component_del(display->drm->dev, &i915_hdcp_ops);
}
2639
/*
 * Final HDCP teardown on connector destruction: cancel outstanding work
 * and clear the shim so no further HDCP operations can start.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		    connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2675
/*
 * Atomic-check hook for the content-protection property: normalizes
 * ENABLED back to DESIRED when the CRTC is removed or a modeset occurs,
 * and forces a modeset (mode_changed) when the requested protection or
 * content type actually changed.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	     new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
		    new_state->hdcp_content_type)
			return;
	}

	crtc_state->mode_changed = true;
}
2722
/*
 * Handles the CP_IRQ raised from the DP HDCP sink: bumps the IRQ counter,
 * wakes any waiter blocked on cp_irq_queue and kicks an immediate link
 * check.
 */
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct intel_display *display = to_intel_display(connector);

	if (!hdcp->shim)
		return;

	atomic_inc(&connector->hdcp.cp_irq_count);
	wake_up_all(&connector->hdcp.cp_irq_queue);

	queue_delayed_work(display->wq.unordered, &hdcp->check_work, 0);
}
2737
/*
 * Print the sink's HDCP capability ("HDCP1.4", "HDCP2.2", "None" or
 * "No Connector Support") to a seq_file, querying either the remote (MST)
 * or the directly attached sink, and terminate with a newline.
 */
static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector,
			      bool remote_req)
{
	bool hdcp_cap = false, hdcp2_cap = false;

	if (!connector->hdcp.shim) {
		seq_puts(m, "No Connector Support");
	} else {
		if (remote_req) {
			intel_hdcp_get_remote_capability(connector,
							 &hdcp_cap, &hdcp2_cap);
		} else {
			hdcp_cap = intel_hdcp_get_capability(connector);
			hdcp2_cap = intel_hdcp2_get_capability(connector);
		}

		if (hdcp_cap)
			seq_puts(m, "HDCP1.4 ");
		if (hdcp2_cap)
			seq_puts(m, "HDCP2.2 ");
		if (!hdcp_cap && !hdcp2_cap)
			seq_puts(m, "None");
	}

	seq_puts(m, "\n");
}
2766
intel_hdcp_info(struct seq_file * m,struct intel_connector * connector)2767 void intel_hdcp_info(struct seq_file *m, struct intel_connector *connector)
2768 {
2769 seq_puts(m, "\tHDCP version: ");
2770 if (connector->mst.dp) {
2771 __intel_hdcp_info(m, connector, true);
2772 seq_puts(m, "\tMST Hub HDCP version: ");
2773 }
2774 __intel_hdcp_info(m, connector, false);
2775 }
2776
/*
 * debugfs show callback for i915_hdcp_sink_capability: prints the attached
 * sink's HDCP capability under the connection mutex; -ENODEV when the
 * connector is not active.
 */
static int intel_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_display *display = to_intel_display(connector);
	int ret;

	ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (ret)
		return ret;

	if (!connector->base.encoder ||
	    connector->base.status != connector_status_connected) {
		ret = -ENODEV;
		goto out;
	}

	seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
		   connector->base.base.id);
	__intel_hdcp_info(m, connector, false);

out:
	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(intel_hdcp_sink_capability);
2803
/*
 * debugfs write handler for i915_force_hdcp14: parses a boolean from
 * userspace and stores it in hdcp->force_hdcp14, which makes the next
 * enable skip HDCP 2.2. Returns the number of bytes consumed.
 */
static ssize_t intel_hdcp_force_14_write(struct file *file,
					 const char __user *ubuf,
					 size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_connector *connector = m->private;
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool force_hdcp14 = false;
	int ret;

	if (len == 0)
		return 0;

	ret = kstrtobool_from_user(ubuf, len, &force_hdcp14);
	if (ret < 0)
		return ret;

	hdcp->force_hdcp14 = force_hdcp14;
	*offp += len;

	return len;
}
2826
/*
 * debugfs show callback for i915_force_hdcp14: prints yes/no for the
 * current force-HDCP1.4 setting; -ENODEV when the connector has no
 * encoder, is disconnected or has no CRTC.
 */
static int intel_hdcp_force_14_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_display *display = to_intel_display(connector);
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_crtc *crtc;
	int ret;

	if (!encoder)
		return -ENODEV;

	ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (ret)
		return ret;

	crtc = connector->base.state->crtc;
	if (connector->base.status != connector_status_connected || !crtc) {
		ret = -ENODEV;
		goto out;
	}

	seq_printf(m, "%s\n",
		   str_yes_no(hdcp->force_hdcp14));
out:
	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	return ret;
}
2856
intel_hdcp_force_14_open(struct inode * inode,struct file * file)2857 static int intel_hdcp_force_14_open(struct inode *inode,
2858 struct file *file)
2859 {
2860 return single_open(file, intel_hdcp_force_14_show,
2861 inode->i_private);
2862 }
2863
/* debugfs fops for i915_force_hdcp14: seq_file reads plus a bool write. */
static const struct file_operations intel_hdcp_force_14_fops = {
	.owner = THIS_MODULE,
	.open = intel_hdcp_force_14_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = intel_hdcp_force_14_write
};
2872
/*
 * Create the per-connector HDCP debugfs files (sink capability and
 * force-HDCP1.4) for DP and HDMI connectors only.
 */
void intel_hdcp_connector_debugfs_add(struct intel_connector *connector)
{
	struct dentry *root = connector->base.debugfs_entry;
	int connector_type = connector->base.connector_type;

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
				    connector, &intel_hdcp_sink_capability_fops);
		debugfs_create_file("i915_force_hdcp14", 0644, root,
				    connector, &intel_hdcp_force_14_fops);
	}
}
2887