1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright (C) 2017 Google, Inc. 4 * Copyright _ 2017-2019, Intel Corporation. 5 * 6 * Authors: 7 * Sean Paul <seanpaul@chromium.org> 8 * Ramalingam C <ramalingam.c@intel.com> 9 */ 10 11 #include <linux/component.h> 12 #include <linux/i2c.h> 13 #include <linux/random.h> 14 15 #include <drm/display/drm_hdcp_helper.h> 16 #include <drm/intel/i915_component.h> 17 18 #include "i915_drv.h" 19 #include "i915_reg.h" 20 #include "intel_connector.h" 21 #include "intel_de.h" 22 #include "intel_display_power.h" 23 #include "intel_display_power_well.h" 24 #include "intel_display_types.h" 25 #include "intel_hdcp.h" 26 #include "intel_hdcp_gsc.h" 27 #include "intel_hdcp_regs.h" 28 #include "intel_hdcp_shim.h" 29 #include "intel_pcode.h" 30 31 #define KEY_LOAD_TRIES 5 32 #define HDCP2_LC_RETRY_CNT 3 33 34 /* WA: 16022217614 */ 35 static void 36 intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder, 37 struct intel_hdcp *hdcp) 38 { 39 struct intel_display *display = to_intel_display(encoder); 40 41 /* Here we assume HDMI is in TMDS mode of operation */ 42 if (encoder->type != INTEL_OUTPUT_HDMI) 43 return; 44 45 if (DISPLAY_VER(display) >= 14) { 46 if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) 47 intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder), 48 0, HDCP_LINE_REKEY_DISABLE); 49 else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) || 50 IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) 51 intel_de_rmw(display, 52 TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder), 53 0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE); 54 } 55 } 56 57 static int intel_conn_to_vcpi(struct intel_atomic_state *state, 58 struct intel_connector *connector) 59 { 60 struct drm_dp_mst_topology_mgr *mgr; 61 struct drm_dp_mst_atomic_payload *payload; 62 struct drm_dp_mst_topology_state *mst_state; 63 int vcpi = 0; 64 65 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. 
*/ 66 if (!connector->port) 67 return 0; 68 mgr = connector->port->mgr; 69 70 drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx); 71 mst_state = to_drm_dp_mst_topology_state(mgr->base.state); 72 payload = drm_atomic_get_mst_payload_state(mst_state, connector->port); 73 if (drm_WARN_ON(mgr->dev, !payload)) 74 goto out; 75 76 vcpi = payload->vcpi; 77 if (drm_WARN_ON(mgr->dev, vcpi < 0)) { 78 vcpi = 0; 79 goto out; 80 } 81 out: 82 return vcpi; 83 } 84 85 /* 86 * intel_hdcp_required_content_stream selects the most highest common possible HDCP 87 * content_type for all streams in DP MST topology because security f/w doesn't 88 * have any provision to mark content_type for each stream separately, it marks 89 * all available streams with the content_type proivided at the time of port 90 * authentication. This may prohibit the userspace to use type1 content on 91 * HDCP 2.2 capable sink because of other sink are not capable of HDCP 2.2 in 92 * DP MST topology. Though it is not compulsory, security fw should change its 93 * policy to mark different content_types for different streams. 
94 */ 95 static int 96 intel_hdcp_required_content_stream(struct intel_atomic_state *state, 97 struct intel_digital_port *dig_port) 98 { 99 struct intel_display *display = to_intel_display(state); 100 struct drm_connector_list_iter conn_iter; 101 struct intel_digital_port *conn_dig_port; 102 struct intel_connector *connector; 103 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 104 bool enforce_type0 = false; 105 int k; 106 107 if (dig_port->hdcp_auth_status) 108 return 0; 109 110 data->k = 0; 111 112 if (!dig_port->hdcp_mst_type1_capable) 113 enforce_type0 = true; 114 115 drm_connector_list_iter_begin(display->drm, &conn_iter); 116 for_each_intel_connector_iter(connector, &conn_iter) { 117 if (connector->base.status == connector_status_disconnected) 118 continue; 119 120 if (!intel_encoder_is_mst(intel_attached_encoder(connector))) 121 continue; 122 123 conn_dig_port = intel_attached_dig_port(connector); 124 if (conn_dig_port != dig_port) 125 continue; 126 127 data->streams[data->k].stream_id = 128 intel_conn_to_vcpi(state, connector); 129 data->k++; 130 131 /* if there is only one active stream */ 132 if (dig_port->dp.active_mst_links <= 1) 133 break; 134 } 135 drm_connector_list_iter_end(&conn_iter); 136 137 if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0)) 138 return -EINVAL; 139 140 /* 141 * Apply common protection level across all streams in DP MST Topology. 142 * Use highest supported content type for all streams in DP MST Topology. 143 */ 144 for (k = 0; k < data->k; k++) 145 data->streams[k].stream_type = 146 enforce_type0 ? 
DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1; 147 148 return 0; 149 } 150 151 static int intel_hdcp_prepare_streams(struct intel_atomic_state *state, 152 struct intel_connector *connector) 153 { 154 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 155 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 156 struct intel_hdcp *hdcp = &connector->hdcp; 157 158 if (intel_encoder_is_mst(intel_attached_encoder(connector))) 159 return intel_hdcp_required_content_stream(state, dig_port); 160 161 data->k = 1; 162 data->streams[0].stream_id = 0; 163 data->streams[0].stream_type = hdcp->content_type; 164 165 return 0; 166 } 167 168 static 169 bool intel_hdcp_is_ksv_valid(u8 *ksv) 170 { 171 int i, ones = 0; 172 /* KSV has 20 1's and 20 0's */ 173 for (i = 0; i < DRM_HDCP_KSV_LEN; i++) 174 ones += hweight8(ksv[i]); 175 if (ones != 20) 176 return false; 177 178 return true; 179 } 180 181 static 182 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, 183 const struct intel_hdcp_shim *shim, u8 *bksv) 184 { 185 struct intel_display *display = to_intel_display(dig_port); 186 int ret, i, tries = 2; 187 188 /* HDCP spec states that we must retry the bksv if it is invalid */ 189 for (i = 0; i < tries; i++) { 190 ret = shim->read_bksv(dig_port, bksv); 191 if (ret) 192 return ret; 193 if (intel_hdcp_is_ksv_valid(bksv)) 194 break; 195 } 196 if (i == tries) { 197 drm_dbg_kms(display->drm, "Bksv is invalid\n"); 198 return -ENODEV; 199 } 200 201 return 0; 202 } 203 204 /* Is HDCP1.4 capable on Platform and Sink */ 205 bool intel_hdcp_get_capability(struct intel_connector *connector) 206 { 207 struct intel_digital_port *dig_port; 208 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 209 bool capable = false; 210 u8 bksv[5]; 211 212 if (!intel_attached_encoder(connector)) 213 return capable; 214 215 dig_port = intel_attached_dig_port(connector); 216 217 if (!shim) 218 return capable; 219 220 if (shim->hdcp_get_capability) { 
		shim->hdcp_get_capability(dig_port, &capable);
	} else {
		/* No capability hook: probe by reading a valid Bksv instead */
		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
			capable = true;
	}

	return capable;
}

/*
 * Check if the source has all the building blocks ready to make
 * HDCP 2.2 work
 */
static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* If MTL+ make sure gsc is loaded and proxy is setup */
	if (intel_hdcp_gsc_cs_required(display)) {
		if (!intel_hdcp_gsc_check_status(display))
			return false;
	}

	/* MEI/GSC interface is solid depending on which is used */
	mutex_lock(&display->hdcp.hdcp_mutex);
	if (!display->hdcp.comp_added || !display->hdcp.arbiter) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return false;
	}
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return true;
}

/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_get_capability(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool capable = false;

	if (!intel_hdcp2_prerequisite(connector))
		return false;

	/* Sink's capability for HDCP2.2 */
	hdcp->shim->hdcp_2_2_get_capability(connector, &capable);

	return capable;
}

/*
 * Query the sink's HDCP 1.4/2.2 capability over the remote link.
 * NOTE(review): the outputs are left untouched when the shim lacks the
 * hook — callers presumably pre-initialize them; confirm at call sites.
 */
void intel_hdcp_get_remote_capability(struct intel_connector *connector,
				      bool *hdcp_capable,
				      bool *hdcp2_capable)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim->get_remote_hdcp_capability)
		return;

	hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
					       hdcp2_capable);

	if (!intel_hdcp2_prerequisite(connector))
		*hdcp2_capable = false;
}

/* Is HDCP 1.4 encryption currently enabled on this transcoder/port? */
static bool intel_hdcp_in_use(struct intel_display *display,
			      enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(display,
			     HDCP_STATUS(display, cpu_transcoder, port)) &
		HDCP_STATUS_ENC;
}

/* Is HDCP 2.2 link encryption currently enabled on this transcoder/port? */
static bool intel_hdcp2_in_use(struct intel_display *display,
			       enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(display,
			     HDCP2_STATUS(display, cpu_transcoder, port)) &
		LINK_ENCRYPTION_STATUS;
}

static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}

/* Can the HDCP 1.4 keys be loaded right now (power well #1 enabled)? */
static bool hdcp_key_loadable(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	enum i915_power_well_id id;
	intel_wakeref_t wakeref;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	/* PG1 (power well #1) needs to be enabled */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		enabled = intel_display_power_well_is_enabled(i915, id);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}

/*
 * Trigger a key clear and reset the HDCP_KEY_STATUS bits (these status
 * bits appear to be write-to-clear — confirm against bspec).
 */
static void intel_hdcp_clear_keys(struct intel_display *display)
{
	intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	intel_de_write(display, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}

/*
 * Load the HDCP 1.4 keys from fuses and hand Aksv to the display HW.
 * Returns 0 if the keys are already loaded or load successfully.
 */
static int intel_hdcp_load_keys(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded and valid */
	val = intel_de_read(display, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) {
		ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(display->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = intel_de_wait_custom(display, HDCP_KEY_STATUS,
				   HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
				   10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}

/*
 * Push one 32-bit word of SHA text to the HW, waiting for readiness first.
 * Returns 0 on success, -ETIMEDOUT if the SHA1 unit never became ready.
 */
static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
{
	intel_de_write(display, HDCP_SHA_TEXT, sha_text);
	if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
		drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * Repeater-control bits for the given transcoder (display 12+) or DDI
 * port (older platforms). Returns 0 for an unknown transcoder/port.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_display *display,
				enum transcoder cpu_transcoder, enum port port)
{
	if (DISPLAY_VER(display) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(display->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return 0;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return
		       HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(display->drm, "Unknown port %d\n", port);
		return 0;
	}
}

/*
 * Feed the KSV list, BINFO/BSTATUS and M0 through the HW SHA-1 unit and
 * compare the result against the repeater's V'. Returns 0 when V matches
 * V', a negative error code otherwise.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port);
	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(display, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
				((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(display, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(display, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(display, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(display, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}

/* Implements Part 2 of the HDCP authorization procedure */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display.
	 * As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(display->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Reject the topology if any downstream KSV is on the revocation list */
	if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}

/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions give register-width and byte-wise views of the same data */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_get_capability) {
		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(display->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(display,
			       HDCP_ANINIT(display, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(display,
				  HDCP_STATUS(display, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(display->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(display,
				  HDCP_ANLO(display, cpu_transcoder, port));
	an.reg[1] = intel_de_read(display,
				  HDCP_ANHI(display, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' becomes available 100ms (spec) after Aksv is written */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) {
		drm_err(display->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(display, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(display->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(display,
			       HDCP_RPRIME(display, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(display,
					  HDCP_STATUS(display, cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(display,
				  HDCP_STATUS(display, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}

/* Tear down HDCP 1.4: stream encryption, port encryption, then signalling */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(display,
				    HDCP_STATUS(display, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
						   port);
	intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(display->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(display->drm, "HDCP is disabled\n");
	return 0;
}

/*
 * Enable HDCP 1.4: load the keys (up to KEY_LOAD_TRIES attempts) and then
 * authenticate the sink, retrying authentication up to 3 times as the spec
 * expects reauth on failure.
 */
static int intel_hdcp1_enable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
		    connector->base.base.id, connector->base.name);

	if (!hdcp_key_loadable(display)) {
		drm_err(display->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(display);
		if (!ret)
			break;
		intel_hdcp_clear_keys(display);
	}
	if (ret) {
		drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* In case of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(display->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}

/* Map an intel_hdcp member back to the intel_connector embedding it. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}

/*
 * Update the cached content protection value, keep the per-port count of
 * HDCP-enabled MST streams in sync, and optionally schedule prop_work to
 * push the new value out to the connector property. Both hdcp->mutex and
 * dig_port->hdcp_mutex must be held by the caller (WARNed below).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex));

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference is dropped by prop_work, or here if not queued. */
		drm_connector_get(&connector->base);
		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
			drm_connector_put(&connector->base);
	}
}

/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(display->drm,
			!intel_hdcp_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
			connector->base.base.id, connector->base.name,
			intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
		    connector->base.base.id, connector->base.name);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}
	/* Back to DESIRED so the check worker re-attempts authentication. */
	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED,
				true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}

/* Worker: publish the latest HDCP state as the content protection property. */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct intel_display *display = to_intel_display(connector);

	drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	/* Drops the reference taken when the work was queued. */
	drm_connector_put(&connector->base);
}

/* HDCP needs the platform cap bit; pre-display-12 hardware lacks it on E+. */
bool is_hdcp_supported(struct intel_display *display, enum port port)
{
	return DISPLAY_RUNTIME_INFO(display)->has_hdcp &&
		(DISPLAY_VER(display) >= 12 || port < PORT_E);
}

/* Ask the HDCP firmware/arbiter to start an HDCP2.2 session (AKE_Init). */
static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
		       struct hdcp2_ake_init *ake_data)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret =
arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
	if (ret)
		drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/*
 * Hand the receiver certificate to the firmware for validation; on success
 * it returns the (possibly stored) km message to send downstream, its size,
 * and whether the receiver is already paired.
 */
static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
				struct hdcp2_ake_send_cert *rx_cert,
				bool *paired,
				struct hdcp2_ake_no_stored_km *ek_pub_km,
				size_t *msg_sz)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
							    rx_cert, paired,
							    ek_pub_km, msg_sz);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Verify rx_cert failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Let the firmware verify the receiver's H' (AKE_Send_H_prime). */
static int hdcp2_verify_hprime(struct intel_connector *connector,
			       struct hdcp2_ake_send_hprime *rx_hprime)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Persist the receiver's pairing info in the firmware for future AKEs. */
static int
hdcp2_store_pairing_info(struct intel_connector *connector,
			 struct hdcp2_ake_send_pairing_info *pairing_info)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Store pairing info failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Ask the firmware to start a locality check (produces LC_Init). */
static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
		      struct hdcp2_lc_init *lc_init)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Let the firmware verify the receiver's L' (LC_Send_L_prime). */
static int
hdcp2_verify_lprime(struct intel_connector *connector,
		    struct hdcp2_lc_send_lprime *rx_lprime)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Fetch the session key message (SKE_Send_Eks) from the firmware. */
static int hdcp2_prepare_skey(struct intel_connector *connector,
			      struct hdcp2_ske_send_eks *ske_data)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Get session key failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Firmware check of the repeater topology; returns the ack to send back. */
static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
				      struct hdcp2_rep_send_receiverid_list
								*rep_topology,
				      struct hdcp2_rep_send_ack *rep_send_ack)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
							    data,
							    rep_topology,
							    rep_send_ack);
	if (ret < 0)
		drm_dbg_kms(display->drm,
			    "Verify rep topology failed. %d\n", ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Let the firmware verify the repeater's M' (RepeaterAuth_Stream_Ready). */
static int
hdcp2_verify_mprime(struct intel_connector *connector,
		    struct hdcp2_rep_stream_ready *stream_ready)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Tell the firmware to enable HDCP authentication for this port. */
static int hdcp2_authenticate_port(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Enable hdcp auth failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Close the firmware HDCP2.2 session for this port. */
static int hdcp2_close_session(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
					       &dig_port->hdcp_port_data);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Deauth is just closing the firmware session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}

/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_ake_init
					ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret, i;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	/*
	 * Retry the first read and write to downstream at least 10 times
	 * with a 50ms delay if not hdcp2 capable (dock decides to stop
	 * advertising hdcp2 capability for some reason). The reason being
	 * that during suspend resume dock usually keeps the HDCP2 registers
	 * inaccessible causing AUX error. This wouldn't be a big problem if
	 * the userspace just kept retrying with some delay while it continues
	 * to play low value content but most userspace applications end up
	 * throwing an error when it receives one from KMD. This makes sure we
	 * give the dock and the sink devices time to complete their power
	 * cycle and then try HDCP authentication. The value of 10 retries and
	 * the 50ms delay were decided based on multiple trials and errors.
	 */
	for (i = 0; i < 10; i++) {
		if (!intel_hdcp2_get_capability(connector)) {
			msleep(50);
			continue;
		}

		ret = shim->write_2_2_msg(connector, &msgs.ake_init,
					  sizeof(msgs.ake_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
					 &msgs.send_cert, sizeof(msgs.send_cert));
		if (ret > 0)
			break;
	}

	/*
	 * NOTE(review): if the sink never reports HDCP2 capability above, ret
	 * is still the 0 returned by hdcp2_prepare_ake_init() and we proceed
	 * to parse msgs.send_cert without it having been read - confirm this
	 * path cannot be reached with an unfilled send_cert.
	 */
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(display->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}

/* Locality check: LC_Init/L' round trips, up to HDCP2_LC_RETRY_CNT tries. */
static int hdcp2_locality_check(struct
				intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(connector, &msgs.lc_init,
					  sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}

/* Session Key Exchange: get SKE_Send_Eks from firmware, send downstream. */
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp2_ske_send_eks send_eks;
	int ret;

	ret = hdcp2_prepare_skey(connector, &send_eks);
	if (ret < 0)
		return ret;

	ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
					sizeof(send_eks));
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * One RepeaterAuth_Stream_Manage / Stream_Ready round trip. seq_num_m is
 * incremented on every attempt (out label), whether it succeeded or not.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Only the first data->k stream slots are populated; trim the rest. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	hdcp->seq_num_m++;

	return ret;
}

/* Read and validate the repeater's receiver-ID list, then send back the ack. */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(display->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(display->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(display->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}

/* Full HDCP2.2 sink authentication: AKE, LC, SKE, then repeater topology. */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/*
 * Enable per-stream HDCP2.2 encryption. The link itself must already be
 * encrypted; otherwise the port is deauthenticated (link_recover).
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	      LINK_ENCRYPTION_STATUS)) {
		drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}

/* Turn on link-level HDCP2.2 encryption on an already authenticated port. */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(display->drm,
		    intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(display->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(display,
				    HDCP2_STATUS(display, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}

/* Turn off link-level HDCP2.2 encryption, then HDCP signalling. */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(display->drm,
		    !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
		      LINK_ENCRYPTION_STATUS));

	intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(display,
				      HDCP2_STATUS(display, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(display->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(display->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}

/* Retry wrapper around _hdcp2_propagate_stream_management_info(). */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Let's restart the auth in case of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(display->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(display->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}

/*
 * Authenticate the sink (up to 3 attempts, skipped if the port is already
 * authenticated), then enable link encryption and per-stream encryption.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
					  struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(state, connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Prepare stream failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(display->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(display->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}

/* Entry point for enabling HDCP2.2 on a connector. */
static int _intel_hdcp2_enable(struct intel_atomic_state *state,
			       struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	/* WA 16022217614: see intel_hdcp_disable_hdcp_line_rekeying() */
	intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);

	ret = hdcp2_authenticate_and_encrypt(state, connector);
	if (ret) {
		drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}

/*
 * Disable HDCP2.2: per-stream encryption first, then (unless other MST
 * streams still need the link and this isn't a link recovery) link
 * encryption and the firmware session.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}

/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp_mutex); 2131 cpu_transcoder = hdcp->cpu_transcoder; 2132 2133 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ 2134 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 2135 !hdcp->hdcp2_encrypted) { 2136 ret = -EINVAL; 2137 goto out; 2138 } 2139 2140 if (drm_WARN_ON(display->drm, 2141 !intel_hdcp2_in_use(display, cpu_transcoder, port))) { 2142 drm_err(display->drm, 2143 "HDCP2.2 link stopped the encryption, %x\n", 2144 intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port))); 2145 ret = -ENXIO; 2146 _intel_hdcp2_disable(connector, true); 2147 intel_hdcp_update_value(connector, 2148 DRM_MODE_CONTENT_PROTECTION_DESIRED, 2149 true); 2150 goto out; 2151 } 2152 2153 ret = hdcp->shim->check_2_2_link(dig_port, connector); 2154 if (ret == HDCP_LINK_PROTECTED) { 2155 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 2156 intel_hdcp_update_value(connector, 2157 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2158 true); 2159 } 2160 goto out; 2161 } 2162 2163 if (ret == HDCP_TOPOLOGY_CHANGE) { 2164 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2165 goto out; 2166 2167 drm_dbg_kms(display->drm, 2168 "HDCP2.2 Downstream topology change\n"); 2169 } else { 2170 drm_dbg_kms(display->drm, 2171 "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n", 2172 connector->base.base.id, connector->base.name); 2173 } 2174 2175 ret = _intel_hdcp2_disable(connector, true); 2176 if (ret) { 2177 drm_err(display->drm, 2178 "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n", 2179 connector->base.base.id, connector->base.name, ret); 2180 intel_hdcp_update_value(connector, 2181 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2182 goto out; 2183 } 2184 2185 intel_hdcp_update_value(connector, 2186 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2187 out: 2188 mutex_unlock(&dig_port->hdcp_mutex); 2189 mutex_unlock(&hdcp->mutex); 2190 return ret; 2191 } 2192 2193 static void intel_hdcp_check_work(struct work_struct *work) 2194 { 2195 
struct intel_hdcp *hdcp = container_of(to_delayed_work(work), 2196 struct intel_hdcp, 2197 check_work); 2198 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 2199 struct intel_display *display = to_intel_display(connector); 2200 struct drm_i915_private *i915 = to_i915(display->drm); 2201 2202 if (drm_connector_is_unregistered(&connector->base)) 2203 return; 2204 2205 if (!intel_hdcp2_check_link(connector)) 2206 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2207 DRM_HDCP2_CHECK_PERIOD_MS); 2208 else if (!intel_hdcp_check_link(connector)) 2209 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2210 DRM_HDCP_CHECK_PERIOD_MS); 2211 } 2212 2213 static int i915_hdcp_component_bind(struct device *drv_kdev, 2214 struct device *mei_kdev, void *data) 2215 { 2216 struct intel_display *display = to_intel_display(drv_kdev); 2217 2218 drm_dbg(display->drm, "I915 HDCP comp bind\n"); 2219 mutex_lock(&display->hdcp.hdcp_mutex); 2220 display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data; 2221 display->hdcp.arbiter->hdcp_dev = mei_kdev; 2222 mutex_unlock(&display->hdcp.hdcp_mutex); 2223 2224 return 0; 2225 } 2226 2227 static void i915_hdcp_component_unbind(struct device *drv_kdev, 2228 struct device *mei_kdev, void *data) 2229 { 2230 struct intel_display *display = to_intel_display(drv_kdev); 2231 2232 drm_dbg(display->drm, "I915 HDCP comp unbind\n"); 2233 mutex_lock(&display->hdcp.hdcp_mutex); 2234 display->hdcp.arbiter = NULL; 2235 mutex_unlock(&display->hdcp.hdcp_mutex); 2236 } 2237 2238 static const struct component_ops i915_hdcp_ops = { 2239 .bind = i915_hdcp_component_bind, 2240 .unbind = i915_hdcp_component_unbind, 2241 }; 2242 2243 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port) 2244 { 2245 switch (port) { 2246 case PORT_A: 2247 return HDCP_DDI_A; 2248 case PORT_B ... 
PORT_F: 2249 return (enum hdcp_ddi)port; 2250 default: 2251 return HDCP_DDI_INVALID_PORT; 2252 } 2253 } 2254 2255 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder) 2256 { 2257 switch (cpu_transcoder) { 2258 case TRANSCODER_A ... TRANSCODER_D: 2259 return (enum hdcp_transcoder)(cpu_transcoder | 0x10); 2260 default: /* eDP, DSI TRANSCODERS are non HDCP capable */ 2261 return HDCP_INVALID_TRANSCODER; 2262 } 2263 } 2264 2265 static int initialize_hdcp_port_data(struct intel_connector *connector, 2266 struct intel_digital_port *dig_port, 2267 const struct intel_hdcp_shim *shim) 2268 { 2269 struct intel_display *display = to_intel_display(connector); 2270 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 2271 enum port port = dig_port->base.port; 2272 2273 if (DISPLAY_VER(display) < 12) 2274 data->hdcp_ddi = intel_get_hdcp_ddi_index(port); 2275 else 2276 /* 2277 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled 2278 * with zero(INVALID PORT index). 2279 */ 2280 data->hdcp_ddi = HDCP_DDI_INVALID_PORT; 2281 2282 /* 2283 * As associated transcoder is set and modified at modeset, here hdcp_transcoder 2284 * is initialized to zero (invalid transcoder index). This will be 2285 * retained for <Gen12 forever. 
2286 */ 2287 data->hdcp_transcoder = HDCP_INVALID_TRANSCODER; 2288 2289 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; 2290 data->protocol = (u8)shim->protocol; 2291 2292 if (!data->streams) 2293 data->streams = kcalloc(INTEL_NUM_PIPES(display), 2294 sizeof(struct hdcp2_streamid_type), 2295 GFP_KERNEL); 2296 if (!data->streams) { 2297 drm_err(display->drm, "Out of Memory\n"); 2298 return -ENOMEM; 2299 } 2300 2301 return 0; 2302 } 2303 2304 static bool is_hdcp2_supported(struct intel_display *display) 2305 { 2306 struct drm_i915_private *i915 = to_i915(display->drm); 2307 2308 if (intel_hdcp_gsc_cs_required(display)) 2309 return true; 2310 2311 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) 2312 return false; 2313 2314 return (DISPLAY_VER(display) >= 10 || 2315 IS_KABYLAKE(i915) || 2316 IS_COFFEELAKE(i915) || 2317 IS_COMETLAKE(i915)); 2318 } 2319 2320 void intel_hdcp_component_init(struct intel_display *display) 2321 { 2322 int ret; 2323 2324 if (!is_hdcp2_supported(display)) 2325 return; 2326 2327 mutex_lock(&display->hdcp.hdcp_mutex); 2328 drm_WARN_ON(display->drm, display->hdcp.comp_added); 2329 2330 display->hdcp.comp_added = true; 2331 mutex_unlock(&display->hdcp.hdcp_mutex); 2332 if (intel_hdcp_gsc_cs_required(display)) 2333 ret = intel_hdcp_gsc_init(display); 2334 else 2335 ret = component_add_typed(display->drm->dev, &i915_hdcp_ops, 2336 I915_COMPONENT_HDCP); 2337 2338 if (ret < 0) { 2339 drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n", 2340 ret); 2341 mutex_lock(&display->hdcp.hdcp_mutex); 2342 display->hdcp.comp_added = false; 2343 mutex_unlock(&display->hdcp.hdcp_mutex); 2344 return; 2345 } 2346 } 2347 2348 static void intel_hdcp2_init(struct intel_connector *connector, 2349 struct intel_digital_port *dig_port, 2350 const struct intel_hdcp_shim *shim) 2351 { 2352 struct intel_display *display = to_intel_display(connector); 2353 struct intel_hdcp *hdcp = &connector->hdcp; 2354 int ret; 2355 2356 ret = initialize_hdcp_port_data(connector, dig_port, 
shim); 2357 if (ret) { 2358 drm_dbg_kms(display->drm, "Mei hdcp data init failed\n"); 2359 return; 2360 } 2361 2362 hdcp->hdcp2_supported = true; 2363 } 2364 2365 int intel_hdcp_init(struct intel_connector *connector, 2366 struct intel_digital_port *dig_port, 2367 const struct intel_hdcp_shim *shim) 2368 { 2369 struct intel_display *display = to_intel_display(connector); 2370 struct intel_hdcp *hdcp = &connector->hdcp; 2371 int ret; 2372 2373 if (!shim) 2374 return -EINVAL; 2375 2376 if (is_hdcp2_supported(display)) 2377 intel_hdcp2_init(connector, dig_port, shim); 2378 2379 ret = drm_connector_attach_content_protection_property(&connector->base, 2380 hdcp->hdcp2_supported); 2381 if (ret) { 2382 hdcp->hdcp2_supported = false; 2383 kfree(dig_port->hdcp_port_data.streams); 2384 return ret; 2385 } 2386 2387 hdcp->shim = shim; 2388 mutex_init(&hdcp->mutex); 2389 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); 2390 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); 2391 init_waitqueue_head(&hdcp->cp_irq_queue); 2392 2393 return 0; 2394 } 2395 2396 static int _intel_hdcp_enable(struct intel_atomic_state *state, 2397 struct intel_encoder *encoder, 2398 const struct intel_crtc_state *pipe_config, 2399 const struct drm_connector_state *conn_state) 2400 { 2401 struct intel_display *display = to_intel_display(encoder); 2402 struct drm_i915_private *i915 = to_i915(display->drm); 2403 struct intel_connector *connector = 2404 to_intel_connector(conn_state->connector); 2405 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2406 struct intel_hdcp *hdcp = &connector->hdcp; 2407 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; 2408 int ret = -EINVAL; 2409 2410 if (!hdcp->shim) 2411 return -ENOENT; 2412 2413 if (!connector->encoder) { 2414 drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n", 2415 connector->base.base.id, connector->base.name); 2416 return -ENODEV; 2417 } 2418 2419 mutex_lock(&hdcp->mutex); 2420 
mutex_lock(&dig_port->hdcp_mutex); 2421 drm_WARN_ON(display->drm, 2422 hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); 2423 hdcp->content_type = (u8)conn_state->hdcp_content_type; 2424 2425 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { 2426 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder; 2427 hdcp->stream_transcoder = pipe_config->cpu_transcoder; 2428 } else { 2429 hdcp->cpu_transcoder = pipe_config->cpu_transcoder; 2430 hdcp->stream_transcoder = INVALID_TRANSCODER; 2431 } 2432 2433 if (DISPLAY_VER(display) >= 12) 2434 dig_port->hdcp_port_data.hdcp_transcoder = 2435 intel_get_hdcp_transcoder(hdcp->cpu_transcoder); 2436 2437 /* 2438 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup 2439 * is capable of HDCP2.2, it is preferred to use HDCP2.2. 2440 */ 2441 if (intel_hdcp2_get_capability(connector)) { 2442 ret = _intel_hdcp2_enable(state, connector); 2443 if (!ret) 2444 check_link_interval = 2445 DRM_HDCP2_CHECK_PERIOD_MS; 2446 } 2447 2448 /* 2449 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will 2450 * be attempted. 
2451 */ 2452 if (ret && intel_hdcp_get_capability(connector) && 2453 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { 2454 ret = intel_hdcp1_enable(connector); 2455 } 2456 2457 if (!ret) { 2458 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2459 check_link_interval); 2460 intel_hdcp_update_value(connector, 2461 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2462 true); 2463 } 2464 2465 mutex_unlock(&dig_port->hdcp_mutex); 2466 mutex_unlock(&hdcp->mutex); 2467 return ret; 2468 } 2469 2470 void intel_hdcp_enable(struct intel_atomic_state *state, 2471 struct intel_encoder *encoder, 2472 const struct intel_crtc_state *crtc_state, 2473 const struct drm_connector_state *conn_state) 2474 { 2475 struct intel_connector *connector = 2476 to_intel_connector(conn_state->connector); 2477 struct intel_hdcp *hdcp = &connector->hdcp; 2478 2479 /* 2480 * Enable hdcp if it's desired or if userspace is enabled and 2481 * driver set its state to undesired 2482 */ 2483 if (conn_state->content_protection == 2484 DRM_MODE_CONTENT_PROTECTION_DESIRED || 2485 (conn_state->content_protection == 2486 DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value == 2487 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2488 _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2489 } 2490 2491 int intel_hdcp_disable(struct intel_connector *connector) 2492 { 2493 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2494 struct intel_hdcp *hdcp = &connector->hdcp; 2495 int ret = 0; 2496 2497 if (!hdcp->shim) 2498 return -ENOENT; 2499 2500 mutex_lock(&hdcp->mutex); 2501 mutex_lock(&dig_port->hdcp_mutex); 2502 2503 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2504 goto out; 2505 2506 intel_hdcp_update_value(connector, 2507 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); 2508 if (hdcp->hdcp2_encrypted) 2509 ret = _intel_hdcp2_disable(connector, false); 2510 else if (hdcp->hdcp_encrypted) 2511 ret = _intel_hdcp_disable(connector); 2512 2513 out: 2514 
mutex_unlock(&dig_port->hdcp_mutex); 2515 mutex_unlock(&hdcp->mutex); 2516 cancel_delayed_work_sync(&hdcp->check_work); 2517 return ret; 2518 } 2519 2520 void intel_hdcp_update_pipe(struct intel_atomic_state *state, 2521 struct intel_encoder *encoder, 2522 const struct intel_crtc_state *crtc_state, 2523 const struct drm_connector_state *conn_state) 2524 { 2525 struct intel_connector *connector = 2526 to_intel_connector(conn_state->connector); 2527 struct intel_hdcp *hdcp = &connector->hdcp; 2528 bool content_protection_type_changed, desired_and_not_enabled = false; 2529 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2530 2531 if (!connector->hdcp.shim) 2532 return; 2533 2534 content_protection_type_changed = 2535 (conn_state->hdcp_content_type != hdcp->content_type && 2536 conn_state->content_protection != 2537 DRM_MODE_CONTENT_PROTECTION_UNDESIRED); 2538 2539 /* 2540 * During the HDCP encryption session if Type change is requested, 2541 * disable the HDCP and reenable it with new TYPE value. 2542 */ 2543 if (conn_state->content_protection == 2544 DRM_MODE_CONTENT_PROTECTION_UNDESIRED || 2545 content_protection_type_changed) 2546 intel_hdcp_disable(connector); 2547 2548 /* 2549 * Mark the hdcp state as DESIRED after the hdcp disable of type 2550 * change procedure. 
2551 */ 2552 if (content_protection_type_changed) { 2553 mutex_lock(&hdcp->mutex); 2554 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 2555 drm_connector_get(&connector->base); 2556 if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) 2557 drm_connector_put(&connector->base); 2558 mutex_unlock(&hdcp->mutex); 2559 } 2560 2561 if (conn_state->content_protection == 2562 DRM_MODE_CONTENT_PROTECTION_DESIRED) { 2563 mutex_lock(&hdcp->mutex); 2564 /* Avoid enabling hdcp, if it already ENABLED */ 2565 desired_and_not_enabled = 2566 hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; 2567 mutex_unlock(&hdcp->mutex); 2568 /* 2569 * If HDCP already ENABLED and CP property is DESIRED, schedule 2570 * prop_work to update correct CP property to user space. 2571 */ 2572 if (!desired_and_not_enabled && !content_protection_type_changed) { 2573 drm_connector_get(&connector->base); 2574 if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) 2575 drm_connector_put(&connector->base); 2576 2577 } 2578 } 2579 2580 if (desired_and_not_enabled || content_protection_type_changed) 2581 _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2582 } 2583 2584 void intel_hdcp_component_fini(struct intel_display *display) 2585 { 2586 mutex_lock(&display->hdcp.hdcp_mutex); 2587 if (!display->hdcp.comp_added) { 2588 mutex_unlock(&display->hdcp.hdcp_mutex); 2589 return; 2590 } 2591 2592 display->hdcp.comp_added = false; 2593 mutex_unlock(&display->hdcp.hdcp_mutex); 2594 2595 if (intel_hdcp_gsc_cs_required(display)) 2596 intel_hdcp_gsc_fini(display); 2597 else 2598 component_del(display->drm->dev, &i915_hdcp_ops); 2599 } 2600 2601 void intel_hdcp_cleanup(struct intel_connector *connector) 2602 { 2603 struct intel_hdcp *hdcp = &connector->hdcp; 2604 2605 if (!hdcp->shim) 2606 return; 2607 2608 /* 2609 * If the connector is registered, it's possible userspace could kick 2610 * off another HDCP enable, which would re-spawn the workers. 
2611 */ 2612 drm_WARN_ON(connector->base.dev, 2613 connector->base.registration_state == DRM_CONNECTOR_REGISTERED); 2614 2615 /* 2616 * Now that the connector is not registered, check_work won't be run, 2617 * but cancel any outstanding instances of it 2618 */ 2619 cancel_delayed_work_sync(&hdcp->check_work); 2620 2621 /* 2622 * We don't cancel prop_work in the same way as check_work since it 2623 * requires connection_mutex which could be held while calling this 2624 * function. Instead, we rely on the connector references grabbed before 2625 * scheduling prop_work to ensure the connector is alive when prop_work 2626 * is run. So if we're in the destroy path (which is where this 2627 * function should be called), we're "guaranteed" that prop_work is not 2628 * active (tl;dr This Should Never Happen). 2629 */ 2630 drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); 2631 2632 mutex_lock(&hdcp->mutex); 2633 hdcp->shim = NULL; 2634 mutex_unlock(&hdcp->mutex); 2635 } 2636 2637 void intel_hdcp_atomic_check(struct drm_connector *connector, 2638 struct drm_connector_state *old_state, 2639 struct drm_connector_state *new_state) 2640 { 2641 u64 old_cp = old_state->content_protection; 2642 u64 new_cp = new_state->content_protection; 2643 struct drm_crtc_state *crtc_state; 2644 2645 if (!new_state->crtc) { 2646 /* 2647 * If the connector is being disabled with CP enabled, mark it 2648 * desired so it's re-enabled when the connector is brought back 2649 */ 2650 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) 2651 new_state->content_protection = 2652 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2653 return; 2654 } 2655 2656 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 2657 new_state->crtc); 2658 /* 2659 * Fix the HDCP uapi content protection state in case of modeset. 2660 * FIXME: As per HDCP content protection property uapi doc, an uevent() 2661 * need to be sent if there is transition from ENABLED->DESIRED. 
2662 */ 2663 if (drm_atomic_crtc_needs_modeset(crtc_state) && 2664 (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED && 2665 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2666 new_state->content_protection = 2667 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2668 2669 /* 2670 * Nothing to do if the state didn't change, or HDCP was activated since 2671 * the last commit. And also no change in hdcp content type. 2672 */ 2673 if (old_cp == new_cp || 2674 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && 2675 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) { 2676 if (old_state->hdcp_content_type == 2677 new_state->hdcp_content_type) 2678 return; 2679 } 2680 2681 crtc_state->mode_changed = true; 2682 } 2683 2684 /* Handles the CP_IRQ raised from the DP HDCP sink */ 2685 void intel_hdcp_handle_cp_irq(struct intel_connector *connector) 2686 { 2687 struct intel_hdcp *hdcp = &connector->hdcp; 2688 struct intel_display *display = to_intel_display(connector); 2689 struct drm_i915_private *i915 = to_i915(display->drm); 2690 2691 if (!hdcp->shim) 2692 return; 2693 2694 atomic_inc(&connector->hdcp.cp_irq_count); 2695 wake_up_all(&connector->hdcp.cp_irq_queue); 2696 2697 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0); 2698 } 2699