1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright (C) 2017 Google, Inc. 4 * Copyright _ 2017-2019, Intel Corporation. 5 * 6 * Authors: 7 * Sean Paul <seanpaul@chromium.org> 8 * Ramalingam C <ramalingam.c@intel.com> 9 */ 10 11 #include <linux/component.h> 12 #include <linux/debugfs.h> 13 #include <linux/i2c.h> 14 #include <linux/random.h> 15 16 #include <drm/display/drm_hdcp_helper.h> 17 #include <drm/intel/i915_component.h> 18 19 #include "i915_drv.h" 20 #include "i915_reg.h" 21 #include "intel_connector.h" 22 #include "intel_de.h" 23 #include "intel_display_power.h" 24 #include "intel_display_power_well.h" 25 #include "intel_display_rpm.h" 26 #include "intel_display_types.h" 27 #include "intel_dp_mst.h" 28 #include "intel_hdcp.h" 29 #include "intel_hdcp_gsc.h" 30 #include "intel_hdcp_gsc_message.h" 31 #include "intel_hdcp_regs.h" 32 #include "intel_hdcp_shim.h" 33 #include "intel_pcode.h" 34 35 #define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14) 36 37 #define KEY_LOAD_TRIES 5 38 #define HDCP2_LC_RETRY_CNT 3 39 40 static void 41 intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder, 42 struct intel_hdcp *hdcp, 43 bool enable) 44 { 45 struct intel_display *display = to_intel_display(encoder); 46 i915_reg_t rekey_reg; 47 u32 rekey_bit = 0; 48 49 /* Here we assume HDMI is in TMDS mode of operation */ 50 if (!intel_encoder_is_hdmi(encoder)) 51 return; 52 53 if (DISPLAY_VER(display) >= 30) { 54 rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder); 55 rekey_bit = XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE; 56 } else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) || 57 IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) { 58 rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder); 59 rekey_bit = TRANS_DDI_HDCP_LINE_REKEY_DISABLE; 60 } else if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) { 61 rekey_reg = CHICKEN_TRANS(display, hdcp->cpu_transcoder); 62 rekey_bit = HDCP_LINE_REKEY_DISABLE; 63 } 64 65 if (rekey_bit) 66 intel_de_rmw(display, rekey_reg, rekey_bit, enable ? 0 : rekey_bit); 67 } 68 69 static int intel_conn_to_vcpi(struct intel_atomic_state *state, 70 struct intel_connector *connector) 71 { 72 struct drm_dp_mst_topology_mgr *mgr; 73 struct drm_dp_mst_atomic_payload *payload; 74 struct drm_dp_mst_topology_state *mst_state; 75 int vcpi = 0; 76 77 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */ 78 if (!connector->mst.port) 79 return 0; 80 mgr = connector->mst.port->mgr; 81 82 drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx); 83 mst_state = to_drm_dp_mst_topology_state(mgr->base.state); 84 payload = drm_atomic_get_mst_payload_state(mst_state, connector->mst.port); 85 if (drm_WARN_ON(mgr->dev, !payload)) 86 goto out; 87 88 vcpi = payload->vcpi; 89 if (drm_WARN_ON(mgr->dev, vcpi < 0)) { 90 vcpi = 0; 91 goto out; 92 } 93 out: 94 return vcpi; 95 } 96 97 /* 98 * intel_hdcp_required_content_stream selects the most highest common possible HDCP 99 * content_type for all streams in DP MST topology because security f/w doesn't 100 * have any provision to mark content_type for each stream separately, it marks 101 * all available streams with the content_type proivided at the time of port 102 * authentication. This may prohibit the userspace to use type1 content on 103 * HDCP 2.2 capable sink because of other sink are not capable of HDCP 2.2 in 104 * DP MST topology. 
Though it is not compulsory, security fw should change its 105 * policy to mark different content_types for different streams. 106 */ 107 static int 108 intel_hdcp_required_content_stream(struct intel_atomic_state *state, 109 struct intel_digital_port *dig_port) 110 { 111 struct intel_display *display = to_intel_display(state); 112 struct drm_connector_list_iter conn_iter; 113 struct intel_digital_port *conn_dig_port; 114 struct intel_connector *connector; 115 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 116 bool enforce_type0 = false; 117 int k; 118 119 if (dig_port->hdcp.auth_status) 120 return 0; 121 122 data->k = 0; 123 124 if (!dig_port->hdcp.mst_type1_capable) 125 enforce_type0 = true; 126 127 drm_connector_list_iter_begin(display->drm, &conn_iter); 128 for_each_intel_connector_iter(connector, &conn_iter) { 129 if (connector->base.status == connector_status_disconnected) 130 continue; 131 132 if (!intel_encoder_is_mst(intel_attached_encoder(connector))) 133 continue; 134 135 conn_dig_port = intel_attached_dig_port(connector); 136 if (conn_dig_port != dig_port) 137 continue; 138 139 data->streams[data->k].stream_id = 140 intel_conn_to_vcpi(state, connector); 141 data->k++; 142 143 /* if there is only one active stream */ 144 if (intel_dp_mst_active_streams(&dig_port->dp) <= 1) 145 break; 146 } 147 drm_connector_list_iter_end(&conn_iter); 148 149 if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0)) 150 return -EINVAL; 151 152 /* 153 * Apply common protection level across all streams in DP MST Topology. 154 * Use highest supported content type for all streams in DP MST Topology. 155 */ 156 for (k = 0; k < data->k; k++) 157 data->streams[k].stream_type = 158 enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1; 159 160 return 0; 161 } 162 163 static int intel_hdcp_prepare_streams(struct intel_atomic_state *state, 164 struct intel_connector *connector) 165 { 166 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 167 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 168 struct intel_hdcp *hdcp = &connector->hdcp; 169 170 if (intel_encoder_is_mst(intel_attached_encoder(connector))) 171 return intel_hdcp_required_content_stream(state, dig_port); 172 173 data->k = 1; 174 data->streams[0].stream_id = 0; 175 data->streams[0].stream_type = hdcp->content_type; 176 177 return 0; 178 } 179 180 static 181 bool intel_hdcp_is_ksv_valid(u8 *ksv) 182 { 183 int i, ones = 0; 184 /* KSV has 20 1's and 20 0's */ 185 for (i = 0; i < DRM_HDCP_KSV_LEN; i++) 186 ones += hweight8(ksv[i]); 187 if (ones != 20) 188 return false; 189 190 return true; 191 } 192 193 static 194 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, 195 const struct intel_hdcp_shim *shim, u8 *bksv) 196 { 197 struct intel_display *display = to_intel_display(dig_port); 198 int ret, i, tries = 2; 199 200 /* HDCP spec states that we must retry the bksv if it is invalid */ 201 for (i = 0; i < tries; i++) { 202 ret = shim->read_bksv(dig_port, bksv); 203 if (ret) 204 return ret; 205 if (intel_hdcp_is_ksv_valid(bksv)) 206 break; 207 } 208 if (i == tries) { 209 drm_dbg_kms(display->drm, "Bksv is invalid\n"); 210 return -ENODEV; 211 } 212 213 return 0; 214 } 215 216 /* Is HDCP1.4 capable on Platform and Sink */ 217 static bool intel_hdcp_get_capability(struct intel_connector *connector) 218 { 219 struct intel_digital_port *dig_port; 220 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 221 bool capable = false; 222 u8 bksv[5]; 223 
224 if (!intel_attached_encoder(connector)) 225 return capable; 226 227 dig_port = intel_attached_dig_port(connector); 228 229 if (!shim) 230 return capable; 231 232 if (shim->hdcp_get_capability) { 233 shim->hdcp_get_capability(dig_port, &capable); 234 } else { 235 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv)) 236 capable = true; 237 } 238 239 return capable; 240 } 241 242 /* 243 * Check if the source has all the building blocks ready to make 244 * HDCP 2.2 work 245 */ 246 static bool intel_hdcp2_prerequisite(struct intel_connector *connector) 247 { 248 struct intel_display *display = to_intel_display(connector); 249 struct intel_hdcp *hdcp = &connector->hdcp; 250 251 /* I915 support for HDCP2.2 */ 252 if (!hdcp->hdcp2_supported) 253 return false; 254 255 /* If MTL+ make sure gsc is loaded and proxy is setup */ 256 if (USE_HDCP_GSC(display)) { 257 if (!intel_hdcp_gsc_check_status(display->drm)) 258 return false; 259 } 260 261 /* MEI/GSC interface is solid depending on which is used */ 262 mutex_lock(&display->hdcp.hdcp_mutex); 263 if (!display->hdcp.comp_added || !display->hdcp.arbiter) { 264 mutex_unlock(&display->hdcp.hdcp_mutex); 265 return false; 266 } 267 mutex_unlock(&display->hdcp.hdcp_mutex); 268 269 return true; 270 } 271 272 /* Is HDCP2.2 capable on Platform and Sink */ 273 static bool intel_hdcp2_get_capability(struct intel_connector *connector) 274 { 275 struct intel_hdcp *hdcp = &connector->hdcp; 276 bool capable = false; 277 278 if (!intel_hdcp2_prerequisite(connector)) 279 return false; 280 281 /* Sink's capability for HDCP2.2 */ 282 hdcp->shim->hdcp_2_2_get_capability(connector, &capable); 283 284 return capable; 285 } 286 287 static void intel_hdcp_get_remote_capability(struct intel_connector *connector, 288 bool *hdcp_capable, 289 bool *hdcp2_capable) 290 { 291 struct intel_hdcp *hdcp = &connector->hdcp; 292 293 if (!hdcp->shim->get_remote_hdcp_capability) 294 return; 295 296 hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable, 297 hdcp2_capable); 298 299 if (!intel_hdcp2_prerequisite(connector)) 300 *hdcp2_capable = false; 301 } 302 303 static bool intel_hdcp_in_use(struct intel_display *display, 304 enum transcoder cpu_transcoder, enum port port) 305 { 306 return intel_de_read(display, 307 HDCP_STATUS(display, cpu_transcoder, port)) & 308 HDCP_STATUS_ENC; 309 } 310 311 static bool intel_hdcp2_in_use(struct intel_display *display, 312 enum transcoder cpu_transcoder, enum port port) 313 { 314 return intel_de_read(display, 315 HDCP2_STATUS(display, cpu_transcoder, port)) & 316 LINK_ENCRYPTION_STATUS; 317 } 318 319 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, 320 const struct intel_hdcp_shim *shim) 321 { 322 int ret, read_ret; 323 bool ksv_ready; 324 325 /* Poll for ksv list ready (spec says max time allowed is 5s) */ 326 ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port, 327 &ksv_ready), 328 read_ret || ksv_ready, 5 * 1000 * 1000, 1000, 329 100 * 1000); 330 if (ret) 331 return ret; 332 if (read_ret) 333 return read_ret; 334 if (!ksv_ready) 335 return -ETIMEDOUT; 336 337 return 0; 338 } 339 340 static bool hdcp_key_loadable(struct intel_display *display) 341 { 342 enum i915_power_well_id id; 343 bool enabled = false; 344 345 /* 346 * On HSW and BDW, Display HW loads the Key as soon as Display resumes. 347 * On all BXT+, SW can load the keys only when the PW#1 is turned on. 
348 */ 349 if (display->platform.haswell || display->platform.broadwell) 350 id = HSW_DISP_PW_GLOBAL; 351 else 352 id = SKL_DISP_PW_1; 353 354 /* PG1 (power well #1) needs to be enabled */ 355 with_intel_display_rpm(display) 356 enabled = intel_display_power_well_is_enabled(display, id); 357 358 /* 359 * Another req for hdcp key loadability is enabled state of pll for 360 * cdclk. Without active crtc we won't land here. So we are assuming that 361 * cdclk is already on. 362 */ 363 364 return enabled; 365 } 366 367 static void intel_hdcp_clear_keys(struct intel_display *display) 368 { 369 intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); 370 intel_de_write(display, HDCP_KEY_STATUS, 371 HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); 372 } 373 374 static int intel_hdcp_load_keys(struct intel_display *display) 375 { 376 struct drm_i915_private *i915 = to_i915(display->drm); 377 int ret; 378 u32 val; 379 380 val = intel_de_read(display, HDCP_KEY_STATUS); 381 if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) 382 return 0; 383 384 /* 385 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes 386 * out of reset. So if Key is not already loaded, its an error state. 387 */ 388 if (display->platform.haswell || display->platform.broadwell) 389 if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) 390 return -ENXIO; 391 392 /* 393 * Initiate loading the HDCP key from fuses. 394 * 395 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display 396 * version 9 platforms (minus BXT) differ in the key load trigger 397 * process from other platforms. These platforms use the GT Driver 398 * Mailbox interface. 399 */ 400 if (DISPLAY_VER(display) == 9 && !display->platform.broxton) { 401 ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); 402 if (ret) { 403 drm_err(display->drm, 404 "Failed to initiate HDCP key load (%d)\n", 405 ret); 406 return ret; 407 } 408 } else { 409 intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); 410 } 411 412 /* Wait for the keys to load (500us) */ 413 ret = intel_de_wait_custom(display, HDCP_KEY_STATUS, 414 HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 415 10, 1, &val); 416 if (ret) 417 return ret; 418 else if (!(val & HDCP_KEY_LOAD_STATUS)) 419 return -ENXIO; 420 421 /* Send Aksv over to PCH display for use in authentication */ 422 intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); 423 424 return 0; 425 } 426 427 /* Returns updated SHA-1 index */ 428 static int intel_write_sha_text(struct intel_display *display, u32 sha_text) 429 { 430 intel_de_write(display, HDCP_SHA_TEXT, sha_text); 431 if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { 432 drm_err(display->drm, "Timed out waiting for SHA1 ready\n"); 433 return -ETIMEDOUT; 434 } 435 return 0; 436 } 437 438 static 439 u32 intel_hdcp_get_repeater_ctl(struct intel_display *display, 440 enum transcoder cpu_transcoder, enum port port) 441 { 442 if (DISPLAY_VER(display) >= 12) { 443 switch (cpu_transcoder) { 444 case TRANSCODER_A: 445 return HDCP_TRANSA_REP_PRESENT | 446 HDCP_TRANSA_SHA1_M0; 447 case TRANSCODER_B: 448 return HDCP_TRANSB_REP_PRESENT | 449 HDCP_TRANSB_SHA1_M0; 450 case TRANSCODER_C: 451 return HDCP_TRANSC_REP_PRESENT | 452 HDCP_TRANSC_SHA1_M0; 453 case TRANSCODER_D: 454 return HDCP_TRANSD_REP_PRESENT | 455 HDCP_TRANSD_SHA1_M0; 456 default: 457 drm_err(display->drm, "Unknown transcoder %d\n", 458 cpu_transcoder); 459 return 0; 460 } 461 } 462 463 switch 
(port) { 464 case PORT_A: 465 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; 466 case PORT_B: 467 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0; 468 case PORT_C: 469 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0; 470 case PORT_D: 471 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0; 472 case PORT_E: 473 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; 474 default: 475 drm_err(display->drm, "Unknown port %d\n", port); 476 return 0; 477 } 478 } 479 480 static 481 int intel_hdcp_validate_v_prime(struct intel_connector *connector, 482 const struct intel_hdcp_shim *shim, 483 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) 484 { 485 struct intel_display *display = to_intel_display(connector); 486 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 487 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 488 enum port port = dig_port->base.port; 489 u32 vprime, sha_text, sha_leftovers, rep_ctl; 490 int ret, i, j, sha_idx; 491 492 /* Process V' values from the receiver */ 493 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { 494 ret = shim->read_v_prime_part(dig_port, i, &vprime); 495 if (ret) 496 return ret; 497 intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime); 498 } 499 500 /* 501 * We need to write the concatenation of all device KSVs, BINFO (DP) || 502 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte 503 * stream is written via the HDCP_SHA_TEXT register in 32-bit 504 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This 505 * index will keep track of our progress through the 64 bytes as well as 506 * helping us work the 40-bit KSVs through our 32-bit register. 507 * 508 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian 509 */ 510 sha_idx = 0; 511 sha_text = 0; 512 sha_leftovers = 0; 513 rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port); 514 intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 515 for (i = 0; i < num_downstream; i++) { 516 unsigned int sha_empty; 517 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; 518 519 /* Fill up the empty slots in sha_text and write it out */ 520 sha_empty = sizeof(sha_text) - sha_leftovers; 521 for (j = 0; j < sha_empty; j++) { 522 u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8); 523 sha_text |= ksv[j] << off; 524 } 525 526 ret = intel_write_sha_text(display, sha_text); 527 if (ret < 0) 528 return ret; 529 530 /* Programming guide writes this every 64 bytes */ 531 sha_idx += sizeof(sha_text); 532 if (!(sha_idx % 64)) 533 intel_de_write(display, HDCP_REP_CTL, 534 rep_ctl | HDCP_SHA1_TEXT_32); 535 536 /* Store the leftover bytes from the ksv in sha_text */ 537 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; 538 sha_text = 0; 539 for (j = 0; j < sha_leftovers; j++) 540 sha_text |= ksv[sha_empty + j] << 541 ((sizeof(sha_text) - j - 1) * 8); 542 543 /* 544 * If we still have room in sha_text for more data, continue. 545 * Otherwise, write it out immediately. 546 */ 547 if (sizeof(sha_text) > sha_leftovers) 548 continue; 549 550 ret = intel_write_sha_text(display, sha_text); 551 if (ret < 0) 552 return ret; 553 sha_leftovers = 0; 554 sha_text = 0; 555 sha_idx += sizeof(sha_text); 556 } 557 558 /* 559 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many 560 * bytes are leftover from the last ksv, we might be able to fit them 561 * all in sha_text (first 2 cases), or we might need to split them up 562 * into 2 writes (last 2 cases). 
563 */ 564 if (sha_leftovers == 0) { 565 /* Write 16 bits of text, 16 bits of M0 */ 566 intel_de_write(display, HDCP_REP_CTL, 567 rep_ctl | HDCP_SHA1_TEXT_16); 568 ret = intel_write_sha_text(display, 569 bstatus[0] << 8 | bstatus[1]); 570 if (ret < 0) 571 return ret; 572 sha_idx += sizeof(sha_text); 573 574 /* Write 32 bits of M0 */ 575 intel_de_write(display, HDCP_REP_CTL, 576 rep_ctl | HDCP_SHA1_TEXT_0); 577 ret = intel_write_sha_text(display, 0); 578 if (ret < 0) 579 return ret; 580 sha_idx += sizeof(sha_text); 581 582 /* Write 16 bits of M0 */ 583 intel_de_write(display, HDCP_REP_CTL, 584 rep_ctl | HDCP_SHA1_TEXT_16); 585 ret = intel_write_sha_text(display, 0); 586 if (ret < 0) 587 return ret; 588 sha_idx += sizeof(sha_text); 589 590 } else if (sha_leftovers == 1) { 591 /* Write 24 bits of text, 8 bits of M0 */ 592 intel_de_write(display, HDCP_REP_CTL, 593 rep_ctl | HDCP_SHA1_TEXT_24); 594 sha_text |= bstatus[0] << 16 | bstatus[1] << 8; 595 /* Only 24-bits of data, must be in the LSB */ 596 sha_text = (sha_text & 0xffffff00) >> 8; 597 ret = intel_write_sha_text(display, sha_text); 598 if (ret < 0) 599 return ret; 600 sha_idx += sizeof(sha_text); 601 602 /* Write 32 bits of M0 */ 603 intel_de_write(display, HDCP_REP_CTL, 604 rep_ctl | HDCP_SHA1_TEXT_0); 605 ret = intel_write_sha_text(display, 0); 606 if (ret < 0) 607 return ret; 608 sha_idx += sizeof(sha_text); 609 610 /* Write 24 bits of M0 */ 611 intel_de_write(display, HDCP_REP_CTL, 612 rep_ctl | HDCP_SHA1_TEXT_8); 613 ret = intel_write_sha_text(display, 0); 614 if (ret < 0) 615 return ret; 616 sha_idx += sizeof(sha_text); 617 618 } else if (sha_leftovers == 2) { 619 /* Write 32 bits of text */ 620 intel_de_write(display, HDCP_REP_CTL, 621 rep_ctl | HDCP_SHA1_TEXT_32); 622 sha_text |= bstatus[0] << 8 | bstatus[1]; 623 ret = intel_write_sha_text(display, sha_text); 624 if (ret < 0) 625 return ret; 626 sha_idx += sizeof(sha_text); 627 628 /* Write 64 bits of M0 */ 629 intel_de_write(display, HDCP_REP_CTL, 630 rep_ctl | HDCP_SHA1_TEXT_0); 631 for (i = 0; i < 2; i++) { 632 ret = intel_write_sha_text(display, 0); 633 if (ret < 0) 634 return ret; 635 sha_idx += sizeof(sha_text); 636 } 637 638 /* 639 * Terminate the SHA-1 stream by hand. For the other leftover 640 * cases this is appended by the hardware. 
641 */ 642 intel_de_write(display, HDCP_REP_CTL, 643 rep_ctl | HDCP_SHA1_TEXT_32); 644 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; 645 ret = intel_write_sha_text(display, sha_text); 646 if (ret < 0) 647 return ret; 648 sha_idx += sizeof(sha_text); 649 } else if (sha_leftovers == 3) { 650 /* Write 32 bits of text (filled from LSB) */ 651 intel_de_write(display, HDCP_REP_CTL, 652 rep_ctl | HDCP_SHA1_TEXT_32); 653 sha_text |= bstatus[0]; 654 ret = intel_write_sha_text(display, sha_text); 655 if (ret < 0) 656 return ret; 657 sha_idx += sizeof(sha_text); 658 659 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ 660 intel_de_write(display, HDCP_REP_CTL, 661 rep_ctl | HDCP_SHA1_TEXT_8); 662 ret = intel_write_sha_text(display, bstatus[1]); 663 if (ret < 0) 664 return ret; 665 sha_idx += sizeof(sha_text); 666 667 /* Write 32 bits of M0 */ 668 intel_de_write(display, HDCP_REP_CTL, 669 rep_ctl | HDCP_SHA1_TEXT_0); 670 ret = intel_write_sha_text(display, 0); 671 if (ret < 0) 672 return ret; 673 sha_idx += sizeof(sha_text); 674 675 /* Write 8 bits of M0 */ 676 intel_de_write(display, HDCP_REP_CTL, 677 rep_ctl | HDCP_SHA1_TEXT_24); 678 ret = intel_write_sha_text(display, 0); 679 if (ret < 0) 680 return ret; 681 sha_idx += sizeof(sha_text); 682 } else { 683 drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n", 684 sha_leftovers); 685 return -EINVAL; 686 } 687 688 intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 689 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ 690 while ((sha_idx % 64) < (64 - sizeof(sha_text))) { 691 ret = intel_write_sha_text(display, 0); 692 if (ret < 0) 693 return ret; 694 sha_idx += sizeof(sha_text); 695 } 696 697 /* 698 * Last write gets the length of the concatenation in bits. That is: 699 * - 5 bytes per device 700 * - 10 bytes for BINFO/BSTATUS(2), M0(8) 701 */ 702 sha_text = (num_downstream * 5 + 10) * 8; 703 ret = intel_write_sha_text(display, sha_text); 704 if (ret < 0) 705 return ret; 706 707 /* Tell the HW we're done with the hash and wait for it to ACK */ 708 intel_de_write(display, HDCP_REP_CTL, 709 rep_ctl | HDCP_SHA1_COMPLETE_HASH); 710 if (intel_de_wait_for_set(display, HDCP_REP_CTL, 711 HDCP_SHA1_COMPLETE, 1)) { 712 drm_err(display->drm, "Timed out waiting for SHA1 complete\n"); 713 return -ETIMEDOUT; 714 } 715 if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { 716 drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n"); 717 return -ENXIO; 718 } 719 720 return 0; 721 } 722 723 /* Implements Part 2 of the HDCP authorization procedure */ 724 static 725 int intel_hdcp_auth_downstream(struct intel_connector *connector) 726 { 727 struct intel_display *display = to_intel_display(connector); 728 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 729 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 730 u8 bstatus[2], num_downstream, *ksv_fifo; 731 int ret, i, tries = 3; 732 733 ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); 734 if (ret) { 735 drm_dbg_kms(display->drm, 736 "KSV list failed to become ready (%d)\n", ret); 737 return ret; 738 } 739 740 ret = shim->read_bstatus(dig_port, bstatus); 741 if (ret) 742 return ret; 743 744 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 745 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { 746 drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n"); 747 return -EPERM; 748 } 749 750 /* 751 * When repeater reports 0 device count, HDCP1.4 spec allows disabling 752 * the HDCP encryption. 
That implies that repeater can't have its own 753 * display. As there is no consumption of encrypted content in the 754 * repeater with 0 downstream devices, we are failing the 755 * authentication. 756 */ 757 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); 758 if (num_downstream == 0) { 759 drm_dbg_kms(display->drm, 760 "Repeater with zero downstream devices\n"); 761 return -EINVAL; 762 } 763 764 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); 765 if (!ksv_fifo) { 766 drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n"); 767 return -ENOMEM; 768 } 769 770 ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo); 771 if (ret) 772 goto err; 773 774 if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo, 775 num_downstream) > 0) { 776 drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n"); 777 ret = -EPERM; 778 goto err; 779 } 780 781 /* 782 * When V prime mismatches, DP Spec mandates re-read of 783 * V prime atleast twice. 784 */ 785 for (i = 0; i < tries; i++) { 786 ret = intel_hdcp_validate_v_prime(connector, shim, 787 ksv_fifo, num_downstream, 788 bstatus); 789 if (!ret) 790 break; 791 } 792 793 if (i == tries) { 794 drm_dbg_kms(display->drm, 795 "V Prime validation failed.(%d)\n", ret); 796 goto err; 797 } 798 799 drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n", 800 num_downstream); 801 ret = 0; 802 err: 803 kfree(ksv_fifo); 804 return ret; 805 } 806 807 /* Implements Part 1 of the HDCP authorization procedure */ 808 static int intel_hdcp_auth(struct intel_connector *connector) 809 { 810 struct intel_display *display = to_intel_display(connector); 811 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 812 struct intel_hdcp *hdcp = &connector->hdcp; 813 const struct intel_hdcp_shim *shim = hdcp->shim; 814 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 815 enum port port = dig_port->base.port; 816 unsigned long r0_prime_gen_start; 817 int ret, i, tries = 2; 818 union { 819 u32 reg[2]; 820 u8 shim[DRM_HDCP_AN_LEN]; 821 } an; 822 union { 823 u32 reg[2]; 824 u8 shim[DRM_HDCP_KSV_LEN]; 825 } bksv; 826 union { 827 u32 reg; 828 u8 shim[DRM_HDCP_RI_LEN]; 829 } ri; 830 bool repeater_present, hdcp_capable; 831 832 /* 833 * Detects whether the display is HDCP capable. Although we check for 834 * valid Bksv below, the HDCP over DP spec requires that we check 835 * whether the display supports HDCP before we write An. For HDMI 836 * displays, this is not necessary. 
837 */ 838 if (shim->hdcp_get_capability) { 839 ret = shim->hdcp_get_capability(dig_port, &hdcp_capable); 840 if (ret) 841 return ret; 842 if (!hdcp_capable) { 843 drm_dbg_kms(display->drm, 844 "Panel is not HDCP capable\n"); 845 return -EINVAL; 846 } 847 } 848 849 /* Initialize An with 2 random values and acquire it */ 850 for (i = 0; i < 2; i++) 851 intel_de_write(display, 852 HDCP_ANINIT(display, cpu_transcoder, port), 853 get_random_u32()); 854 intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 855 HDCP_CONF_CAPTURE_AN); 856 857 /* Wait for An to be acquired */ 858 if (intel_de_wait_for_set(display, 859 HDCP_STATUS(display, cpu_transcoder, port), 860 HDCP_STATUS_AN_READY, 1)) { 861 drm_err(display->drm, "Timed out waiting for An\n"); 862 return -ETIMEDOUT; 863 } 864 865 an.reg[0] = intel_de_read(display, 866 HDCP_ANLO(display, cpu_transcoder, port)); 867 an.reg[1] = intel_de_read(display, 868 HDCP_ANHI(display, cpu_transcoder, port)); 869 ret = shim->write_an_aksv(dig_port, an.shim); 870 if (ret) 871 return ret; 872 873 r0_prime_gen_start = jiffies; 874 875 memset(&bksv, 0, sizeof(bksv)); 876 877 ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim); 878 if (ret < 0) 879 return ret; 880 881 if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) { 882 drm_err(display->drm, "BKSV is revoked\n"); 883 return -EPERM; 884 } 885 886 intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port), 887 bksv.reg[0]); 888 intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port), 889 bksv.reg[1]); 890 891 ret = shim->repeater_present(dig_port, &repeater_present); 892 if (ret) 893 return ret; 894 if (repeater_present) 895 intel_de_write(display, HDCP_REP_CTL, 896 intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port)); 897 898 ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); 899 if (ret) 900 return ret; 901 902 intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 903 HDCP_CONF_AUTH_AND_ENC); 904 905 /* Wait for R0 ready */ 906 if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & 907 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { 908 drm_err(display->drm, "Timed out waiting for R0 ready\n"); 909 return -ETIMEDOUT; 910 } 911 912 /* 913 * Wait for R0' to become available. The spec says 100ms from Aksv, but 914 * some monitors can take longer than this. We'll set the timeout at 915 * 300ms just to be sure. 916 * 917 * On DP, there's an R0_READY bit available but no such bit 918 * exists on HDMI. Since the upper-bound is the same, we'll just do 919 * the stupid thing instead of polling on one and not the other. 920 */ 921 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300); 922 923 tries = 3; 924 925 /* 926 * DP HDCP Spec mandates the two more reattempt to read R0, incase 927 * of R0 mismatch. 
928 */ 929 for (i = 0; i < tries; i++) { 930 ri.reg = 0; 931 ret = shim->read_ri_prime(dig_port, ri.shim); 932 if (ret) 933 return ret; 934 intel_de_write(display, 935 HDCP_RPRIME(display, cpu_transcoder, port), 936 ri.reg); 937 938 /* Wait for Ri prime match */ 939 if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & 940 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) 941 break; 942 } 943 944 if (i == tries) { 945 drm_dbg_kms(display->drm, 946 "Timed out waiting for Ri prime match (%x)\n", 947 intel_de_read(display, 948 HDCP_STATUS(display, cpu_transcoder, port))); 949 return -ETIMEDOUT; 950 } 951 952 /* Wait for encryption confirmation */ 953 if (intel_de_wait_for_set(display, 954 HDCP_STATUS(display, cpu_transcoder, port), 955 HDCP_STATUS_ENC, 956 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { 957 drm_err(display->drm, "Timed out waiting for encryption\n"); 958 return -ETIMEDOUT; 959 } 960 961 /* DP MST Auth Part 1 Step 2.a and Step 2.b */ 962 if (shim->stream_encryption) { 963 ret = shim->stream_encryption(connector, true); 964 if (ret) { 965 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n", 966 connector->base.base.id, connector->base.name); 967 return ret; 968 } 969 drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", 970 transcoder_name(hdcp->stream_transcoder)); 971 } 972 973 if (repeater_present) 974 return intel_hdcp_auth_downstream(connector); 975 976 drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n"); 977 return 0; 978 } 979 980 static int _intel_hdcp_disable(struct intel_connector *connector) 981 { 982 struct intel_display *display = to_intel_display(connector); 983 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 984 struct intel_hdcp *hdcp = &connector->hdcp; 985 enum port port = dig_port->base.port; 986 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 987 u32 repeater_ctl; 988 int ret; 989 990 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n", 991 connector->base.base.id, connector->base.name); 992 993 if (hdcp->shim->stream_encryption) { 994 ret = hdcp->shim->stream_encryption(connector, false); 995 if (ret) { 996 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n", 997 connector->base.base.id, connector->base.name); 998 return ret; 999 } 1000 drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", 1001 transcoder_name(hdcp->stream_transcoder)); 1002 /* 1003 * If there are other connectors on this port using HDCP, 1004 * don't disable it until it disabled HDCP encryption for 1005 * all connectors in MST topology. 
1006 */ 1007 if (dig_port->hdcp.num_streams > 0) 1008 return 0; 1009 } 1010 1011 hdcp->hdcp_encrypted = false; 1012 intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0); 1013 if (intel_de_wait_for_clear(display, 1014 HDCP_STATUS(display, cpu_transcoder, port), 1015 ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { 1016 drm_err(display->drm, 1017 "Failed to disable HDCP, timeout clearing status\n"); 1018 return -ETIMEDOUT; 1019 } 1020 1021 repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, 1022 port); 1023 intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0); 1024 1025 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); 1026 if (ret) { 1027 drm_err(display->drm, "Failed to disable HDCP signalling\n"); 1028 return ret; 1029 } 1030 1031 drm_dbg_kms(display->drm, "HDCP is disabled\n"); 1032 return 0; 1033 } 1034 1035 static int intel_hdcp1_enable(struct intel_connector *connector) 1036 { 1037 struct intel_display *display = to_intel_display(connector); 1038 struct intel_hdcp *hdcp = &connector->hdcp; 1039 int i, ret, tries = 3; 1040 1041 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n", 1042 connector->base.base.id, connector->base.name); 1043 1044 if (!hdcp_key_loadable(display)) { 1045 drm_err(display->drm, "HDCP key Load is not possible\n"); 1046 return -ENXIO; 1047 } 1048 1049 for (i = 0; i < KEY_LOAD_TRIES; i++) { 1050 ret = intel_hdcp_load_keys(display); 1051 if (!ret) 1052 break; 1053 intel_hdcp_clear_keys(display); 1054 } 1055 if (ret) { 1056 drm_err(display->drm, "Could not load HDCP keys, (%d)\n", 1057 ret); 1058 return ret; 1059 } 1060 1061 intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, true); 1062 1063 /* Incase of authentication failures, HDCP spec expects reauth. */ 1064 for (i = 0; i < tries; i++) { 1065 ret = intel_hdcp_auth(connector); 1066 if (!ret) { 1067 hdcp->hdcp_encrypted = true; 1068 return 0; 1069 } 1070 1071 drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret); 1072 1073 /* Ensuring HDCP encryption and signalling are stopped. 
*/ 1074 _intel_hdcp_disable(connector); 1075 } 1076 1077 drm_dbg_kms(display->drm, 1078 "HDCP authentication failed (%d tries/%d)\n", tries, ret); 1079 return ret; 1080 } 1081 1082 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) 1083 { 1084 return container_of(hdcp, struct intel_connector, hdcp); 1085 } 1086 1087 static void intel_hdcp_update_value(struct intel_connector *connector, 1088 u64 value, bool update_property) 1089 { 1090 struct intel_display *display = to_intel_display(connector); 1091 struct drm_i915_private *i915 = to_i915(display->drm); 1092 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1093 struct intel_hdcp *hdcp = &connector->hdcp; 1094 1095 drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex)); 1096 1097 if (hdcp->value == value) 1098 return; 1099 1100 drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp.mutex)); 1101 1102 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 1103 if (!drm_WARN_ON(display->drm, dig_port->hdcp.num_streams == 0)) 1104 dig_port->hdcp.num_streams--; 1105 } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 1106 dig_port->hdcp.num_streams++; 1107 } 1108 1109 hdcp->value = value; 1110 if (update_property) { 1111 drm_connector_get(&connector->base); 1112 if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) 1113 drm_connector_put(&connector->base); 1114 } 1115 } 1116 1117 /* Implements Part 3 of the HDCP authorization procedure */ 1118 static int intel_hdcp_check_link(struct intel_connector *connector) 1119 { 1120 struct intel_display *display = to_intel_display(connector); 1121 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1122 struct intel_hdcp *hdcp = &connector->hdcp; 1123 enum port port = dig_port->base.port; 1124 enum transcoder cpu_transcoder; 1125 int ret = 0; 1126 1127 mutex_lock(&hdcp->mutex); 1128 mutex_lock(&dig_port->hdcp.mutex); 1129 1130 cpu_transcoder = hdcp->cpu_transcoder; 1131 1132 /* Check_link valid only when HDCP1.4 is enabled */ 1133 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 1134 !hdcp->hdcp_encrypted) { 1135 ret = -EINVAL; 1136 goto out; 1137 } 1138 1139 if (drm_WARN_ON(display->drm, 1140 !intel_hdcp_in_use(display, cpu_transcoder, port))) { 1141 drm_err(display->drm, 1142 "[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n", 1143 connector->base.base.id, connector->base.name, 1144 intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port))); 1145 ret = -ENXIO; 1146 intel_hdcp_update_value(connector, 1147 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1148 true); 1149 goto out; 1150 } 1151 1152 if (hdcp->shim->check_link(dig_port, connector)) { 1153 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 1154 intel_hdcp_update_value(connector, 1155 DRM_MODE_CONTENT_PROTECTION_ENABLED, true); 1156 } 1157 goto out; 1158 } 1159 1160 drm_dbg_kms(display->drm, 1161 "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n", 1162 connector->base.base.id, connector->base.name); 1163 1164 ret = _intel_hdcp_disable(connector); 1165 if (ret) { 1166 drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret); 1167 intel_hdcp_update_value(connector, 1168 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1169 true); 1170 goto out; 1171 } 1172 1173 ret = intel_hdcp1_enable(connector); 1174 if (ret) { 1175 drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret); 1176 intel_hdcp_update_value(connector, 1177 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1178 true); 1179 goto out; 1180 } 1181 1182 out: 1183 
mutex_unlock(&dig_port->hdcp.mutex); 1184 mutex_unlock(&hdcp->mutex); 1185 return ret; 1186 } 1187 1188 static void intel_hdcp_prop_work(struct work_struct *work) 1189 { 1190 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, 1191 prop_work); 1192 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 1193 struct intel_display *display = to_intel_display(connector); 1194 1195 drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL); 1196 mutex_lock(&hdcp->mutex); 1197 1198 /* 1199 * This worker is only used to flip between ENABLED/DESIRED. Either of 1200 * those to UNDESIRED is handled by core. If value == UNDESIRED, 1201 * we're running just after hdcp has been disabled, so just exit 1202 */ 1203 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 1204 drm_hdcp_update_content_protection(&connector->base, 1205 hdcp->value); 1206 1207 mutex_unlock(&hdcp->mutex); 1208 drm_modeset_unlock(&display->drm->mode_config.connection_mutex); 1209 1210 drm_connector_put(&connector->base); 1211 } 1212 1213 bool is_hdcp_supported(struct intel_display *display, enum port port) 1214 { 1215 return DISPLAY_RUNTIME_INFO(display)->has_hdcp && 1216 (DISPLAY_VER(display) >= 12 || port < PORT_E); 1217 } 1218 1219 static int 1220 hdcp2_prepare_ake_init(struct intel_connector *connector, 1221 struct hdcp2_ake_init *ake_data) 1222 { 1223 struct intel_display *display = to_intel_display(connector); 1224 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1225 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1226 struct i915_hdcp_arbiter *arbiter; 1227 int ret; 1228 1229 mutex_lock(&display->hdcp.hdcp_mutex); 1230 arbiter = display->hdcp.arbiter; 1231 1232 if (!arbiter || !arbiter->ops) { 1233 mutex_unlock(&display->hdcp.hdcp_mutex); 1234 return -EINVAL; 1235 } 1236 1237 ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data); 1238 if (ret) 1239 drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n", 1240 ret); 1241 mutex_unlock(&display->hdcp.hdcp_mutex); 1242 1243 return ret; 1244 } 1245 1246 static int 1247 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, 1248 struct hdcp2_ake_send_cert *rx_cert, 1249 bool *paired, 1250 struct hdcp2_ake_no_stored_km *ek_pub_km, 1251 size_t *msg_sz) 1252 { 1253 struct intel_display *display = to_intel_display(connector); 1254 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1255 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1256 struct i915_hdcp_arbiter *arbiter; 1257 int ret; 1258 1259 mutex_lock(&display->hdcp.hdcp_mutex); 1260 arbiter = display->hdcp.arbiter; 1261 1262 if (!arbiter || !arbiter->ops) { 1263 mutex_unlock(&display->hdcp.hdcp_mutex); 1264 return -EINVAL; 1265 } 1266 1267 ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data, 1268 rx_cert, paired, 1269 ek_pub_km, msg_sz); 1270 if (ret < 0) 1271 drm_dbg_kms(display->drm, "Verify rx_cert failed. 
%d\n", 1272 ret); 1273 mutex_unlock(&display->hdcp.hdcp_mutex); 1274 1275 return ret; 1276 } 1277 1278 static int hdcp2_verify_hprime(struct intel_connector *connector, 1279 struct hdcp2_ake_send_hprime *rx_hprime) 1280 { 1281 struct intel_display *display = to_intel_display(connector); 1282 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1283 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1284 struct i915_hdcp_arbiter *arbiter; 1285 int ret; 1286 1287 mutex_lock(&display->hdcp.hdcp_mutex); 1288 arbiter = display->hdcp.arbiter; 1289 1290 if (!arbiter || !arbiter->ops) { 1291 mutex_unlock(&display->hdcp.hdcp_mutex); 1292 return -EINVAL; 1293 } 1294 1295 ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); 1296 if (ret < 0) 1297 drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret); 1298 mutex_unlock(&display->hdcp.hdcp_mutex); 1299 1300 return ret; 1301 } 1302 1303 static int 1304 hdcp2_store_pairing_info(struct intel_connector *connector, 1305 struct hdcp2_ake_send_pairing_info *pairing_info) 1306 { 1307 struct intel_display *display = to_intel_display(connector); 1308 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1309 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1310 struct i915_hdcp_arbiter *arbiter; 1311 int ret; 1312 1313 mutex_lock(&display->hdcp.hdcp_mutex); 1314 arbiter = display->hdcp.arbiter; 1315 1316 if (!arbiter || !arbiter->ops) { 1317 mutex_unlock(&display->hdcp.hdcp_mutex); 1318 return -EINVAL; 1319 } 1320 1321 ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); 1322 if (ret < 0) 1323 drm_dbg_kms(display->drm, "Store pairing info failed. %d\n", 1324 ret); 1325 mutex_unlock(&display->hdcp.hdcp_mutex); 1326 1327 return ret; 1328 } 1329 1330 static int 1331 hdcp2_prepare_lc_init(struct intel_connector *connector, 1332 struct hdcp2_lc_init *lc_init) 1333 { 1334 struct intel_display *display = to_intel_display(connector); 1335 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1336 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1337 struct i915_hdcp_arbiter *arbiter; 1338 int ret; 1339 1340 mutex_lock(&display->hdcp.hdcp_mutex); 1341 arbiter = display->hdcp.arbiter; 1342 1343 if (!arbiter || !arbiter->ops) { 1344 mutex_unlock(&display->hdcp.hdcp_mutex); 1345 return -EINVAL; 1346 } 1347 1348 ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); 1349 if (ret < 0) 1350 drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n", 1351 ret); 1352 mutex_unlock(&display->hdcp.hdcp_mutex); 1353 1354 return ret; 1355 } 1356 1357 static int 1358 hdcp2_verify_lprime(struct intel_connector *connector, 1359 struct hdcp2_lc_send_lprime *rx_lprime) 1360 { 1361 struct intel_display *display = to_intel_display(connector); 1362 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1363 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1364 struct i915_hdcp_arbiter *arbiter; 1365 int ret; 1366 1367 mutex_lock(&display->hdcp.hdcp_mutex); 1368 arbiter = display->hdcp.arbiter; 1369 1370 if (!arbiter || !arbiter->ops) { 1371 mutex_unlock(&display->hdcp.hdcp_mutex); 1372 return -EINVAL; 1373 } 1374 1375 ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); 1376 if (ret < 0) 1377 drm_dbg_kms(display->drm, "Verify L_Prime failed. 
%d\n", 1378 ret); 1379 mutex_unlock(&display->hdcp.hdcp_mutex); 1380 1381 return ret; 1382 } 1383 1384 static int hdcp2_prepare_skey(struct intel_connector *connector, 1385 struct hdcp2_ske_send_eks *ske_data) 1386 { 1387 struct intel_display *display = to_intel_display(connector); 1388 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1389 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1390 struct i915_hdcp_arbiter *arbiter; 1391 int ret; 1392 1393 mutex_lock(&display->hdcp.hdcp_mutex); 1394 arbiter = display->hdcp.arbiter; 1395 1396 if (!arbiter || !arbiter->ops) { 1397 mutex_unlock(&display->hdcp.hdcp_mutex); 1398 return -EINVAL; 1399 } 1400 1401 ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); 1402 if (ret < 0) 1403 drm_dbg_kms(display->drm, "Get session key failed. %d\n", 1404 ret); 1405 mutex_unlock(&display->hdcp.hdcp_mutex); 1406 1407 return ret; 1408 } 1409 1410 static int 1411 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, 1412 struct hdcp2_rep_send_receiverid_list 1413 *rep_topology, 1414 struct hdcp2_rep_send_ack *rep_send_ack) 1415 { 1416 struct intel_display *display = to_intel_display(connector); 1417 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1418 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1419 struct i915_hdcp_arbiter *arbiter; 1420 int ret; 1421 1422 mutex_lock(&display->hdcp.hdcp_mutex); 1423 arbiter = display->hdcp.arbiter; 1424 1425 if (!arbiter || !arbiter->ops) { 1426 mutex_unlock(&display->hdcp.hdcp_mutex); 1427 return -EINVAL; 1428 } 1429 1430 ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev, 1431 data, 1432 rep_topology, 1433 rep_send_ack); 1434 if (ret < 0) 1435 drm_dbg_kms(display->drm, 1436 "Verify rep topology failed. %d\n", ret); 1437 mutex_unlock(&display->hdcp.hdcp_mutex); 1438 1439 return ret; 1440 } 1441 1442 static int 1443 hdcp2_verify_mprime(struct intel_connector *connector, 1444 struct hdcp2_rep_stream_ready *stream_ready) 1445 { 1446 struct intel_display *display = to_intel_display(connector); 1447 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1448 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1449 struct i915_hdcp_arbiter *arbiter; 1450 int ret; 1451 1452 mutex_lock(&display->hdcp.hdcp_mutex); 1453 arbiter = display->hdcp.arbiter; 1454 1455 if (!arbiter || !arbiter->ops) { 1456 mutex_unlock(&display->hdcp.hdcp_mutex); 1457 return -EINVAL; 1458 } 1459 1460 ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); 1461 if (ret < 0) 1462 drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret); 1463 mutex_unlock(&display->hdcp.hdcp_mutex); 1464 1465 return ret; 1466 } 1467 1468 static int hdcp2_authenticate_port(struct intel_connector *connector) 1469 { 1470 struct intel_display *display = to_intel_display(connector); 1471 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1472 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1473 struct i915_hdcp_arbiter *arbiter; 1474 int ret; 1475 1476 mutex_lock(&display->hdcp.hdcp_mutex); 1477 arbiter = display->hdcp.arbiter; 1478 1479 if (!arbiter || !arbiter->ops) { 1480 mutex_unlock(&display->hdcp.hdcp_mutex); 1481 return -EINVAL; 1482 } 1483 1484 ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); 1485 if (ret < 0) 1486 drm_dbg_kms(display->drm, "Enable hdcp auth failed. 
%d\n", 1487 ret); 1488 mutex_unlock(&display->hdcp.hdcp_mutex); 1489 1490 return ret; 1491 } 1492 1493 static int hdcp2_close_session(struct intel_connector *connector) 1494 { 1495 struct intel_display *display = to_intel_display(connector); 1496 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1497 struct i915_hdcp_arbiter *arbiter; 1498 int ret; 1499 1500 mutex_lock(&display->hdcp.hdcp_mutex); 1501 arbiter = display->hdcp.arbiter; 1502 1503 if (!arbiter || !arbiter->ops) { 1504 mutex_unlock(&display->hdcp.hdcp_mutex); 1505 return -EINVAL; 1506 } 1507 1508 ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, 1509 &dig_port->hdcp.port_data); 1510 mutex_unlock(&display->hdcp.hdcp_mutex); 1511 1512 return ret; 1513 } 1514 1515 static int hdcp2_deauthenticate_port(struct intel_connector *connector) 1516 { 1517 return hdcp2_close_session(connector); 1518 } 1519 1520 /* Authentication flow starts from here */ 1521 static int hdcp2_authentication_key_exchange(struct intel_connector *connector) 1522 { 1523 struct intel_display *display = to_intel_display(connector); 1524 struct intel_digital_port *dig_port = 1525 intel_attached_dig_port(connector); 1526 struct intel_hdcp *hdcp = &connector->hdcp; 1527 union { 1528 struct hdcp2_ake_init ake_init; 1529 struct hdcp2_ake_send_cert send_cert; 1530 struct hdcp2_ake_no_stored_km no_stored_km; 1531 struct hdcp2_ake_send_hprime send_hprime; 1532 struct hdcp2_ake_send_pairing_info pairing_info; 1533 } msgs; 1534 const struct intel_hdcp_shim *shim = hdcp->shim; 1535 size_t size; 1536 int ret, i, max_retries; 1537 1538 /* Init for seq_num */ 1539 hdcp->seq_num_v = 0; 1540 hdcp->seq_num_m = 0; 1541 1542 if (intel_encoder_is_dp(&dig_port->base) || 1543 intel_encoder_is_mst(&dig_port->base)) 1544 max_retries = 10; 1545 else 1546 max_retries = 1; 1547 1548 ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init); 1549 if (ret < 0) 1550 return ret; 1551 1552 /* 1553 * Retry the first read and write to downstream at least 10 times 1554 * with a 50ms delay if not hdcp2 capable for DP/DPMST encoders 1555 * (dock decides to stop advertising hdcp2 capability for some reason). 1556 * The reason being that during suspend resume dock usually keeps the 1557 * HDCP2 registers inaccessible causing AUX error. This wouldn't be a 1558 * big problem if the userspace just kept retrying with some delay while 1559 * it continues to play low value content but most userspace applications 1560 * end up throwing an error when it receives one from KMD. This makes 1561 * sure we give the dock and the sink devices to complete its power cycle 1562 * and then try HDCP authentication. The values of 10 and delay of 50ms 1563 * was decided based on multiple trial and errors. 
1564 */ 1565 for (i = 0; i < max_retries; i++) { 1566 if (!intel_hdcp2_get_capability(connector)) { 1567 msleep(50); 1568 continue; 1569 } 1570 1571 ret = shim->write_2_2_msg(connector, &msgs.ake_init, 1572 sizeof(msgs.ake_init)); 1573 if (ret < 0) 1574 continue; 1575 1576 ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT, 1577 &msgs.send_cert, sizeof(msgs.send_cert)); 1578 if (ret > 0) 1579 break; 1580 } 1581 1582 if (ret < 0) 1583 return ret; 1584 1585 if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { 1586 drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n"); 1587 return -EINVAL; 1588 } 1589 1590 hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); 1591 1592 if (drm_hdcp_check_ksvs_revoked(display->drm, 1593 msgs.send_cert.cert_rx.receiver_id, 1594 1) > 0) { 1595 drm_err(display->drm, "Receiver ID is revoked\n"); 1596 return -EPERM; 1597 } 1598 1599 /* 1600 * Here msgs.no_stored_km will hold msgs corresponding to the km 1601 * stored also. 1602 */ 1603 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert, 1604 &hdcp->is_paired, 1605 &msgs.no_stored_km, &size); 1606 if (ret < 0) 1607 return ret; 1608 1609 ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size); 1610 if (ret < 0) 1611 return ret; 1612 1613 ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME, 1614 &msgs.send_hprime, sizeof(msgs.send_hprime)); 1615 if (ret < 0) 1616 return ret; 1617 1618 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime); 1619 if (ret < 0) 1620 return ret; 1621 1622 if (!hdcp->is_paired) { 1623 /* Pairing is required */ 1624 ret = shim->read_2_2_msg(connector, 1625 HDCP_2_2_AKE_SEND_PAIRING_INFO, 1626 &msgs.pairing_info, 1627 sizeof(msgs.pairing_info)); 1628 if (ret < 0) 1629 return ret; 1630 1631 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info); 1632 if (ret < 0) 1633 return ret; 1634 hdcp->is_paired = true; 1635 } 1636 1637 return 0; 1638 } 1639 1640 static int hdcp2_locality_check(struct intel_connector *connector) 1641 { 1642 struct intel_hdcp *hdcp = &connector->hdcp; 1643 union { 1644 struct hdcp2_lc_init lc_init; 1645 struct hdcp2_lc_send_lprime send_lprime; 1646 } msgs; 1647 const struct intel_hdcp_shim *shim = hdcp->shim; 1648 int tries = HDCP2_LC_RETRY_CNT, ret, i; 1649 1650 for (i = 0; i < tries; i++) { 1651 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init); 1652 if (ret < 0) 1653 continue; 1654 1655 ret = shim->write_2_2_msg(connector, &msgs.lc_init, 1656 sizeof(msgs.lc_init)); 1657 if (ret < 0) 1658 continue; 1659 1660 ret = shim->read_2_2_msg(connector, 1661 HDCP_2_2_LC_SEND_LPRIME, 1662 &msgs.send_lprime, 1663 sizeof(msgs.send_lprime)); 1664 if (ret < 0) 1665 continue; 1666 1667 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime); 1668 if (!ret) 1669 break; 1670 } 1671 1672 return ret; 1673 } 1674 1675 static int hdcp2_session_key_exchange(struct intel_connector *connector) 1676 { 1677 struct intel_hdcp *hdcp = &connector->hdcp; 1678 struct hdcp2_ske_send_eks send_eks; 1679 int ret; 1680 1681 ret = hdcp2_prepare_skey(connector, &send_eks); 1682 if (ret < 0) 1683 return ret; 1684 1685 ret = hdcp->shim->write_2_2_msg(connector, &send_eks, 1686 sizeof(send_eks)); 1687 if (ret < 0) 1688 return ret; 1689 1690 return 0; 1691 } 1692 1693 static 1694 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector) 1695 { 1696 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1697 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1698 struct intel_hdcp 
*hdcp = &connector->hdcp; 1699 union { 1700 struct hdcp2_rep_stream_manage stream_manage; 1701 struct hdcp2_rep_stream_ready stream_ready; 1702 } msgs; 1703 const struct intel_hdcp_shim *shim = hdcp->shim; 1704 int ret, streams_size_delta, i; 1705 1706 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) 1707 return -ERANGE; 1708 1709 /* Prepare RepeaterAuth_Stream_Manage msg */ 1710 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; 1711 drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); 1712 1713 msgs.stream_manage.k = cpu_to_be16(data->k); 1714 1715 for (i = 0; i < data->k; i++) { 1716 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id; 1717 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type; 1718 } 1719 1720 streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) * 1721 sizeof(struct hdcp2_streamid_type); 1722 /* Send it to Repeater */ 1723 ret = shim->write_2_2_msg(connector, &msgs.stream_manage, 1724 sizeof(msgs.stream_manage) - streams_size_delta); 1725 if (ret < 0) 1726 goto out; 1727 1728 ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY, 1729 &msgs.stream_ready, sizeof(msgs.stream_ready)); 1730 if (ret < 0) 1731 goto out; 1732 1733 data->seq_num_m = hdcp->seq_num_m; 1734 1735 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); 1736 1737 out: 1738 hdcp->seq_num_m++; 1739 1740 return ret; 1741 } 1742 1743 static 1744 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) 1745 { 1746 struct intel_display *display = to_intel_display(connector); 1747 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1748 struct intel_hdcp *hdcp = &connector->hdcp; 1749 union { 1750 struct hdcp2_rep_send_receiverid_list recvid_list; 1751 struct hdcp2_rep_send_ack rep_ack; 1752 } msgs; 1753 const struct intel_hdcp_shim *shim = hdcp->shim; 1754 u32 seq_num_v, device_cnt; 1755 u8 *rx_info; 1756 int ret; 1757 1758 ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST, 1759 &msgs.recvid_list, sizeof(msgs.recvid_list)); 1760 if (ret < 0) 1761 return ret; 1762 1763 rx_info = msgs.recvid_list.rx_info; 1764 1765 if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || 1766 HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { 1767 drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n"); 1768 return -EINVAL; 1769 } 1770 1771 /* 1772 * MST topology is not Type 1 capable if it contains a downstream 1773 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant. 1774 */ 1775 dig_port->hdcp.mst_type1_capable = 1776 !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) && 1777 !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]); 1778 1779 if (!dig_port->hdcp.mst_type1_capable && hdcp->content_type) { 1780 drm_dbg_kms(display->drm, 1781 "HDCP1.x or 2.0 Legacy Device Downstream\n"); 1782 return -EINVAL; 1783 } 1784 1785 /* Converting and Storing the seq_num_v to local variable as DWORD */ 1786 seq_num_v = 1787 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); 1788 1789 if (!hdcp->hdcp2_encrypted && seq_num_v) { 1790 drm_dbg_kms(display->drm, 1791 "Non zero Seq_num_v at first RecvId_List msg\n"); 1792 return -EINVAL; 1793 } 1794 1795 if (seq_num_v < hdcp->seq_num_v) { 1796 /* Roll over of the seq_num_v from repeater. Reauthenticate. 
*/ 1797 drm_dbg_kms(display->drm, "Seq_num_v roll over.\n"); 1798 return -EINVAL; 1799 } 1800 1801 device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 1802 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 1803 if (drm_hdcp_check_ksvs_revoked(display->drm, 1804 msgs.recvid_list.receiver_ids, 1805 device_cnt) > 0) { 1806 drm_err(display->drm, "Revoked receiver ID(s) is in list\n"); 1807 return -EPERM; 1808 } 1809 1810 ret = hdcp2_verify_rep_topology_prepare_ack(connector, 1811 &msgs.recvid_list, 1812 &msgs.rep_ack); 1813 if (ret < 0) 1814 return ret; 1815 1816 hdcp->seq_num_v = seq_num_v; 1817 ret = shim->write_2_2_msg(connector, &msgs.rep_ack, 1818 sizeof(msgs.rep_ack)); 1819 if (ret < 0) 1820 return ret; 1821 1822 return 0; 1823 } 1824 1825 static int hdcp2_authenticate_sink(struct intel_connector *connector) 1826 { 1827 struct intel_display *display = to_intel_display(connector); 1828 struct intel_hdcp *hdcp = &connector->hdcp; 1829 const struct intel_hdcp_shim *shim = hdcp->shim; 1830 int ret; 1831 1832 ret = hdcp2_authentication_key_exchange(connector); 1833 if (ret < 0) { 1834 drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret); 1835 return ret; 1836 } 1837 1838 ret = hdcp2_locality_check(connector); 1839 if (ret < 0) { 1840 drm_dbg_kms(display->drm, 1841 "Locality Check failed. Err : %d\n", ret); 1842 return ret; 1843 } 1844 1845 ret = hdcp2_session_key_exchange(connector); 1846 if (ret < 0) { 1847 drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret); 1848 return ret; 1849 } 1850 1851 if (shim->config_stream_type) { 1852 ret = shim->config_stream_type(connector, 1853 hdcp->is_repeater, 1854 hdcp->content_type); 1855 if (ret < 0) 1856 return ret; 1857 } 1858 1859 if (hdcp->is_repeater) { 1860 ret = hdcp2_authenticate_repeater_topology(connector); 1861 if (ret < 0) { 1862 drm_dbg_kms(display->drm, 1863 "Repeater Auth Failed. 
Err: %d\n", ret); 1864 return ret; 1865 } 1866 } 1867 1868 return ret; 1869 } 1870 1871 static int hdcp2_enable_stream_encryption(struct intel_connector *connector) 1872 { 1873 struct intel_display *display = to_intel_display(connector); 1874 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1875 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1876 struct intel_hdcp *hdcp = &connector->hdcp; 1877 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 1878 enum port port = dig_port->base.port; 1879 int ret = 0; 1880 1881 if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & 1882 LINK_ENCRYPTION_STATUS)) { 1883 drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n", 1884 connector->base.base.id, connector->base.name); 1885 ret = -EPERM; 1886 goto link_recover; 1887 } 1888 1889 if (hdcp->shim->stream_2_2_encryption) { 1890 ret = hdcp->shim->stream_2_2_encryption(connector, true); 1891 if (ret) { 1892 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n", 1893 connector->base.base.id, connector->base.name); 1894 return ret; 1895 } 1896 drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", 1897 transcoder_name(hdcp->stream_transcoder)); 1898 } 1899 1900 return 0; 1901 1902 link_recover: 1903 if (hdcp2_deauthenticate_port(connector) < 0) 1904 drm_dbg_kms(display->drm, "Port deauth failed.\n"); 1905 1906 dig_port->hdcp.auth_status = false; 1907 data->k = 0; 1908 1909 return ret; 1910 } 1911 1912 static int hdcp2_enable_encryption(struct intel_connector *connector) 1913 { 1914 struct intel_display *display = to_intel_display(connector); 1915 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1916 struct intel_hdcp *hdcp = &connector->hdcp; 1917 enum port port = dig_port->base.port; 1918 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 1919 int ret; 1920 1921 drm_WARN_ON(display->drm, 1922 intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & 1923 LINK_ENCRYPTION_STATUS); 1924 if (hdcp->shim->toggle_signalling) { 1925 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, 1926 true); 1927 if (ret) { 1928 drm_err(display->drm, 1929 "Failed to enable HDCP signalling. %d\n", 1930 ret); 1931 return ret; 1932 } 1933 } 1934 1935 if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & 1936 LINK_AUTH_STATUS) 1937 /* Link is Authenticated. 
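(the mandatory 200 ms gap between session key exchange and enabling encryption is handled by the caller, hdcp2_authenticate_and_encrypt()).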
Now set for Encryption */
1938 intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
1939 0, CTL_LINK_ENCRYPTION_REQ);
1940
1941 ret = intel_de_wait_for_set(display,
1942 HDCP2_STATUS(display, cpu_transcoder,
1943 port),
1944 LINK_ENCRYPTION_STATUS,
1945 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1946 dig_port->hdcp.auth_status = true;
1947
1948 return ret;
1949 }
1950
1951 static int hdcp2_disable_encryption(struct intel_connector *connector)
1952 {
1953 struct intel_display *display = to_intel_display(connector);
1954 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1955 struct intel_hdcp *hdcp = &connector->hdcp;
1956 enum port port = dig_port->base.port;
1957 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1958 int ret;
1959
1960 drm_WARN_ON(display->drm,
1961 !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1962 LINK_ENCRYPTION_STATUS));
1963
1964 intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
1965 CTL_LINK_ENCRYPTION_REQ, 0);
1966
1967 ret = intel_de_wait_for_clear(display,
1968 HDCP2_STATUS(display, cpu_transcoder,
1969 port),
1970 LINK_ENCRYPTION_STATUS,
1971 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1972 if (ret == -ETIMEDOUT)
1973 drm_dbg_kms(display->drm, "Disable encryption timed out");
1974
1975 if (hdcp->shim->toggle_signalling) {
1976 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1977 false);
1978 if (ret) {
1979 drm_err(display->drm,
1980 "Failed to disable HDCP signalling. %d\n",
1981 ret);
1982 return ret;
1983 }
1984 }
1985
1986 return ret;
1987 }
1988
1989 static int
1990 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1991 {
1992 struct intel_display *display = to_intel_display(connector);
1993 int i, tries = 3, ret;
1994
1995 if (!connector->hdcp.is_repeater)
1996 return 0;
1997
1998 for (i = 0; i < tries; i++) {
1999 ret = _hdcp2_propagate_stream_management_info(connector);
2000 if (!ret)
2001 break;
2002
2003 /* Let's restart the auth in case of seq_num_m roll over */
2004 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
2005 drm_dbg_kms(display->drm,
2006 "seq_num_m roll over.(%d)\n", ret);
2007 break;
2008 }
2009
2010 drm_dbg_kms(display->drm,
2011 "HDCP2 stream management %d of %d Failed.(%d)\n",
2012 i + 1, tries, ret);
2013 }
2014
2015 return ret;
2016 }
2017
2018 static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
2019 struct intel_connector *connector)
2020 {
2021 struct intel_display *display = to_intel_display(connector);
2022 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2023 int ret = 0, i, tries = 3;
2024
2025 for (i = 0; i < tries && !dig_port->hdcp.auth_status; i++) {
2026 ret = hdcp2_authenticate_sink(connector);
2027 if (!ret) {
2028 ret = intel_hdcp_prepare_streams(state, connector);
2029 if (ret) {
2030 drm_dbg_kms(display->drm,
2031 "Prepare stream failed.(%d)\n",
2032 ret);
2033 break;
2034 }
2035
2036 ret = hdcp2_propagate_stream_management_info(connector);
2037 if (ret) {
2038 drm_dbg_kms(display->drm,
2039 "Stream management failed.(%d)\n",
2040 ret);
2041 break;
2042 }
2043
2044 ret = hdcp2_authenticate_port(connector);
2045 if (!ret)
2046 break;
2047 drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
2048 ret);
2049 }
2050
2051 /* Clearing the mei hdcp session */
2052 drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
2053 i + 1, tries, ret);
2054 if (hdcp2_deauthenticate_port(connector) < 0)
2055 drm_dbg_kms(display->drm, "Port deauth failed.\n");
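/* The firmware session is torn down after every failed attempt so that the next iteration starts from a clean AKE; once the tries are exhausted, ret still holds the last failure and is returned below. */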
2056 } 2057 2058 if (!ret && !dig_port->hdcp.auth_status) { 2059 /* 2060 * Ensuring the required 200mSec min time interval between 2061 * Session Key Exchange and encryption. 2062 */ 2063 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); 2064 ret = hdcp2_enable_encryption(connector); 2065 if (ret < 0) { 2066 drm_dbg_kms(display->drm, 2067 "Encryption Enable Failed.(%d)\n", ret); 2068 if (hdcp2_deauthenticate_port(connector) < 0) 2069 drm_dbg_kms(display->drm, "Port deauth failed.\n"); 2070 } 2071 } 2072 2073 if (!ret) 2074 ret = hdcp2_enable_stream_encryption(connector); 2075 2076 return ret; 2077 } 2078 2079 static int _intel_hdcp2_enable(struct intel_atomic_state *state, 2080 struct intel_connector *connector) 2081 { 2082 struct intel_display *display = to_intel_display(connector); 2083 struct intel_hdcp *hdcp = &connector->hdcp; 2084 int ret; 2085 2086 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n", 2087 connector->base.base.id, connector->base.name, 2088 hdcp->content_type); 2089 2090 intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, false); 2091 2092 ret = hdcp2_authenticate_and_encrypt(state, connector); 2093 if (ret) { 2094 drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", 2095 hdcp->content_type, ret); 2096 return ret; 2097 } 2098 2099 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n", 2100 connector->base.base.id, connector->base.name, 2101 hdcp->content_type); 2102 2103 hdcp->hdcp2_encrypted = true; 2104 return 0; 2105 } 2106 2107 static int 2108 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery) 2109 { 2110 struct intel_display *display = to_intel_display(connector); 2111 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2112 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 2113 struct intel_hdcp *hdcp = &connector->hdcp; 2114 int ret; 2115 2116 drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n", 2117 connector->base.base.id, connector->base.name); 2118 2119 if (hdcp->shim->stream_2_2_encryption) { 2120 ret = hdcp->shim->stream_2_2_encryption(connector, false); 2121 if (ret) { 2122 drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n", 2123 connector->base.base.id, connector->base.name); 2124 return ret; 2125 } 2126 drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n", 2127 transcoder_name(hdcp->stream_transcoder)); 2128 2129 if (dig_port->hdcp.num_streams > 0 && !hdcp2_link_recovery) 2130 return 0; 2131 } 2132 2133 ret = hdcp2_disable_encryption(connector); 2134 2135 if (hdcp2_deauthenticate_port(connector) < 0) 2136 drm_dbg_kms(display->drm, "Port deauth failed.\n"); 2137 2138 connector->hdcp.hdcp2_encrypted = false; 2139 dig_port->hdcp.auth_status = false; 2140 data->k = 0; 2141 2142 return ret; 2143 } 2144 2145 /* Implements the Link Integrity Check for HDCP2.2 */ 2146 static int intel_hdcp2_check_link(struct intel_connector *connector) 2147 { 2148 struct intel_display *display = to_intel_display(connector); 2149 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2150 struct intel_hdcp *hdcp = &connector->hdcp; 2151 enum port port = dig_port->base.port; 2152 enum transcoder cpu_transcoder; 2153 int ret = 0; 2154 2155 mutex_lock(&hdcp->mutex); 2156 mutex_lock(&dig_port->hdcp.mutex); 2157 cpu_transcoder = hdcp->cpu_transcoder; 2158 2159 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ 2160 if (hdcp->value != 
DRM_MODE_CONTENT_PROTECTION_ENABLED || 2161 !hdcp->hdcp2_encrypted) { 2162 ret = -EINVAL; 2163 goto out; 2164 } 2165 2166 if (drm_WARN_ON(display->drm, 2167 !intel_hdcp2_in_use(display, cpu_transcoder, port))) { 2168 drm_err(display->drm, 2169 "HDCP2.2 link stopped the encryption, %x\n", 2170 intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port))); 2171 ret = -ENXIO; 2172 _intel_hdcp2_disable(connector, true); 2173 intel_hdcp_update_value(connector, 2174 DRM_MODE_CONTENT_PROTECTION_DESIRED, 2175 true); 2176 goto out; 2177 } 2178 2179 ret = hdcp->shim->check_2_2_link(dig_port, connector); 2180 if (ret == HDCP_LINK_PROTECTED) { 2181 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 2182 intel_hdcp_update_value(connector, 2183 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2184 true); 2185 } 2186 goto out; 2187 } 2188 2189 if (ret == HDCP_TOPOLOGY_CHANGE) { 2190 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2191 goto out; 2192 2193 drm_dbg_kms(display->drm, 2194 "HDCP2.2 Downstream topology change\n"); 2195 2196 ret = hdcp2_authenticate_repeater_topology(connector); 2197 if (!ret) { 2198 intel_hdcp_update_value(connector, 2199 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2200 true); 2201 goto out; 2202 } 2203 2204 drm_dbg_kms(display->drm, 2205 "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n", 2206 connector->base.base.id, connector->base.name, 2207 ret); 2208 } else { 2209 drm_dbg_kms(display->drm, 2210 "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n", 2211 connector->base.base.id, connector->base.name); 2212 } 2213 2214 ret = _intel_hdcp2_disable(connector, true); 2215 if (ret) { 2216 drm_err(display->drm, 2217 "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n", 2218 connector->base.base.id, connector->base.name, ret); 2219 intel_hdcp_update_value(connector, 2220 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2221 goto out; 2222 } 2223 2224 intel_hdcp_update_value(connector, 2225 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2226 out: 2227 mutex_unlock(&dig_port->hdcp.mutex); 2228 mutex_unlock(&hdcp->mutex); 2229 return ret; 2230 } 2231 2232 static void intel_hdcp_check_work(struct work_struct *work) 2233 { 2234 struct intel_hdcp *hdcp = container_of(to_delayed_work(work), 2235 struct intel_hdcp, 2236 check_work); 2237 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 2238 struct intel_display *display = to_intel_display(connector); 2239 struct drm_i915_private *i915 = to_i915(display->drm); 2240 2241 if (drm_connector_is_unregistered(&connector->base)) 2242 return; 2243 2244 if (!intel_hdcp2_check_link(connector)) 2245 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2246 DRM_HDCP2_CHECK_PERIOD_MS); 2247 else if (!intel_hdcp_check_link(connector)) 2248 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2249 DRM_HDCP_CHECK_PERIOD_MS); 2250 } 2251 2252 static int i915_hdcp_component_bind(struct device *drv_kdev, 2253 struct device *mei_kdev, void *data) 2254 { 2255 struct intel_display *display = to_intel_display(drv_kdev); 2256 2257 drm_dbg(display->drm, "I915 HDCP comp bind\n"); 2258 mutex_lock(&display->hdcp.hdcp_mutex); 2259 display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data; 2260 display->hdcp.arbiter->hdcp_dev = mei_kdev; 2261 mutex_unlock(&display->hdcp.hdcp_mutex); 2262 2263 return 0; 2264 } 2265 2266 static void i915_hdcp_component_unbind(struct device *drv_kdev, 2267 struct device *mei_kdev, void *data) 2268 { 2269 struct intel_display *display = to_intel_display(drv_kdev); 2270 2271 drm_dbg(display->drm, "I915 
HDCP comp unbind\n");
2272 mutex_lock(&display->hdcp.hdcp_mutex);
2273 display->hdcp.arbiter = NULL;
2274 mutex_unlock(&display->hdcp.hdcp_mutex);
2275 }
2276
2277 static const struct component_ops i915_hdcp_ops = {
2278 .bind = i915_hdcp_component_bind,
2279 .unbind = i915_hdcp_component_unbind,
2280 };
2281
2282 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2283 {
2284 switch (port) {
2285 case PORT_A:
2286 return HDCP_DDI_A;
2287 case PORT_B ... PORT_F:
2288 return (enum hdcp_ddi)port;
2289 default:
2290 return HDCP_DDI_INVALID_PORT;
2291 }
2292 }
2293
2294 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2295 {
2296 switch (cpu_transcoder) {
2297 case TRANSCODER_A ... TRANSCODER_D:
2298 return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2299 default: /* eDP and DSI transcoders are not HDCP capable */
2300 return HDCP_INVALID_TRANSCODER;
2301 }
2302 }
2303
2304 static int initialize_hdcp_port_data(struct intel_connector *connector,
2305 struct intel_digital_port *dig_port,
2306 const struct intel_hdcp_shim *shim)
2307 {
2308 struct intel_display *display = to_intel_display(connector);
2309 struct hdcp_port_data *data = &dig_port->hdcp.port_data;
2310 enum port port = dig_port->base.port;
2311
2312 if (DISPLAY_VER(display) < 12)
2313 data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
2314 else
2315 /*
2316 * As per the ME FW API expectation, for GEN 12+, hdcp_ddi is filled
2317 * with zero (INVALID PORT index).
2318 */
2319 data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
2320
2321 /*
2322 * The associated transcoder is set and updated at modeset, so
2323 * hdcp_transcoder is initialized here to the invalid transcoder index.
2324 * Platforms before Gen12 retain the invalid value forever.
2325 */
2326 data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
2327
2328 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2329 data->protocol = (u8)shim->protocol;
2330
2331 if (!data->streams)
2332 data->streams = kcalloc(INTEL_NUM_PIPES(display),
2333 sizeof(struct hdcp2_streamid_type),
2334 GFP_KERNEL);
2335 if (!data->streams) {
2336 drm_err(display->drm, "Out of Memory\n");
2337 return -ENOMEM;
2338 }
2339
2340 return 0;
2341 }
2342
2343 static bool is_hdcp2_supported(struct intel_display *display)
2344 {
2345 if (USE_HDCP_GSC(display))
2346 return true;
2347
2348 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2349 return false;
2350
2351 return DISPLAY_VER(display) >= 10 ||
2352 display->platform.kabylake ||
2353 display->platform.coffeelake ||
2354 display->platform.cometlake;
2355 }
2356
2357 void intel_hdcp_component_init(struct intel_display *display)
2358 {
2359 int ret;
2360
2361 if (!is_hdcp2_supported(display))
2362 return;
2363
2364 mutex_lock(&display->hdcp.hdcp_mutex);
2365 drm_WARN_ON(display->drm, display->hdcp.comp_added);
2366
2367 display->hdcp.comp_added = true;
2368 mutex_unlock(&display->hdcp.hdcp_mutex);
2369 if (USE_HDCP_GSC(display))
2370 ret = intel_hdcp_gsc_init(display);
2371 else
2372 ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
2373 I915_COMPONENT_HDCP);
2374
2375 if (ret < 0) {
2376 drm_dbg_kms(display->drm, "Failed at fw component add (%d)\n",
2377 ret);
2378 mutex_lock(&display->hdcp.hdcp_mutex);
2379 display->hdcp.comp_added = false;
2380 mutex_unlock(&display->hdcp.hdcp_mutex);
2381 return;
2382 }
2383 }
2384
2385 static void intel_hdcp2_init(struct intel_connector *connector,
2386 struct intel_digital_port *dig_port,
2387 const struct intel_hdcp_shim *shim)
2388 {
2389 struct intel_display *display = to_intel_display(connector);
2390
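/* hdcp2_supported is only advertised below once initialize_hdcp_port_data() has set up the firmware port data, including the per-pipe streams array. */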
struct intel_hdcp *hdcp = &connector->hdcp; 2391 int ret; 2392 2393 ret = initialize_hdcp_port_data(connector, dig_port, shim); 2394 if (ret) { 2395 drm_dbg_kms(display->drm, "Mei hdcp data init failed\n"); 2396 return; 2397 } 2398 2399 hdcp->hdcp2_supported = true; 2400 } 2401 2402 int intel_hdcp_init(struct intel_connector *connector, 2403 struct intel_digital_port *dig_port, 2404 const struct intel_hdcp_shim *shim) 2405 { 2406 struct intel_display *display = to_intel_display(connector); 2407 struct intel_hdcp *hdcp = &connector->hdcp; 2408 int ret; 2409 2410 if (!shim) 2411 return -EINVAL; 2412 2413 if (is_hdcp2_supported(display)) 2414 intel_hdcp2_init(connector, dig_port, shim); 2415 2416 ret = drm_connector_attach_content_protection_property(&connector->base, 2417 hdcp->hdcp2_supported); 2418 if (ret) { 2419 hdcp->hdcp2_supported = false; 2420 kfree(dig_port->hdcp.port_data.streams); 2421 return ret; 2422 } 2423 2424 hdcp->shim = shim; 2425 mutex_init(&hdcp->mutex); 2426 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); 2427 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); 2428 init_waitqueue_head(&hdcp->cp_irq_queue); 2429 2430 return 0; 2431 } 2432 2433 static int _intel_hdcp_enable(struct intel_atomic_state *state, 2434 struct intel_encoder *encoder, 2435 const struct intel_crtc_state *pipe_config, 2436 const struct drm_connector_state *conn_state) 2437 { 2438 struct intel_display *display = to_intel_display(encoder); 2439 struct drm_i915_private *i915 = to_i915(display->drm); 2440 struct intel_connector *connector = 2441 to_intel_connector(conn_state->connector); 2442 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2443 struct intel_hdcp *hdcp = &connector->hdcp; 2444 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; 2445 int ret = -EINVAL; 2446 2447 if (!hdcp->shim) 2448 return -ENOENT; 2449 2450 if (!connector->encoder) { 2451 drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n", 2452 connector->base.base.id, connector->base.name); 2453 return -ENODEV; 2454 } 2455 2456 mutex_lock(&hdcp->mutex); 2457 mutex_lock(&dig_port->hdcp.mutex); 2458 drm_WARN_ON(display->drm, 2459 hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); 2460 hdcp->content_type = (u8)conn_state->hdcp_content_type; 2461 2462 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { 2463 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder; 2464 hdcp->stream_transcoder = pipe_config->cpu_transcoder; 2465 } else { 2466 hdcp->cpu_transcoder = pipe_config->cpu_transcoder; 2467 hdcp->stream_transcoder = INVALID_TRANSCODER; 2468 } 2469 2470 if (DISPLAY_VER(display) >= 12) 2471 dig_port->hdcp.port_data.hdcp_transcoder = 2472 intel_get_hdcp_transcoder(hdcp->cpu_transcoder); 2473 2474 /* 2475 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup 2476 * is capable of HDCP2.2, it is preferred to use HDCP2.2. 2477 */ 2478 if (!hdcp->force_hdcp14 && intel_hdcp2_get_capability(connector)) { 2479 ret = _intel_hdcp2_enable(state, connector); 2480 if (!ret) 2481 check_link_interval = 2482 DRM_HDCP2_CHECK_PERIOD_MS; 2483 } 2484 2485 if (hdcp->force_hdcp14) 2486 drm_dbg_kms(display->drm, "Forcing HDCP 1.4\n"); 2487 2488 /* 2489 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will 2490 * be attempted. 
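Type 1 content is defined to require HDCP 2.2 (or later), so when userspace asks for DRM_MODE_HDCP_CONTENT_TYPE1 there is no HDCP 1.4 fallback and the enable simply fails.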
2491 */ 2492 if (ret && intel_hdcp_get_capability(connector) && 2493 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { 2494 ret = intel_hdcp1_enable(connector); 2495 } 2496 2497 if (!ret) { 2498 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2499 check_link_interval); 2500 intel_hdcp_update_value(connector, 2501 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2502 true); 2503 } 2504 2505 mutex_unlock(&dig_port->hdcp.mutex); 2506 mutex_unlock(&hdcp->mutex); 2507 return ret; 2508 } 2509 2510 void intel_hdcp_enable(struct intel_atomic_state *state, 2511 struct intel_encoder *encoder, 2512 const struct intel_crtc_state *crtc_state, 2513 const struct drm_connector_state *conn_state) 2514 { 2515 struct intel_connector *connector = 2516 to_intel_connector(conn_state->connector); 2517 struct intel_hdcp *hdcp = &connector->hdcp; 2518 2519 /* 2520 * Enable hdcp if it's desired or if userspace is enabled and 2521 * driver set its state to undesired 2522 */ 2523 if (conn_state->content_protection == 2524 DRM_MODE_CONTENT_PROTECTION_DESIRED || 2525 (conn_state->content_protection == 2526 DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value == 2527 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2528 _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2529 } 2530 2531 int intel_hdcp_disable(struct intel_connector *connector) 2532 { 2533 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2534 struct intel_hdcp *hdcp = &connector->hdcp; 2535 int ret = 0; 2536 2537 if (!hdcp->shim) 2538 return -ENOENT; 2539 2540 mutex_lock(&hdcp->mutex); 2541 mutex_lock(&dig_port->hdcp.mutex); 2542 2543 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2544 goto out; 2545 2546 intel_hdcp_update_value(connector, 2547 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); 2548 if (hdcp->hdcp2_encrypted) 2549 ret = _intel_hdcp2_disable(connector, false); 2550 else if (hdcp->hdcp_encrypted) 2551 ret = _intel_hdcp_disable(connector); 2552 2553 out: 2554 mutex_unlock(&dig_port->hdcp.mutex); 2555 mutex_unlock(&hdcp->mutex); 2556 cancel_delayed_work_sync(&hdcp->check_work); 2557 return ret; 2558 } 2559 2560 void intel_hdcp_update_pipe(struct intel_atomic_state *state, 2561 struct intel_encoder *encoder, 2562 const struct intel_crtc_state *crtc_state, 2563 const struct drm_connector_state *conn_state) 2564 { 2565 struct intel_connector *connector = 2566 to_intel_connector(conn_state->connector); 2567 struct intel_hdcp *hdcp = &connector->hdcp; 2568 bool content_protection_type_changed, desired_and_not_enabled = false; 2569 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2570 2571 if (!connector->hdcp.shim) 2572 return; 2573 2574 content_protection_type_changed = 2575 (conn_state->hdcp_content_type != hdcp->content_type && 2576 conn_state->content_protection != 2577 DRM_MODE_CONTENT_PROTECTION_UNDESIRED); 2578 2579 /* 2580 * During the HDCP encryption session if Type change is requested, 2581 * disable the HDCP and re-enable it with new TYPE value. 2582 */ 2583 if (conn_state->content_protection == 2584 DRM_MODE_CONTENT_PROTECTION_UNDESIRED || 2585 content_protection_type_changed) 2586 intel_hdcp_disable(connector); 2587 2588 /* 2589 * Mark the hdcp state as DESIRED after the hdcp disable of type 2590 * change procedure. 
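prop_work then reports the DESIRED state to userspace; ENABLED is restored only after _intel_hdcp_enable() below succeeds with the new content type.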
2591 */ 2592 if (content_protection_type_changed) { 2593 mutex_lock(&hdcp->mutex); 2594 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 2595 drm_connector_get(&connector->base); 2596 if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) 2597 drm_connector_put(&connector->base); 2598 mutex_unlock(&hdcp->mutex); 2599 } 2600 2601 if (conn_state->content_protection == 2602 DRM_MODE_CONTENT_PROTECTION_DESIRED) { 2603 mutex_lock(&hdcp->mutex); 2604 /* Avoid enabling hdcp, if it already ENABLED */ 2605 desired_and_not_enabled = 2606 hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; 2607 mutex_unlock(&hdcp->mutex); 2608 /* 2609 * If HDCP already ENABLED and CP property is DESIRED, schedule 2610 * prop_work to update correct CP property to user space. 2611 */ 2612 if (!desired_and_not_enabled && !content_protection_type_changed) { 2613 drm_connector_get(&connector->base); 2614 if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) 2615 drm_connector_put(&connector->base); 2616 2617 } 2618 } 2619 2620 if (desired_and_not_enabled || content_protection_type_changed) 2621 _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2622 } 2623 2624 void intel_hdcp_cancel_works(struct intel_connector *connector) 2625 { 2626 if (!connector->hdcp.shim) 2627 return; 2628 2629 cancel_delayed_work_sync(&connector->hdcp.check_work); 2630 cancel_work_sync(&connector->hdcp.prop_work); 2631 } 2632 2633 void intel_hdcp_component_fini(struct intel_display *display) 2634 { 2635 mutex_lock(&display->hdcp.hdcp_mutex); 2636 if (!display->hdcp.comp_added) { 2637 mutex_unlock(&display->hdcp.hdcp_mutex); 2638 return; 2639 } 2640 2641 display->hdcp.comp_added = false; 2642 mutex_unlock(&display->hdcp.hdcp_mutex); 2643 2644 if (USE_HDCP_GSC(display)) 2645 intel_hdcp_gsc_fini(display); 2646 else 2647 component_del(display->drm->dev, &i915_hdcp_ops); 2648 } 2649 2650 void intel_hdcp_cleanup(struct intel_connector *connector) 2651 { 2652 struct intel_hdcp *hdcp = &connector->hdcp; 2653 2654 if (!hdcp->shim) 2655 return; 2656 2657 /* 2658 * If the connector is registered, it's possible userspace could kick 2659 * off another HDCP enable, which would re-spawn the workers. 2660 */ 2661 drm_WARN_ON(connector->base.dev, 2662 connector->base.registration_state == DRM_CONNECTOR_REGISTERED); 2663 2664 /* 2665 * Now that the connector is not registered, check_work won't be run, 2666 * but cancel any outstanding instances of it 2667 */ 2668 cancel_delayed_work_sync(&hdcp->check_work); 2669 2670 /* 2671 * We don't cancel prop_work in the same way as check_work since it 2672 * requires connection_mutex which could be held while calling this 2673 * function. Instead, we rely on the connector references grabbed before 2674 * scheduling prop_work to ensure the connector is alive when prop_work 2675 * is run. So if we're in the destroy path (which is where this 2676 * function should be called), we're "guaranteed" that prop_work is not 2677 * active (tl;dr This Should Never Happen). 
2678 */ 2679 drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); 2680 2681 mutex_lock(&hdcp->mutex); 2682 hdcp->shim = NULL; 2683 mutex_unlock(&hdcp->mutex); 2684 } 2685 2686 void intel_hdcp_atomic_check(struct drm_connector *connector, 2687 struct drm_connector_state *old_state, 2688 struct drm_connector_state *new_state) 2689 { 2690 u64 old_cp = old_state->content_protection; 2691 u64 new_cp = new_state->content_protection; 2692 struct drm_crtc_state *crtc_state; 2693 2694 if (!new_state->crtc) { 2695 /* 2696 * If the connector is being disabled with CP enabled, mark it 2697 * desired so it's re-enabled when the connector is brought back 2698 */ 2699 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) 2700 new_state->content_protection = 2701 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2702 return; 2703 } 2704 2705 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 2706 new_state->crtc); 2707 /* 2708 * Fix the HDCP uapi content protection state in case of modeset. 2709 * FIXME: As per HDCP content protection property uapi doc, an uevent() 2710 * need to be sent if there is transition from ENABLED->DESIRED. 2711 */ 2712 if (drm_atomic_crtc_needs_modeset(crtc_state) && 2713 (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED && 2714 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2715 new_state->content_protection = 2716 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2717 2718 /* 2719 * Nothing to do if the state didn't change, or HDCP was activated since 2720 * the last commit. And also no change in hdcp content type. 2721 */ 2722 if (old_cp == new_cp || 2723 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && 2724 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) { 2725 if (old_state->hdcp_content_type == 2726 new_state->hdcp_content_type) 2727 return; 2728 } 2729 2730 crtc_state->mode_changed = true; 2731 } 2732 2733 /* Handles the CP_IRQ raised from the DP HDCP sink */ 2734 void intel_hdcp_handle_cp_irq(struct intel_connector *connector) 2735 { 2736 struct intel_hdcp *hdcp = &connector->hdcp; 2737 struct intel_display *display = to_intel_display(connector); 2738 struct drm_i915_private *i915 = to_i915(display->drm); 2739 2740 if (!hdcp->shim) 2741 return; 2742 2743 atomic_inc(&connector->hdcp.cp_irq_count); 2744 wake_up_all(&connector->hdcp.cp_irq_queue); 2745 2746 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0); 2747 } 2748 2749 static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector, 2750 bool remote_req) 2751 { 2752 bool hdcp_cap = false, hdcp2_cap = false; 2753 2754 if (!connector->hdcp.shim) { 2755 seq_puts(m, "No Connector Support"); 2756 goto out; 2757 } 2758 2759 if (remote_req) { 2760 intel_hdcp_get_remote_capability(connector, &hdcp_cap, &hdcp2_cap); 2761 } else { 2762 hdcp_cap = intel_hdcp_get_capability(connector); 2763 hdcp2_cap = intel_hdcp2_get_capability(connector); 2764 } 2765 2766 if (hdcp_cap) 2767 seq_puts(m, "HDCP1.4 "); 2768 if (hdcp2_cap) 2769 seq_puts(m, "HDCP2.2 "); 2770 2771 if (!hdcp_cap && !hdcp2_cap) 2772 seq_puts(m, "None"); 2773 2774 out: 2775 seq_puts(m, "\n"); 2776 } 2777 2778 void intel_hdcp_info(struct seq_file *m, struct intel_connector *connector) 2779 { 2780 seq_puts(m, "\tHDCP version: "); 2781 if (connector->mst.dp) { 2782 __intel_hdcp_info(m, connector, true); 2783 seq_puts(m, "\tMST Hub HDCP version: "); 2784 } 2785 __intel_hdcp_info(m, connector, false); 2786 } 2787 2788 static int intel_hdcp_sink_capability_show(struct seq_file *m, void *data) 2789 { 2790 struct intel_connector *connector = m->private; 
2791 struct intel_display *display = to_intel_display(connector); 2792 int ret; 2793 2794 ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); 2795 if (ret) 2796 return ret; 2797 2798 if (!connector->base.encoder || 2799 connector->base.status != connector_status_connected) { 2800 ret = -ENODEV; 2801 goto out; 2802 } 2803 2804 seq_printf(m, "%s:%d HDCP version: ", connector->base.name, 2805 connector->base.base.id); 2806 __intel_hdcp_info(m, connector, false); 2807 2808 out: 2809 drm_modeset_unlock(&display->drm->mode_config.connection_mutex); 2810 2811 return ret; 2812 } 2813 DEFINE_SHOW_ATTRIBUTE(intel_hdcp_sink_capability); 2814 2815 static ssize_t intel_hdcp_force_14_write(struct file *file, 2816 const char __user *ubuf, 2817 size_t len, loff_t *offp) 2818 { 2819 struct seq_file *m = file->private_data; 2820 struct intel_connector *connector = m->private; 2821 struct intel_hdcp *hdcp = &connector->hdcp; 2822 bool force_hdcp14 = false; 2823 int ret; 2824 2825 if (len == 0) 2826 return 0; 2827 2828 ret = kstrtobool_from_user(ubuf, len, &force_hdcp14); 2829 if (ret < 0) 2830 return ret; 2831 2832 hdcp->force_hdcp14 = force_hdcp14; 2833 *offp += len; 2834 2835 return len; 2836 } 2837 2838 static int intel_hdcp_force_14_show(struct seq_file *m, void *data) 2839 { 2840 struct intel_connector *connector = m->private; 2841 struct intel_display *display = to_intel_display(connector); 2842 struct intel_encoder *encoder = intel_attached_encoder(connector); 2843 struct intel_hdcp *hdcp = &connector->hdcp; 2844 struct drm_crtc *crtc; 2845 int ret; 2846 2847 if (!encoder) 2848 return -ENODEV; 2849 2850 ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); 2851 if (ret) 2852 return ret; 2853 2854 crtc = connector->base.state->crtc; 2855 if (connector->base.status != connector_status_connected || !crtc) { 2856 ret = -ENODEV; 2857 goto out; 2858 } 2859 2860 seq_printf(m, "%s\n", 2861 str_yes_no(hdcp->force_hdcp14)); 2862 out: 2863 drm_modeset_unlock(&display->drm->mode_config.connection_mutex); 2864 2865 return ret; 2866 } 2867 2868 static int intel_hdcp_force_14_open(struct inode *inode, 2869 struct file *file) 2870 { 2871 return single_open(file, intel_hdcp_force_14_show, 2872 inode->i_private); 2873 } 2874 2875 static const struct file_operations intel_hdcp_force_14_fops = { 2876 .owner = THIS_MODULE, 2877 .open = intel_hdcp_force_14_open, 2878 .read = seq_read, 2879 .llseek = seq_lseek, 2880 .release = single_release, 2881 .write = intel_hdcp_force_14_write 2882 }; 2883 2884 void intel_hdcp_connector_debugfs_add(struct intel_connector *connector) 2885 { 2886 struct dentry *root = connector->base.debugfs_entry; 2887 int connector_type = connector->base.connector_type; 2888 2889 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || 2890 connector_type == DRM_MODE_CONNECTOR_HDMIA || 2891 connector_type == DRM_MODE_CONNECTOR_HDMIB) { 2892 debugfs_create_file("i915_hdcp_sink_capability", 0444, root, 2893 connector, &intel_hdcp_sink_capability_fops); 2894 debugfs_create_file("i915_force_hdcp14", 0644, root, 2895 connector, &intel_hdcp_force_14_fops); 2896 } 2897 } 2898
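/*
 * Illustrative debugfs usage (the exact path depends on the DRM card index
 * and connector name; card 0 and connector DP-1 are assumed here):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_force_hdcp14
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_hdcp_sink_capability
 *
 * The first forces the HDCP 1.4 path on the next enable, the second prints
 * the sink's advertised HDCP capability.
 */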