/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 * Copyright © 2017-2019, Intel Corporation.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 * Ramalingam C <ramalingam.c@intel.com>
 */

#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
#include <linux/random.h>

#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
#include <drm/intel/intel_pcode_regs.h>

#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_jiffies.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc_message.h"
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_parent.h"
#include "intel_step.h"

/* HDCP 2.x messaging goes through the GSC firmware on display version 14+ */
#define USE_HDCP_GSC(__display)	(DISPLAY_VER(__display) >= 14)

/* Number of attempts at loading the HDCP 1.4 keys before giving up */
#define KEY_LOAD_TRIES	5
/* Number of HDCP 2.2 Locality Check retries allowed by the spec */
#define HDCP2_LC_RETRY_CNT	3

/*
 * Enable/disable the HDCP line-rekeying hardware workaround for HDMI
 * encoders. Depending on the display version/stepping, the rekey-disable
 * control lives in either TRANS_DDI_FUNC_CTL or CHICKEN_TRANS; on
 * platforms where no bit is selected below, this is a no-op.
 *
 * Note the inverted polarity: @enable true clears the *_REKEY_DISABLE bit
 * (i.e. turns rekeying on), @enable false sets it.
 */
static void
intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
				     struct intel_hdcp *hdcp,
				     bool enable)
{
	struct intel_display *display = to_intel_display(encoder);
	i915_reg_t rekey_reg;
	u32 rekey_bit = 0;

	/* Here we assume HDMI is in TMDS mode of operation */
	if (!intel_encoder_is_hdmi(encoder))
		return;

	if (DISPLAY_VER(display) >= 30) {
		rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
		rekey_bit = XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
	} else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
		   IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) {
		rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
		rekey_bit = TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
	} else if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) {
		rekey_reg = CHICKEN_TRANS(display, hdcp->cpu_transcoder);
		rekey_bit = HDCP_LINE_REKEY_DISABLE;
	}

	if (rekey_bit)
		intel_de_rmw(display, rekey_reg, rekey_bit, enable ? 0 : rekey_bit);
}

/*
 * Return the DP MST VCPI for @connector's stream. HDMI and DP SST
 * connectors have no MST port and always report 0; a missing MST payload
 * also reports 0 (with a WARN).
 *
 * NOTE(review): the drm_modeset_lock() return value is ignored here; with a
 * non-NULL acquire_ctx it can return -EDEADLK — confirm callers retrigger
 * the acquire sequence on contention.
 */
static int intel_conn_to_vcpi(struct intel_atomic_state *state,
			      struct intel_connector *connector)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_atomic_payload *payload;
	struct drm_dp_mst_topology_state *mst_state;

	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
	if (!connector->mst.port)
		return 0;
	mgr = connector->mst.port->mgr;

	drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
	payload = drm_atomic_get_mst_payload_state(mst_state, connector->mst.port);
	if (drm_WARN_ON(mgr->dev, !payload))
		return 0;

	return payload->vcpi;
}

/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in the DP MST topology, because the security
 * firmware doesn't have any provision to mark the content_type for each
 * stream separately; it marks all available streams with the content_type
 * provided at the time of port authentication. This may prohibit userspace
 * from using type1 content on an HDCP 2.2 capable sink when other sinks in
 * the DP MST topology are not capable of HDCP 2.2. Though it is not
 * compulsory, the security firmware should change its policy to mark
 * different content_types for different streams.
 */
static int
intel_hdcp_required_content_stream(struct intel_atomic_state *state,
				   struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_list_iter conn_iter;
	struct intel_digital_port *conn_dig_port;
	struct intel_connector *connector;
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	bool enforce_type0 = false;
	int k;

	/* Port already authenticated: stream list is already programmed */
	if (dig_port->hdcp.auth_status)
		return 0;

	data->k = 0;

	/* If any sink on this port lacks type1 support, downgrade all to type0 */
	if (!dig_port->hdcp.mst_type1_capable)
		enforce_type0 = true;

	/* Collect the VCPI of every connected MST stream on this dig_port */
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.status == connector_status_disconnected)
			continue;

		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
			continue;

		conn_dig_port = intel_attached_dig_port(connector);
		if (conn_dig_port != dig_port)
			continue;

		data->streams[data->k].stream_id =
			intel_conn_to_vcpi(state, connector);
		data->k++;

		/* if there is only one active stream */
		if (intel_dp_mst_active_streams(&dig_port->dp) <= 1)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0))
		return -EINVAL;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;

	return 0;
}

/*
 * Populate dig_port's hdcp_port_data stream list ahead of authentication:
 * MST ports enumerate all active streams, SST/HDMI get a single stream 0
 * carrying the connector's requested content_type.
 */
static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
				      struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (intel_encoder_is_mst(intel_attached_encoder(connector)))
		return intel_hdcp_required_content_stream(state, dig_port);

	data->k = 1;
	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}

/* A valid KSV must contain exactly twenty 1 bits (and twenty 0 bits) */
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
	int i, ones = 0;
	/* KSV has 20 1's and 20 0's */
	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
		ones += hweight8(ksv[i]);
	if (ones != 20)
		return false;

	return true;
}

/*
 * Read the sink's Bksv via the shim, retrying once on an invalid value.
 * Returns 0 with a valid Bksv in @bksv, a shim error, or -ENODEV when the
 * Bksv never validates.
 */
static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
			       const struct intel_hdcp_shim *shim, u8 *bksv)
{
	struct intel_display *display = to_intel_display(dig_port);
	int ret, i, tries = 2;

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
		ret = shim->read_bksv(dig_port, bksv);
		if (ret)
			return ret;
		if (intel_hdcp_is_ksv_valid(bksv))
			break;
	}
	if (i == tries) {
		drm_dbg_kms(display->drm, "Bksv is invalid\n");
		return -ENODEV;
	}

	return 0;
}

/* Is HDCP1.4 capable on Platform and Sink */
static bool intel_hdcp_get_capability(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port;
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	bool capable = false;
	u8 bksv[5];	/* DRM_HDCP_KSV_LEN bytes */

	if (!intel_attached_encoder(connector))
		return capable;

	dig_port = intel_attached_dig_port(connector);

	if (!shim)
		return capable;

	/* Prefer the shim's dedicated query; else infer from a valid Bksv read */
	if (shim->hdcp_get_capability) {
		shim->hdcp_get_capability(dig_port, &capable);
	} else {
		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
			capable = true;
	}

	return capable;
}

/*
 * Check if the source has all the building blocks ready to make
 * HDCP 2.2 work
 */
static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* If MTL+ make sure gsc is loaded and proxy is setup */
	if (USE_HDCP_GSC(display)) {
		if (!intel_parent_hdcp_gsc_check_status(display))
			return false;
	}

	/* MEI/GSC interface is solid depending on which is used */
	mutex_lock(&display->hdcp.hdcp_mutex);
	if (!display->hdcp.comp_added || !display->hdcp.arbiter) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return false;
	}
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return true;
}

/* Is HDCP2.2 capable on Platform and Sink */
static bool intel_hdcp2_get_capability(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool capable = false;

	if (!intel_hdcp2_prerequisite(connector))
		return false;

	/* Sink's capability for HDCP2.2 */
	hdcp->shim->hdcp_2_2_get_capability(connector, &capable);

	return capable;
}

/*
 * Query the remote (downstream) sink's HDCP 1.4/2.2 capability via the shim.
 * If the shim has no such hook, the outputs are left untouched — callers are
 * expected to have pre-initialized *hdcp_capable/*hdcp2_capable.
 */
static void intel_hdcp_get_remote_capability(struct intel_connector *connector,
					     bool *hdcp_capable,
					     bool *hdcp2_capable)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim->get_remote_hdcp_capability)
		return;

	hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
					       hdcp2_capable);

	/* Source-side HDCP 2.2 must also be ready, or 2.2 is unusable */
	if (!intel_hdcp2_prerequisite(connector))
		*hdcp2_capable = false;
}

/* Is HDCP 1.4 encryption currently enabled on this transcoder/port? */
static bool intel_hdcp_in_use(struct intel_display *display,
			      enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(display,
			     HDCP_STATUS(display, cpu_transcoder, port)) &
		HDCP_STATUS_ENC;
}

/* Is HDCP 2.2 link encryption currently enabled on this transcoder/port? */
static bool intel_hdcp2_in_use(struct intel_display *display,
			       enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(display,
			     HDCP2_STATUS(display, cpu_transcoder, port)) &
		LINK_ENCRYPTION_STATUS;
}

/*
 * Poll the repeater until its KSV FIFO is ready. Returns the poll timeout
 * error, the shim's read error, or 0 on readiness.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = poll_timeout_us(read_ret = shim->read_ksv_ready(dig_port, &ksv_ready),
			      read_ret || ksv_ready,
			      100 * 1000, 5 * 1000 * 1000, false);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;

	return 0;
}

/*
 * Can the HDCP 1.4 keys be loaded right now? Requires the platform's
 * relevant power well (PG1 / HSW global) to be enabled.
 */
static bool hdcp_key_loadable(struct intel_display *display)
{
	enum i915_power_well_id id;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (display->platform.haswell || display->platform.broadwell)
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	/* PG1 (power well #1) needs to be enabled */
	with_intel_display_rpm(display)
		enabled = intel_display_power_well_is_enabled(display, id);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we won't land here. So we are assuming
	 * that cdclk is already on.
	 */

	return enabled;
}

/* Trigger a key clear and acknowledge all key/fuse status bits */
static void intel_hdcp_clear_keys(struct intel_display *display)
{
	intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	intel_de_write(display, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}

/*
 * Load the HDCP 1.4 keys into the hardware (platform dependent trigger) and
 * hand the Aksv to the display engine. Returns 0 if keys are (or become)
 * loaded, -ENXIO when loading failed or is impossible, or a pcode error.
 */
static int intel_hdcp_load_keys(struct intel_display *display)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded successfully */
	val = intel_de_read(display, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, it's an error state.
	 */
	if (display->platform.haswell || display->platform.broadwell)
		if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
		ret = intel_parent_pcode_write(display, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(display->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = intel_de_wait_ms(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE,
			       HDCP_KEY_LOAD_DONE, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}

/*
 * Push one 32-bit word into the hardware SHA-1 text register, waiting for
 * the engine to be ready to accept it. Returns updated SHA-1 index.
 */
static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
{
	intel_de_write(display, HDCP_SHA_TEXT, sha_text);
	if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
		drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * Select the HDCP_REP_CTL repeater-present + SHA1-M0 bits for the
 * transcoder (display version 12+) or port (older platforms). Returns 0 and
 * logs an error for an unknown transcoder/port.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_display *display,
				enum transcoder cpu_transcoder, enum port port)
{
	if (DISPLAY_VER(display) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
				HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
				HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
				HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
				HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(display->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return 0;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(display->drm, "Unknown port %d\n", port);
		return 0;
	}
}

/*
 * Validate the repeater's V' against the hardware-computed SHA-1 over the
 * KSV list || BINFO/BSTATUS || M0. The write order and HDCP_REP_CTL text
 * sizes below follow the programming guide exactly — do not reorder.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port);
	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(display, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(display, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(display, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(display, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL,
				     HDCP_SHA1_COMPLETE, 1)) {
		drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}

/* Implements Part 2 of the HDCP authorization procedure */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display.
	 * As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(display->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Reject the topology if any downstream KSV is on the revocation list */
	if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime at least twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}

/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	u32 val;
	/* unions give both the register-sized and shim byte-stream views */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_get_capability) {
		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(display->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(display,
			       HDCP_ANINIT(display, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set_ms(display,
				     HDCP_STATUS(display, cpu_transcoder, port),
				     HDCP_STATUS_AN_READY, 1)) {
		drm_err(display->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(display,
				  HDCP_ANLO(display, cpu_transcoder, port));
	an.reg[1] = intel_de_read(display,
				  HDCP_ANHI(display, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' generation window (spec: 100ms from Aksv) starts now */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) {
		drm_err(display->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(display, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
			      val & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC),
			      100, 1000, false);
	if (ret) {
		drm_err(display->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates two more reattempts to read R0, in case
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(display,
			       HDCP_RPRIME(display, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
				      val & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC),
				      100, 1000, false);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "Timed out waiting for Ri prime match (%x)\n", val);
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set_ms(display,
				     HDCP_STATUS(display, cpu_transcoder, port),
				     HDCP_STATUS_ENC,
				     HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}

/*
 * Tear down HDCP 1.4: stop per-stream encryption (MST), then disable port
 * encryption, clear the repeater control bits and drop HDCP signalling.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->hdcp.num_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear_ms(display,
				       HDCP_STATUS(display, cpu_transcoder, port),
				       ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
						   port);
	intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(display->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(display->drm, "HDCP is disabled\n");
	return 0;
}

/*
 * Bring up HDCP 1.4: load the keys (with retries), disable line rekeying
 * and run the authentication protocol, retrying the full auth up to 3
 * times as the spec expects.
 */
static int intel_hdcp1_enable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
		    connector->base.base.id, connector->base.name);

	if (!hdcp_key_loadable(display)) {
		drm_err(display->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(display);
		if (!ret)
			break;
		intel_hdcp_clear_keys(display);
	}
	if (ret) {
		drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, true);

	/* In case of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(display->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}

/* Map an embedded struct intel_hdcp back to its owning connector */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}

/*
 * Update the connector's content-protection value, maintaining the
 * per-port count of streams with HDCP enabled, and optionally schedule the
 * property update work. Both hdcp->mutex and dig_port->hdcp.mutex must be
 * held (WARNed below).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp.mutex));

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(display->drm, dig_port->hdcp.num_streams == 0))
			dig_port->hdcp.num_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->hdcp.num_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference is dropped by prop_work if the queue fails below */
		drm_connector_get(&connector->base);
		if
(!queue_work(display->wq.unordered, &hdcp->prop_work)) 1106 drm_connector_put(&connector->base); 1107 } 1108 } 1109 1110 /* Implements Part 3 of the HDCP authorization procedure */ 1111 static int intel_hdcp_check_link(struct intel_connector *connector) 1112 { 1113 struct intel_display *display = to_intel_display(connector); 1114 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1115 struct intel_hdcp *hdcp = &connector->hdcp; 1116 enum port port = dig_port->base.port; 1117 enum transcoder cpu_transcoder; 1118 int ret = 0; 1119 1120 mutex_lock(&hdcp->mutex); 1121 mutex_lock(&dig_port->hdcp.mutex); 1122 1123 cpu_transcoder = hdcp->cpu_transcoder; 1124 1125 /* Check_link valid only when HDCP1.4 is enabled */ 1126 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 1127 !hdcp->hdcp_encrypted) { 1128 ret = -EINVAL; 1129 goto out; 1130 } 1131 1132 if (drm_WARN_ON(display->drm, 1133 !intel_hdcp_in_use(display, cpu_transcoder, port))) { 1134 drm_err(display->drm, 1135 "[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n", 1136 connector->base.base.id, connector->base.name, 1137 intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port))); 1138 ret = -ENXIO; 1139 intel_hdcp_update_value(connector, 1140 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1141 true); 1142 goto out; 1143 } 1144 1145 if (hdcp->shim->check_link(dig_port, connector)) { 1146 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 1147 intel_hdcp_update_value(connector, 1148 DRM_MODE_CONTENT_PROTECTION_ENABLED, true); 1149 } 1150 goto out; 1151 } 1152 1153 drm_dbg_kms(display->drm, 1154 "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n", 1155 connector->base.base.id, connector->base.name); 1156 1157 ret = _intel_hdcp_disable(connector); 1158 if (ret) { 1159 drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret); 1160 intel_hdcp_update_value(connector, 1161 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1162 true); 1163 goto out; 1164 } 1165 1166 ret = 
intel_hdcp1_enable(connector); 1167 if (ret) { 1168 drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret); 1169 intel_hdcp_update_value(connector, 1170 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1171 true); 1172 goto out; 1173 } 1174 1175 out: 1176 mutex_unlock(&dig_port->hdcp.mutex); 1177 mutex_unlock(&hdcp->mutex); 1178 return ret; 1179 } 1180 1181 static void intel_hdcp_prop_work(struct work_struct *work) 1182 { 1183 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, 1184 prop_work); 1185 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 1186 struct intel_display *display = to_intel_display(connector); 1187 1188 drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL); 1189 mutex_lock(&hdcp->mutex); 1190 1191 /* 1192 * This worker is only used to flip between ENABLED/DESIRED. Either of 1193 * those to UNDESIRED is handled by core. If value == UNDESIRED, 1194 * we're running just after hdcp has been disabled, so just exit 1195 */ 1196 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 1197 drm_hdcp_update_content_protection(&connector->base, 1198 hdcp->value); 1199 1200 mutex_unlock(&hdcp->mutex); 1201 drm_modeset_unlock(&display->drm->mode_config.connection_mutex); 1202 1203 drm_connector_put(&connector->base); 1204 } 1205 1206 bool is_hdcp_supported(struct intel_display *display, enum port port) 1207 { 1208 return DISPLAY_RUNTIME_INFO(display)->has_hdcp && 1209 (DISPLAY_VER(display) >= 12 || port < PORT_E); 1210 } 1211 1212 static int 1213 hdcp2_prepare_ake_init(struct intel_connector *connector, 1214 struct hdcp2_ake_init *ake_data) 1215 { 1216 struct intel_display *display = to_intel_display(connector); 1217 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1218 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1219 struct i915_hdcp_arbiter *arbiter; 1220 int ret; 1221 1222 mutex_lock(&display->hdcp.hdcp_mutex); 1223 arbiter = display->hdcp.arbiter; 1224 1225 if (!arbiter || 
!arbiter->ops) { 1226 mutex_unlock(&display->hdcp.hdcp_mutex); 1227 return -EINVAL; 1228 } 1229 1230 ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data); 1231 if (ret) 1232 drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n", 1233 ret); 1234 mutex_unlock(&display->hdcp.hdcp_mutex); 1235 1236 return ret; 1237 } 1238 1239 static int 1240 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, 1241 struct hdcp2_ake_send_cert *rx_cert, 1242 bool *paired, 1243 struct hdcp2_ake_no_stored_km *ek_pub_km, 1244 size_t *msg_sz) 1245 { 1246 struct intel_display *display = to_intel_display(connector); 1247 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1248 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1249 struct i915_hdcp_arbiter *arbiter; 1250 int ret; 1251 1252 mutex_lock(&display->hdcp.hdcp_mutex); 1253 arbiter = display->hdcp.arbiter; 1254 1255 if (!arbiter || !arbiter->ops) { 1256 mutex_unlock(&display->hdcp.hdcp_mutex); 1257 return -EINVAL; 1258 } 1259 1260 ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data, 1261 rx_cert, paired, 1262 ek_pub_km, msg_sz); 1263 if (ret < 0) 1264 drm_dbg_kms(display->drm, "Verify rx_cert failed. 
%d\n", 1265 ret); 1266 mutex_unlock(&display->hdcp.hdcp_mutex); 1267 1268 return ret; 1269 } 1270 1271 static int hdcp2_verify_hprime(struct intel_connector *connector, 1272 struct hdcp2_ake_send_hprime *rx_hprime) 1273 { 1274 struct intel_display *display = to_intel_display(connector); 1275 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1276 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1277 struct i915_hdcp_arbiter *arbiter; 1278 int ret; 1279 1280 mutex_lock(&display->hdcp.hdcp_mutex); 1281 arbiter = display->hdcp.arbiter; 1282 1283 if (!arbiter || !arbiter->ops) { 1284 mutex_unlock(&display->hdcp.hdcp_mutex); 1285 return -EINVAL; 1286 } 1287 1288 ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); 1289 if (ret < 0) 1290 drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret); 1291 mutex_unlock(&display->hdcp.hdcp_mutex); 1292 1293 return ret; 1294 } 1295 1296 static int 1297 hdcp2_store_pairing_info(struct intel_connector *connector, 1298 struct hdcp2_ake_send_pairing_info *pairing_info) 1299 { 1300 struct intel_display *display = to_intel_display(connector); 1301 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1302 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1303 struct i915_hdcp_arbiter *arbiter; 1304 int ret; 1305 1306 mutex_lock(&display->hdcp.hdcp_mutex); 1307 arbiter = display->hdcp.arbiter; 1308 1309 if (!arbiter || !arbiter->ops) { 1310 mutex_unlock(&display->hdcp.hdcp_mutex); 1311 return -EINVAL; 1312 } 1313 1314 ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); 1315 if (ret < 0) 1316 drm_dbg_kms(display->drm, "Store pairing info failed. 
%d\n", 1317 ret); 1318 mutex_unlock(&display->hdcp.hdcp_mutex); 1319 1320 return ret; 1321 } 1322 1323 static int 1324 hdcp2_prepare_lc_init(struct intel_connector *connector, 1325 struct hdcp2_lc_init *lc_init) 1326 { 1327 struct intel_display *display = to_intel_display(connector); 1328 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1329 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1330 struct i915_hdcp_arbiter *arbiter; 1331 int ret; 1332 1333 mutex_lock(&display->hdcp.hdcp_mutex); 1334 arbiter = display->hdcp.arbiter; 1335 1336 if (!arbiter || !arbiter->ops) { 1337 mutex_unlock(&display->hdcp.hdcp_mutex); 1338 return -EINVAL; 1339 } 1340 1341 ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); 1342 if (ret < 0) 1343 drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n", 1344 ret); 1345 mutex_unlock(&display->hdcp.hdcp_mutex); 1346 1347 return ret; 1348 } 1349 1350 static int 1351 hdcp2_verify_lprime(struct intel_connector *connector, 1352 struct hdcp2_lc_send_lprime *rx_lprime) 1353 { 1354 struct intel_display *display = to_intel_display(connector); 1355 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1356 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1357 struct i915_hdcp_arbiter *arbiter; 1358 int ret; 1359 1360 mutex_lock(&display->hdcp.hdcp_mutex); 1361 arbiter = display->hdcp.arbiter; 1362 1363 if (!arbiter || !arbiter->ops) { 1364 mutex_unlock(&display->hdcp.hdcp_mutex); 1365 return -EINVAL; 1366 } 1367 1368 ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); 1369 if (ret < 0) 1370 drm_dbg_kms(display->drm, "Verify L_Prime failed. 
%d\n", 1371 ret); 1372 mutex_unlock(&display->hdcp.hdcp_mutex); 1373 1374 return ret; 1375 } 1376 1377 static int hdcp2_prepare_skey(struct intel_connector *connector, 1378 struct hdcp2_ske_send_eks *ske_data) 1379 { 1380 struct intel_display *display = to_intel_display(connector); 1381 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1382 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1383 struct i915_hdcp_arbiter *arbiter; 1384 int ret; 1385 1386 mutex_lock(&display->hdcp.hdcp_mutex); 1387 arbiter = display->hdcp.arbiter; 1388 1389 if (!arbiter || !arbiter->ops) { 1390 mutex_unlock(&display->hdcp.hdcp_mutex); 1391 return -EINVAL; 1392 } 1393 1394 ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); 1395 if (ret < 0) 1396 drm_dbg_kms(display->drm, "Get session key failed. %d\n", 1397 ret); 1398 mutex_unlock(&display->hdcp.hdcp_mutex); 1399 1400 return ret; 1401 } 1402 1403 static int 1404 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, 1405 struct hdcp2_rep_send_receiverid_list 1406 *rep_topology, 1407 struct hdcp2_rep_send_ack *rep_send_ack) 1408 { 1409 struct intel_display *display = to_intel_display(connector); 1410 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1411 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1412 struct i915_hdcp_arbiter *arbiter; 1413 int ret; 1414 1415 mutex_lock(&display->hdcp.hdcp_mutex); 1416 arbiter = display->hdcp.arbiter; 1417 1418 if (!arbiter || !arbiter->ops) { 1419 mutex_unlock(&display->hdcp.hdcp_mutex); 1420 return -EINVAL; 1421 } 1422 1423 ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev, 1424 data, 1425 rep_topology, 1426 rep_send_ack); 1427 if (ret < 0) 1428 drm_dbg_kms(display->drm, 1429 "Verify rep topology failed. 
%d\n", ret); 1430 mutex_unlock(&display->hdcp.hdcp_mutex); 1431 1432 return ret; 1433 } 1434 1435 static int 1436 hdcp2_verify_mprime(struct intel_connector *connector, 1437 struct hdcp2_rep_stream_ready *stream_ready) 1438 { 1439 struct intel_display *display = to_intel_display(connector); 1440 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1441 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1442 struct i915_hdcp_arbiter *arbiter; 1443 int ret; 1444 1445 mutex_lock(&display->hdcp.hdcp_mutex); 1446 arbiter = display->hdcp.arbiter; 1447 1448 if (!arbiter || !arbiter->ops) { 1449 mutex_unlock(&display->hdcp.hdcp_mutex); 1450 return -EINVAL; 1451 } 1452 1453 ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); 1454 if (ret < 0) 1455 drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret); 1456 mutex_unlock(&display->hdcp.hdcp_mutex); 1457 1458 return ret; 1459 } 1460 1461 static int hdcp2_authenticate_port(struct intel_connector *connector) 1462 { 1463 struct intel_display *display = to_intel_display(connector); 1464 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1465 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 1466 struct i915_hdcp_arbiter *arbiter; 1467 int ret; 1468 1469 mutex_lock(&display->hdcp.hdcp_mutex); 1470 arbiter = display->hdcp.arbiter; 1471 1472 if (!arbiter || !arbiter->ops) { 1473 mutex_unlock(&display->hdcp.hdcp_mutex); 1474 return -EINVAL; 1475 } 1476 1477 ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); 1478 if (ret < 0) 1479 drm_dbg_kms(display->drm, "Enable hdcp auth failed. 
%d\n", 1480 ret); 1481 mutex_unlock(&display->hdcp.hdcp_mutex); 1482 1483 return ret; 1484 } 1485 1486 static int hdcp2_close_session(struct intel_connector *connector) 1487 { 1488 struct intel_display *display = to_intel_display(connector); 1489 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1490 struct i915_hdcp_arbiter *arbiter; 1491 int ret; 1492 1493 mutex_lock(&display->hdcp.hdcp_mutex); 1494 arbiter = display->hdcp.arbiter; 1495 1496 if (!arbiter || !arbiter->ops) { 1497 mutex_unlock(&display->hdcp.hdcp_mutex); 1498 return -EINVAL; 1499 } 1500 1501 ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, 1502 &dig_port->hdcp.port_data); 1503 mutex_unlock(&display->hdcp.hdcp_mutex); 1504 1505 return ret; 1506 } 1507 1508 static int hdcp2_deauthenticate_port(struct intel_connector *connector) 1509 { 1510 return hdcp2_close_session(connector); 1511 } 1512 1513 /* Authentication flow starts from here */ 1514 static int hdcp2_authentication_key_exchange(struct intel_connector *connector) 1515 { 1516 struct intel_display *display = to_intel_display(connector); 1517 struct intel_digital_port *dig_port = 1518 intel_attached_dig_port(connector); 1519 struct intel_hdcp *hdcp = &connector->hdcp; 1520 union { 1521 struct hdcp2_ake_init ake_init; 1522 struct hdcp2_ake_send_cert send_cert; 1523 struct hdcp2_ake_no_stored_km no_stored_km; 1524 struct hdcp2_ake_send_hprime send_hprime; 1525 struct hdcp2_ake_send_pairing_info pairing_info; 1526 } msgs; 1527 const struct intel_hdcp_shim *shim = hdcp->shim; 1528 size_t size; 1529 int ret, i, max_retries; 1530 1531 /* Init for seq_num */ 1532 hdcp->seq_num_v = 0; 1533 hdcp->seq_num_m = 0; 1534 1535 if (intel_encoder_is_dp(&dig_port->base) || 1536 intel_encoder_is_mst(&dig_port->base)) 1537 max_retries = 10; 1538 else 1539 max_retries = 1; 1540 1541 ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init); 1542 if (ret < 0) 1543 return ret; 1544 1545 /* 1546 * Retry the first read and write to 
downstream at least 10 times 1547 * with a 50ms delay if not hdcp2 capable for DP/DPMST encoders 1548 * (dock decides to stop advertising hdcp2 capability for some reason). 1549 * The reason being that during suspend resume dock usually keeps the 1550 * HDCP2 registers inaccessible causing AUX error. This wouldn't be a 1551 * big problem if the userspace just kept retrying with some delay while 1552 * it continues to play low value content but most userspace applications 1553 * end up throwing an error when it receives one from KMD. This makes 1554 * sure we give the dock and the sink devices to complete its power cycle 1555 * and then try HDCP authentication. The values of 10 and delay of 50ms 1556 * was decided based on multiple trial and errors. 1557 */ 1558 for (i = 0; i < max_retries; i++) { 1559 if (!intel_hdcp2_get_capability(connector)) { 1560 msleep(50); 1561 continue; 1562 } 1563 1564 ret = shim->write_2_2_msg(connector, &msgs.ake_init, 1565 sizeof(msgs.ake_init)); 1566 if (ret < 0) 1567 continue; 1568 1569 ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT, 1570 &msgs.send_cert, sizeof(msgs.send_cert)); 1571 if (ret > 0) 1572 break; 1573 } 1574 1575 if (ret < 0) 1576 return ret; 1577 1578 if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { 1579 drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n"); 1580 return -EINVAL; 1581 } 1582 1583 hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); 1584 1585 if (drm_hdcp_check_ksvs_revoked(display->drm, 1586 msgs.send_cert.cert_rx.receiver_id, 1587 1) > 0) { 1588 drm_err(display->drm, "Receiver ID is revoked\n"); 1589 return -EPERM; 1590 } 1591 1592 /* 1593 * Here msgs.no_stored_km will hold msgs corresponding to the km 1594 * stored also. 
1595 */ 1596 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert, 1597 &hdcp->is_paired, 1598 &msgs.no_stored_km, &size); 1599 if (ret < 0) 1600 return ret; 1601 1602 ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size); 1603 if (ret < 0) 1604 return ret; 1605 1606 ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME, 1607 &msgs.send_hprime, sizeof(msgs.send_hprime)); 1608 if (ret < 0) 1609 return ret; 1610 1611 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime); 1612 if (ret < 0) 1613 return ret; 1614 1615 if (!hdcp->is_paired) { 1616 /* Pairing is required */ 1617 ret = shim->read_2_2_msg(connector, 1618 HDCP_2_2_AKE_SEND_PAIRING_INFO, 1619 &msgs.pairing_info, 1620 sizeof(msgs.pairing_info)); 1621 if (ret < 0) 1622 return ret; 1623 1624 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info); 1625 if (ret < 0) 1626 return ret; 1627 hdcp->is_paired = true; 1628 } 1629 1630 return 0; 1631 } 1632 1633 static int hdcp2_locality_check(struct intel_connector *connector) 1634 { 1635 struct intel_hdcp *hdcp = &connector->hdcp; 1636 union { 1637 struct hdcp2_lc_init lc_init; 1638 struct hdcp2_lc_send_lprime send_lprime; 1639 } msgs; 1640 const struct intel_hdcp_shim *shim = hdcp->shim; 1641 int tries = HDCP2_LC_RETRY_CNT, ret, i; 1642 1643 for (i = 0; i < tries; i++) { 1644 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init); 1645 if (ret < 0) 1646 continue; 1647 1648 ret = shim->write_2_2_msg(connector, &msgs.lc_init, 1649 sizeof(msgs.lc_init)); 1650 if (ret < 0) 1651 continue; 1652 1653 ret = shim->read_2_2_msg(connector, 1654 HDCP_2_2_LC_SEND_LPRIME, 1655 &msgs.send_lprime, 1656 sizeof(msgs.send_lprime)); 1657 if (ret < 0) 1658 continue; 1659 1660 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime); 1661 if (!ret) 1662 break; 1663 } 1664 1665 return ret; 1666 } 1667 1668 static int hdcp2_session_key_exchange(struct intel_connector *connector) 1669 { 1670 struct intel_hdcp *hdcp = &connector->hdcp; 1671 struct 
hdcp2_ske_send_eks send_eks;
	int ret;

	ret = hdcp2_prepare_skey(connector, &send_eks);
	if (ret < 0)
		return ret;

	ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
					sizeof(send_eks));
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Send one RepeaterAuth_Stream_Manage message for the currently prepared
 * streams and verify the repeater's Stream_Ready (M') response.
 * seq_num_m is consumed (incremented) even on failure, per spec.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Only transmit the entries actually in use. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	hdcp->seq_num_m++;

	return ret;
}

/*
 * Read and validate the repeater's ReceiverID_List: enforce topology limits,
 * Type 1 capability, seq_num_v monotonicity and revocation, then send the
 * firmware-prepared ack back to the repeater.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp.mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	if (!dig_port->hdcp.mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(display->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(display->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(display->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Full HDCP 2.2 sink authentication: AKE, locality check, session key
 * exchange, optional stream-type configuration and repeater topology auth.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/*
 * Turn on per-stream HDCP 2.2 encryption; the link itself must already be
 * encrypted. On a dead link, deauthenticate the port and reset port state.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	      LINK_ENCRYPTION_STATUS)) {
		drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	dig_port->hdcp.auth_status = false;
	data->k = 0;

	return ret;
}

/*
 * Request link encryption on an authenticated HDCP 2.2 link and wait for the
 * hardware to report it active.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(display->drm,
		    intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(display->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set_ms(display,
				       HDCP2_STATUS(display, cpu_transcoder, port),
				       LINK_ENCRYPTION_STATUS,
				       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp.auth_status = true;

	return ret;
}

/*
 * Drop the link-encryption request, wait for hardware to confirm, and stop
 * HDCP signalling on the DDI.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(display->drm,
		    !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
		      LINK_ENCRYPTION_STATUS));

	intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear_ms(display,
					 HDCP2_STATUS(display, cpu_transcoder, port),
					 LINK_ENCRYPTION_STATUS,
					 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(display->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(display->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}

/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(); gives up
 * early when seq_num_m rolls over so the caller reauthenticates.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(display->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(display->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}

/*
 * Authenticate the sink (with retries), propagate stream management info,
 * then enable link and stream encryption.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
					  struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp.auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(state, connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Prepare stream failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(display->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp.auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(display->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}

/*
 * Enable HDCP 2.2 on this connector: disable line rekeying for HDMI first,
 * then run the full authenticate-and-encrypt flow.
 */
static int _intel_hdcp2_enable(struct intel_atomic_state *state,
			       struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, false);

	ret = hdcp2_authenticate_and_encrypt(state, connector);
	if (ret) {
		drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}

/*
 * Disable HDCP 2.2: stop stream encryption (keeping port encryption alive
 * while other MST streams still use it, unless recovering the link), then
 * disable link encryption and close the firmware session.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp.port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		if (dig_port->hdcp.num_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp.auth_status = false;
	data->k = 0;

	return ret;
}

/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp.mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(display->drm,
			!intel_hdcp2_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED,
						true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(display->drm,
			    "HDCP2.2 Downstream topology change\n");

		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED,
						true);
			goto out;
		}

		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
			    connector->base.base.id, connector->base.name,
			    ret);
	} else {
		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
			    connector->base.base.id, connector->base.name);
	}

	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
			connector->base.base.id, connector->base.name, ret);
		intel_hdcp_update_value(connector,
2211 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2212 goto out; 2213 } 2214 2215 intel_hdcp_update_value(connector, 2216 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2217 out: 2218 mutex_unlock(&dig_port->hdcp.mutex); 2219 mutex_unlock(&hdcp->mutex); 2220 return ret; 2221 } 2222 2223 static void intel_hdcp_check_work(struct work_struct *work) 2224 { 2225 struct intel_hdcp *hdcp = container_of(to_delayed_work(work), 2226 struct intel_hdcp, 2227 check_work); 2228 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 2229 struct intel_display *display = to_intel_display(connector); 2230 2231 if (drm_connector_is_unregistered(&connector->base)) 2232 return; 2233 2234 if (!hdcp->force_hdcp14 && !intel_hdcp2_check_link(connector)) 2235 queue_delayed_work(display->wq.unordered, &hdcp->check_work, 2236 DRM_HDCP2_CHECK_PERIOD_MS); 2237 else if (!intel_hdcp_check_link(connector)) 2238 queue_delayed_work(display->wq.unordered, &hdcp->check_work, 2239 DRM_HDCP_CHECK_PERIOD_MS); 2240 } 2241 2242 static int i915_hdcp_component_bind(struct device *drv_kdev, 2243 struct device *mei_kdev, void *data) 2244 { 2245 struct intel_display *display = to_intel_display(drv_kdev); 2246 2247 drm_dbg(display->drm, "I915 HDCP comp bind\n"); 2248 mutex_lock(&display->hdcp.hdcp_mutex); 2249 display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data; 2250 display->hdcp.arbiter->hdcp_dev = mei_kdev; 2251 mutex_unlock(&display->hdcp.hdcp_mutex); 2252 2253 return 0; 2254 } 2255 2256 static void i915_hdcp_component_unbind(struct device *drv_kdev, 2257 struct device *mei_kdev, void *data) 2258 { 2259 struct intel_display *display = to_intel_display(drv_kdev); 2260 2261 drm_dbg(display->drm, "I915 HDCP comp unbind\n"); 2262 mutex_lock(&display->hdcp.hdcp_mutex); 2263 display->hdcp.arbiter = NULL; 2264 mutex_unlock(&display->hdcp.hdcp_mutex); 2265 } 2266 2267 static const struct component_ops i915_hdcp_ops = { 2268 .bind = i915_hdcp_component_bind, 2269 .unbind = i915_hdcp_component_unbind, 
2270 }; 2271 2272 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port) 2273 { 2274 switch (port) { 2275 case PORT_A: 2276 return HDCP_DDI_A; 2277 case PORT_B ... PORT_F: 2278 return (enum hdcp_ddi)port; 2279 default: 2280 return HDCP_DDI_INVALID_PORT; 2281 } 2282 } 2283 2284 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder) 2285 { 2286 switch (cpu_transcoder) { 2287 case TRANSCODER_A ... TRANSCODER_D: 2288 return (enum hdcp_transcoder)(cpu_transcoder | 0x10); 2289 default: /* eDP, DSI TRANSCODERS are non HDCP capable */ 2290 return HDCP_INVALID_TRANSCODER; 2291 } 2292 } 2293 2294 static int initialize_hdcp_port_data(struct intel_connector *connector, 2295 struct intel_digital_port *dig_port, 2296 const struct intel_hdcp_shim *shim) 2297 { 2298 struct intel_display *display = to_intel_display(connector); 2299 struct hdcp_port_data *data = &dig_port->hdcp.port_data; 2300 enum port port = dig_port->base.port; 2301 2302 if (DISPLAY_VER(display) < 12) 2303 data->hdcp_ddi = intel_get_hdcp_ddi_index(port); 2304 else 2305 /* 2306 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled 2307 * with zero(INVALID PORT index). 2308 */ 2309 data->hdcp_ddi = HDCP_DDI_INVALID_PORT; 2310 2311 /* 2312 * As associated transcoder is set and modified at modeset, here hdcp_transcoder 2313 * is initialized to zero (invalid transcoder index). This will be 2314 * retained for <Gen12 forever. 
2315 */ 2316 data->hdcp_transcoder = HDCP_INVALID_TRANSCODER; 2317 2318 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; 2319 data->protocol = (u8)shim->protocol; 2320 2321 if (!data->streams) 2322 data->streams = kzalloc_objs(struct hdcp2_streamid_type, 2323 INTEL_NUM_PIPES(display)); 2324 if (!data->streams) { 2325 drm_err(display->drm, "Out of Memory\n"); 2326 return -ENOMEM; 2327 } 2328 2329 return 0; 2330 } 2331 2332 static bool is_hdcp2_supported(struct intel_display *display) 2333 { 2334 if (USE_HDCP_GSC(display)) 2335 return true; 2336 2337 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) 2338 return false; 2339 2340 return DISPLAY_VER(display) >= 10 || 2341 display->platform.kabylake || 2342 display->platform.coffeelake || 2343 display->platform.cometlake; 2344 } 2345 2346 void intel_hdcp_component_init(struct intel_display *display) 2347 { 2348 int ret; 2349 2350 if (!is_hdcp2_supported(display)) 2351 return; 2352 2353 mutex_lock(&display->hdcp.hdcp_mutex); 2354 drm_WARN_ON(display->drm, display->hdcp.comp_added); 2355 2356 display->hdcp.comp_added = true; 2357 mutex_unlock(&display->hdcp.hdcp_mutex); 2358 if (USE_HDCP_GSC(display)) 2359 ret = intel_hdcp_gsc_init(display); 2360 else 2361 ret = component_add_typed(display->drm->dev, &i915_hdcp_ops, 2362 I915_COMPONENT_HDCP); 2363 2364 if (ret < 0) { 2365 drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n", 2366 ret); 2367 mutex_lock(&display->hdcp.hdcp_mutex); 2368 display->hdcp.comp_added = false; 2369 mutex_unlock(&display->hdcp.hdcp_mutex); 2370 return; 2371 } 2372 } 2373 2374 static void intel_hdcp2_init(struct intel_connector *connector, 2375 struct intel_digital_port *dig_port, 2376 const struct intel_hdcp_shim *shim) 2377 { 2378 struct intel_display *display = to_intel_display(connector); 2379 struct intel_hdcp *hdcp = &connector->hdcp; 2380 int ret; 2381 2382 ret = initialize_hdcp_port_data(connector, dig_port, shim); 2383 if (ret) { 2384 drm_dbg_kms(display->drm, "Mei hdcp data init failed\n"); 
2385 return; 2386 } 2387 2388 hdcp->hdcp2_supported = true; 2389 } 2390 2391 int intel_hdcp_init(struct intel_connector *connector, 2392 struct intel_digital_port *dig_port, 2393 const struct intel_hdcp_shim *shim) 2394 { 2395 struct intel_display *display = to_intel_display(connector); 2396 struct intel_hdcp *hdcp = &connector->hdcp; 2397 int ret; 2398 2399 if (!shim) 2400 return -EINVAL; 2401 2402 if (is_hdcp2_supported(display)) 2403 intel_hdcp2_init(connector, dig_port, shim); 2404 2405 ret = drm_connector_attach_content_protection_property(&connector->base, 2406 hdcp->hdcp2_supported); 2407 if (ret) { 2408 hdcp->hdcp2_supported = false; 2409 kfree(dig_port->hdcp.port_data.streams); 2410 return ret; 2411 } 2412 2413 hdcp->shim = shim; 2414 mutex_init(&hdcp->mutex); 2415 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); 2416 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); 2417 init_waitqueue_head(&hdcp->cp_irq_queue); 2418 2419 return 0; 2420 } 2421 2422 static int _intel_hdcp_enable(struct intel_atomic_state *state, 2423 struct intel_encoder *encoder, 2424 const struct intel_crtc_state *pipe_config, 2425 const struct drm_connector_state *conn_state) 2426 { 2427 struct intel_display *display = to_intel_display(encoder); 2428 struct intel_connector *connector = 2429 to_intel_connector(conn_state->connector); 2430 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2431 struct intel_hdcp *hdcp = &connector->hdcp; 2432 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; 2433 int ret = -EINVAL; 2434 2435 if (!hdcp->shim) 2436 return -ENOENT; 2437 2438 mutex_lock(&hdcp->mutex); 2439 mutex_lock(&dig_port->hdcp.mutex); 2440 drm_WARN_ON(display->drm, 2441 hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); 2442 hdcp->content_type = (u8)conn_state->hdcp_content_type; 2443 2444 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { 2445 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder; 2446 
hdcp->stream_transcoder = pipe_config->cpu_transcoder; 2447 } else { 2448 hdcp->cpu_transcoder = pipe_config->cpu_transcoder; 2449 hdcp->stream_transcoder = INVALID_TRANSCODER; 2450 } 2451 2452 if (DISPLAY_VER(display) >= 12) 2453 dig_port->hdcp.port_data.hdcp_transcoder = 2454 intel_get_hdcp_transcoder(hdcp->cpu_transcoder); 2455 2456 /* 2457 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup 2458 * is capable of HDCP2.2, it is preferred to use HDCP2.2. 2459 */ 2460 if (!hdcp->force_hdcp14 && intel_hdcp2_get_capability(connector)) { 2461 ret = _intel_hdcp2_enable(state, connector); 2462 if (!ret) 2463 check_link_interval = 2464 DRM_HDCP2_CHECK_PERIOD_MS; 2465 } 2466 2467 if (hdcp->force_hdcp14) 2468 drm_dbg_kms(display->drm, "Forcing HDCP 1.4\n"); 2469 2470 /* 2471 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will 2472 * be attempted. 2473 */ 2474 if (ret && intel_hdcp_get_capability(connector) && 2475 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { 2476 ret = intel_hdcp1_enable(connector); 2477 } 2478 2479 if (!ret) { 2480 queue_delayed_work(display->wq.unordered, &hdcp->check_work, 2481 check_link_interval); 2482 intel_hdcp_update_value(connector, 2483 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2484 true); 2485 } 2486 2487 mutex_unlock(&dig_port->hdcp.mutex); 2488 mutex_unlock(&hdcp->mutex); 2489 return ret; 2490 } 2491 2492 void intel_hdcp_enable(struct intel_atomic_state *state, 2493 struct intel_encoder *encoder, 2494 const struct intel_crtc_state *crtc_state, 2495 const struct drm_connector_state *conn_state) 2496 { 2497 struct intel_connector *connector = 2498 to_intel_connector(conn_state->connector); 2499 struct intel_hdcp *hdcp = &connector->hdcp; 2500 2501 /* 2502 * Enable hdcp if it's desired or if userspace is enabled and 2503 * driver set its state to undesired 2504 */ 2505 if (conn_state->content_protection == 2506 DRM_MODE_CONTENT_PROTECTION_DESIRED || 2507 (conn_state->content_protection == 2508 
DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value == 2509 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2510 _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2511 } 2512 2513 int intel_hdcp_disable(struct intel_connector *connector) 2514 { 2515 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2516 struct intel_hdcp *hdcp = &connector->hdcp; 2517 int ret = 0; 2518 2519 if (!hdcp->shim) 2520 return -ENOENT; 2521 2522 mutex_lock(&hdcp->mutex); 2523 mutex_lock(&dig_port->hdcp.mutex); 2524 2525 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2526 goto out; 2527 2528 intel_hdcp_update_value(connector, 2529 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); 2530 if (hdcp->hdcp2_encrypted) 2531 ret = _intel_hdcp2_disable(connector, false); 2532 else if (hdcp->hdcp_encrypted) 2533 ret = _intel_hdcp_disable(connector); 2534 2535 out: 2536 mutex_unlock(&dig_port->hdcp.mutex); 2537 mutex_unlock(&hdcp->mutex); 2538 cancel_delayed_work_sync(&hdcp->check_work); 2539 return ret; 2540 } 2541 2542 void intel_hdcp_update_pipe(struct intel_atomic_state *state, 2543 struct intel_encoder *encoder, 2544 const struct intel_crtc_state *crtc_state, 2545 const struct drm_connector_state *conn_state) 2546 { 2547 struct intel_connector *connector = 2548 to_intel_connector(conn_state->connector); 2549 struct intel_hdcp *hdcp = &connector->hdcp; 2550 bool content_protection_type_changed, desired_and_not_enabled = false; 2551 struct intel_display *display = to_intel_display(connector); 2552 2553 if (!connector->hdcp.shim) 2554 return; 2555 2556 content_protection_type_changed = 2557 (conn_state->hdcp_content_type != hdcp->content_type && 2558 conn_state->content_protection != 2559 DRM_MODE_CONTENT_PROTECTION_UNDESIRED); 2560 2561 /* 2562 * During the HDCP encryption session if Type change is requested, 2563 * disable the HDCP and re-enable it with new TYPE value. 
2564 */ 2565 if (conn_state->content_protection == 2566 DRM_MODE_CONTENT_PROTECTION_UNDESIRED || 2567 content_protection_type_changed) 2568 intel_hdcp_disable(connector); 2569 2570 /* 2571 * Mark the hdcp state as DESIRED after the hdcp disable of type 2572 * change procedure. 2573 */ 2574 if (content_protection_type_changed) { 2575 mutex_lock(&hdcp->mutex); 2576 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 2577 drm_connector_get(&connector->base); 2578 if (!queue_work(display->wq.unordered, &hdcp->prop_work)) 2579 drm_connector_put(&connector->base); 2580 mutex_unlock(&hdcp->mutex); 2581 } 2582 2583 if (conn_state->content_protection == 2584 DRM_MODE_CONTENT_PROTECTION_DESIRED) { 2585 mutex_lock(&hdcp->mutex); 2586 /* Avoid enabling hdcp, if it already ENABLED */ 2587 desired_and_not_enabled = 2588 hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; 2589 mutex_unlock(&hdcp->mutex); 2590 /* 2591 * If HDCP already ENABLED and CP property is DESIRED, schedule 2592 * prop_work to update correct CP property to user space. 
2593 */ 2594 if (!desired_and_not_enabled && !content_protection_type_changed) { 2595 drm_connector_get(&connector->base); 2596 if (!queue_work(display->wq.unordered, &hdcp->prop_work)) 2597 drm_connector_put(&connector->base); 2598 2599 } 2600 } 2601 2602 if (desired_and_not_enabled || content_protection_type_changed) 2603 _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2604 } 2605 2606 void intel_hdcp_cancel_works(struct intel_connector *connector) 2607 { 2608 if (!connector->hdcp.shim) 2609 return; 2610 2611 cancel_delayed_work_sync(&connector->hdcp.check_work); 2612 cancel_work_sync(&connector->hdcp.prop_work); 2613 } 2614 2615 void intel_hdcp_component_fini(struct intel_display *display) 2616 { 2617 mutex_lock(&display->hdcp.hdcp_mutex); 2618 if (!display->hdcp.comp_added) { 2619 mutex_unlock(&display->hdcp.hdcp_mutex); 2620 return; 2621 } 2622 2623 display->hdcp.comp_added = false; 2624 mutex_unlock(&display->hdcp.hdcp_mutex); 2625 2626 if (USE_HDCP_GSC(display)) 2627 intel_hdcp_gsc_fini(display); 2628 else 2629 component_del(display->drm->dev, &i915_hdcp_ops); 2630 } 2631 2632 void intel_hdcp_cleanup(struct intel_connector *connector) 2633 { 2634 struct intel_hdcp *hdcp = &connector->hdcp; 2635 2636 if (!hdcp->shim) 2637 return; 2638 2639 /* 2640 * If the connector is registered, it's possible userspace could kick 2641 * off another HDCP enable, which would re-spawn the workers. 2642 */ 2643 drm_WARN_ON(connector->base.dev, 2644 connector->base.registration_state == DRM_CONNECTOR_REGISTERED); 2645 2646 /* 2647 * Now that the connector is not registered, check_work won't be run, 2648 * but cancel any outstanding instances of it 2649 */ 2650 cancel_delayed_work_sync(&hdcp->check_work); 2651 2652 /* 2653 * We don't cancel prop_work in the same way as check_work since it 2654 * requires connection_mutex which could be held while calling this 2655 * function. 
Instead, we rely on the connector references grabbed before 2656 * scheduling prop_work to ensure the connector is alive when prop_work 2657 * is run. So if we're in the destroy path (which is where this 2658 * function should be called), we're "guaranteed" that prop_work is not 2659 * active (tl;dr This Should Never Happen). 2660 */ 2661 drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); 2662 2663 mutex_lock(&hdcp->mutex); 2664 hdcp->shim = NULL; 2665 mutex_unlock(&hdcp->mutex); 2666 } 2667 2668 void intel_hdcp_atomic_check(struct drm_connector *connector, 2669 struct drm_connector_state *old_state, 2670 struct drm_connector_state *new_state) 2671 { 2672 u64 old_cp = old_state->content_protection; 2673 u64 new_cp = new_state->content_protection; 2674 struct drm_crtc_state *crtc_state; 2675 2676 if (!new_state->crtc) { 2677 /* 2678 * If the connector is being disabled with CP enabled, mark it 2679 * desired so it's re-enabled when the connector is brought back 2680 */ 2681 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) 2682 new_state->content_protection = 2683 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2684 return; 2685 } 2686 2687 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 2688 new_state->crtc); 2689 /* 2690 * Fix the HDCP uapi content protection state in case of modeset. 2691 * FIXME: As per HDCP content protection property uapi doc, an uevent() 2692 * need to be sent if there is transition from ENABLED->DESIRED. 2693 */ 2694 if (drm_atomic_crtc_needs_modeset(crtc_state) && 2695 (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED && 2696 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2697 new_state->content_protection = 2698 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2699 2700 /* 2701 * Nothing to do if the state didn't change, or HDCP was activated since 2702 * the last commit. And also no change in hdcp content type. 
2703 */ 2704 if (old_cp == new_cp || 2705 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && 2706 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) { 2707 if (old_state->hdcp_content_type == 2708 new_state->hdcp_content_type) 2709 return; 2710 } 2711 2712 crtc_state->mode_changed = true; 2713 } 2714 2715 /* Handles the CP_IRQ raised from the DP HDCP sink */ 2716 void intel_hdcp_handle_cp_irq(struct intel_connector *connector) 2717 { 2718 struct intel_hdcp *hdcp = &connector->hdcp; 2719 struct intel_display *display = to_intel_display(connector); 2720 2721 if (!hdcp->shim) 2722 return; 2723 2724 atomic_inc(&connector->hdcp.cp_irq_count); 2725 wake_up_all(&connector->hdcp.cp_irq_queue); 2726 2727 queue_delayed_work(display->wq.unordered, &hdcp->check_work, 0); 2728 } 2729 2730 static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector, 2731 bool remote_req) 2732 { 2733 bool hdcp_cap = false, hdcp2_cap = false; 2734 2735 if (!connector->hdcp.shim) { 2736 seq_puts(m, "No Connector Support"); 2737 goto out; 2738 } 2739 2740 if (remote_req) { 2741 intel_hdcp_get_remote_capability(connector, &hdcp_cap, &hdcp2_cap); 2742 } else { 2743 hdcp_cap = intel_hdcp_get_capability(connector); 2744 hdcp2_cap = intel_hdcp2_get_capability(connector); 2745 } 2746 2747 if (hdcp_cap) 2748 seq_puts(m, "HDCP1.4 "); 2749 if (hdcp2_cap) 2750 seq_puts(m, "HDCP2.2 "); 2751 2752 if (!hdcp_cap && !hdcp2_cap) 2753 seq_puts(m, "None"); 2754 2755 out: 2756 seq_puts(m, "\n"); 2757 } 2758 2759 void intel_hdcp_info(struct seq_file *m, struct intel_connector *connector) 2760 { 2761 seq_puts(m, "\tHDCP version: "); 2762 if (connector->mst.dp) { 2763 __intel_hdcp_info(m, connector, true); 2764 seq_puts(m, "\tMST Hub HDCP version: "); 2765 } 2766 __intel_hdcp_info(m, connector, false); 2767 } 2768 2769 static int intel_hdcp_sink_capability_show(struct seq_file *m, void *data) 2770 { 2771 struct intel_connector *connector = m->private; 2772 struct intel_display *display = 
to_intel_display(connector); 2773 int ret; 2774 2775 ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); 2776 if (ret) 2777 return ret; 2778 2779 if (!connector->base.encoder || 2780 connector->base.status != connector_status_connected) { 2781 ret = -ENODEV; 2782 goto out; 2783 } 2784 2785 seq_printf(m, "%s:%d HDCP version: ", connector->base.name, 2786 connector->base.base.id); 2787 __intel_hdcp_info(m, connector, false); 2788 2789 out: 2790 drm_modeset_unlock(&display->drm->mode_config.connection_mutex); 2791 2792 return ret; 2793 } 2794 DEFINE_SHOW_ATTRIBUTE(intel_hdcp_sink_capability); 2795 2796 static ssize_t intel_hdcp_force_14_write(struct file *file, 2797 const char __user *ubuf, 2798 size_t len, loff_t *offp) 2799 { 2800 struct seq_file *m = file->private_data; 2801 struct intel_connector *connector = m->private; 2802 struct intel_hdcp *hdcp = &connector->hdcp; 2803 bool force_hdcp14 = false; 2804 int ret; 2805 2806 if (len == 0) 2807 return 0; 2808 2809 ret = kstrtobool_from_user(ubuf, len, &force_hdcp14); 2810 if (ret < 0) 2811 return ret; 2812 2813 hdcp->force_hdcp14 = force_hdcp14; 2814 *offp += len; 2815 2816 return len; 2817 } 2818 2819 static int intel_hdcp_force_14_show(struct seq_file *m, void *data) 2820 { 2821 struct intel_connector *connector = m->private; 2822 struct intel_display *display = to_intel_display(connector); 2823 struct intel_encoder *encoder = intel_attached_encoder(connector); 2824 struct intel_hdcp *hdcp = &connector->hdcp; 2825 struct drm_crtc *crtc; 2826 int ret; 2827 2828 if (!encoder) 2829 return -ENODEV; 2830 2831 ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); 2832 if (ret) 2833 return ret; 2834 2835 crtc = connector->base.state->crtc; 2836 if (connector->base.status != connector_status_connected || !crtc) { 2837 ret = -ENODEV; 2838 goto out; 2839 } 2840 2841 seq_printf(m, "%s\n", 2842 str_yes_no(hdcp->force_hdcp14)); 2843 out: 2844 
drm_modeset_unlock(&display->drm->mode_config.connection_mutex); 2845 2846 return ret; 2847 } 2848 2849 static int intel_hdcp_force_14_open(struct inode *inode, 2850 struct file *file) 2851 { 2852 return single_open(file, intel_hdcp_force_14_show, 2853 inode->i_private); 2854 } 2855 2856 static const struct file_operations intel_hdcp_force_14_fops = { 2857 .owner = THIS_MODULE, 2858 .open = intel_hdcp_force_14_open, 2859 .read = seq_read, 2860 .llseek = seq_lseek, 2861 .release = single_release, 2862 .write = intel_hdcp_force_14_write 2863 }; 2864 2865 void intel_hdcp_connector_debugfs_add(struct intel_connector *connector) 2866 { 2867 struct dentry *root = connector->base.debugfs_entry; 2868 int connector_type = connector->base.connector_type; 2869 2870 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || 2871 connector_type == DRM_MODE_CONNECTOR_HDMIA || 2872 connector_type == DRM_MODE_CONNECTOR_HDMIB) { 2873 debugfs_create_file("i915_hdcp_sink_capability", 0444, root, 2874 connector, &intel_hdcp_sink_capability_fops); 2875 debugfs_create_file("i915_force_hdcp14", 0644, root, 2876 connector, &intel_hdcp_force_14_fops); 2877 } 2878 } 2879