1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright (C) 2017 Google, Inc. 4 * Copyright _ 2017-2019, Intel Corporation. 5 * 6 * Authors: 7 * Sean Paul <seanpaul@chromium.org> 8 * Ramalingam C <ramalingam.c@intel.com> 9 */ 10 11 #include <linux/component.h> 12 #include <linux/i2c.h> 13 #include <linux/random.h> 14 15 #include <drm/display/drm_hdcp_helper.h> 16 #include <drm/intel/i915_component.h> 17 18 #include "i915_drv.h" 19 #include "i915_reg.h" 20 #include "intel_connector.h" 21 #include "intel_de.h" 22 #include "intel_display_power.h" 23 #include "intel_display_power_well.h" 24 #include "intel_display_types.h" 25 #include "intel_hdcp.h" 26 #include "intel_hdcp_gsc.h" 27 #include "intel_hdcp_regs.h" 28 #include "intel_hdcp_shim.h" 29 #include "intel_pcode.h" 30 31 #define KEY_LOAD_TRIES 5 32 #define HDCP2_LC_RETRY_CNT 3 33 34 /* WA: 16022217614 */ 35 static void 36 intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder, 37 struct intel_hdcp *hdcp) 38 { 39 struct intel_display *display = to_intel_display(encoder); 40 41 /* Here we assume HDMI is in TMDS mode of operation */ 42 if (encoder->type != INTEL_OUTPUT_HDMI) 43 return; 44 45 if (DISPLAY_VER(display) >= 14) { 46 if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) 47 intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder), 48 0, HDCP_LINE_REKEY_DISABLE); 49 else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) || 50 IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) 51 intel_de_rmw(display, 52 TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder), 53 0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE); 54 } 55 } 56 57 static int intel_conn_to_vcpi(struct intel_atomic_state *state, 58 struct intel_connector *connector) 59 { 60 struct drm_dp_mst_topology_mgr *mgr; 61 struct drm_dp_mst_atomic_payload *payload; 62 struct drm_dp_mst_topology_state *mst_state; 63 int vcpi = 0; 64 65 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. 
*/ 66 if (!connector->port) 67 return 0; 68 mgr = connector->port->mgr; 69 70 drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx); 71 mst_state = to_drm_dp_mst_topology_state(mgr->base.state); 72 payload = drm_atomic_get_mst_payload_state(mst_state, connector->port); 73 if (drm_WARN_ON(mgr->dev, !payload)) 74 goto out; 75 76 vcpi = payload->vcpi; 77 if (drm_WARN_ON(mgr->dev, vcpi < 0)) { 78 vcpi = 0; 79 goto out; 80 } 81 out: 82 return vcpi; 83 } 84 85 /* 86 * intel_hdcp_required_content_stream selects the most highest common possible HDCP 87 * content_type for all streams in DP MST topology because security f/w doesn't 88 * have any provision to mark content_type for each stream separately, it marks 89 * all available streams with the content_type proivided at the time of port 90 * authentication. This may prohibit the userspace to use type1 content on 91 * HDCP 2.2 capable sink because of other sink are not capable of HDCP 2.2 in 92 * DP MST topology. Though it is not compulsory, security fw should change its 93 * policy to mark different content_types for different streams. 
94 */ 95 static int 96 intel_hdcp_required_content_stream(struct intel_atomic_state *state, 97 struct intel_digital_port *dig_port) 98 { 99 struct intel_display *display = to_intel_display(state); 100 struct drm_connector_list_iter conn_iter; 101 struct intel_digital_port *conn_dig_port; 102 struct intel_connector *connector; 103 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 104 bool enforce_type0 = false; 105 int k; 106 107 if (dig_port->hdcp_auth_status) 108 return 0; 109 110 data->k = 0; 111 112 if (!dig_port->hdcp_mst_type1_capable) 113 enforce_type0 = true; 114 115 drm_connector_list_iter_begin(display->drm, &conn_iter); 116 for_each_intel_connector_iter(connector, &conn_iter) { 117 if (connector->base.status == connector_status_disconnected) 118 continue; 119 120 if (!intel_encoder_is_mst(intel_attached_encoder(connector))) 121 continue; 122 123 conn_dig_port = intel_attached_dig_port(connector); 124 if (conn_dig_port != dig_port) 125 continue; 126 127 data->streams[data->k].stream_id = 128 intel_conn_to_vcpi(state, connector); 129 data->k++; 130 131 /* if there is only one active stream */ 132 if (dig_port->dp.active_mst_links <= 1) 133 break; 134 } 135 drm_connector_list_iter_end(&conn_iter); 136 137 if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0)) 138 return -EINVAL; 139 140 /* 141 * Apply common protection level across all streams in DP MST Topology. 142 * Use highest supported content type for all streams in DP MST Topology. 143 */ 144 for (k = 0; k < data->k; k++) 145 data->streams[k].stream_type = 146 enforce_type0 ? 
DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1; 147 148 return 0; 149 } 150 151 static int intel_hdcp_prepare_streams(struct intel_atomic_state *state, 152 struct intel_connector *connector) 153 { 154 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 155 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 156 struct intel_hdcp *hdcp = &connector->hdcp; 157 158 if (intel_encoder_is_mst(intel_attached_encoder(connector))) 159 return intel_hdcp_required_content_stream(state, dig_port); 160 161 data->k = 1; 162 data->streams[0].stream_id = 0; 163 data->streams[0].stream_type = hdcp->content_type; 164 165 return 0; 166 } 167 168 static 169 bool intel_hdcp_is_ksv_valid(u8 *ksv) 170 { 171 int i, ones = 0; 172 /* KSV has 20 1's and 20 0's */ 173 for (i = 0; i < DRM_HDCP_KSV_LEN; i++) 174 ones += hweight8(ksv[i]); 175 if (ones != 20) 176 return false; 177 178 return true; 179 } 180 181 static 182 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, 183 const struct intel_hdcp_shim *shim, u8 *bksv) 184 { 185 struct intel_display *display = to_intel_display(dig_port); 186 int ret, i, tries = 2; 187 188 /* HDCP spec states that we must retry the bksv if it is invalid */ 189 for (i = 0; i < tries; i++) { 190 ret = shim->read_bksv(dig_port, bksv); 191 if (ret) 192 return ret; 193 if (intel_hdcp_is_ksv_valid(bksv)) 194 break; 195 } 196 if (i == tries) { 197 drm_dbg_kms(display->drm, "Bksv is invalid\n"); 198 return -ENODEV; 199 } 200 201 return 0; 202 } 203 204 /* Is HDCP1.4 capable on Platform and Sink */ 205 bool intel_hdcp_get_capability(struct intel_connector *connector) 206 { 207 struct intel_digital_port *dig_port; 208 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 209 bool capable = false; 210 u8 bksv[5]; 211 212 if (!intel_attached_encoder(connector)) 213 return capable; 214 215 dig_port = intel_attached_dig_port(connector); 216 217 if (!shim) 218 return capable; 219 220 if (shim->hdcp_get_capability) { 
221 shim->hdcp_get_capability(dig_port, &capable); 222 } else { 223 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv)) 224 capable = true; 225 } 226 227 return capable; 228 } 229 230 /* 231 * Check if the source has all the building blocks ready to make 232 * HDCP 2.2 work 233 */ 234 static bool intel_hdcp2_prerequisite(struct intel_connector *connector) 235 { 236 struct intel_display *display = to_intel_display(connector); 237 struct intel_hdcp *hdcp = &connector->hdcp; 238 239 /* I915 support for HDCP2.2 */ 240 if (!hdcp->hdcp2_supported) 241 return false; 242 243 /* If MTL+ make sure gsc is loaded and proxy is setup */ 244 if (intel_hdcp_gsc_cs_required(display)) { 245 if (!intel_hdcp_gsc_check_status(display)) 246 return false; 247 } 248 249 /* MEI/GSC interface is solid depending on which is used */ 250 mutex_lock(&display->hdcp.hdcp_mutex); 251 if (!display->hdcp.comp_added || !display->hdcp.arbiter) { 252 mutex_unlock(&display->hdcp.hdcp_mutex); 253 return false; 254 } 255 mutex_unlock(&display->hdcp.hdcp_mutex); 256 257 return true; 258 } 259 260 /* Is HDCP2.2 capable on Platform and Sink */ 261 bool intel_hdcp2_get_capability(struct intel_connector *connector) 262 { 263 struct intel_hdcp *hdcp = &connector->hdcp; 264 bool capable = false; 265 266 if (!intel_hdcp2_prerequisite(connector)) 267 return false; 268 269 /* Sink's capability for HDCP2.2 */ 270 hdcp->shim->hdcp_2_2_get_capability(connector, &capable); 271 272 return capable; 273 } 274 275 void intel_hdcp_get_remote_capability(struct intel_connector *connector, 276 bool *hdcp_capable, 277 bool *hdcp2_capable) 278 { 279 struct intel_hdcp *hdcp = &connector->hdcp; 280 281 if (!hdcp->shim->get_remote_hdcp_capability) 282 return; 283 284 hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable, 285 hdcp2_capable); 286 287 if (!intel_hdcp2_prerequisite(connector)) 288 *hdcp2_capable = false; 289 } 290 291 static bool intel_hdcp_in_use(struct intel_display *display, 292 enum transcoder 
cpu_transcoder, enum port port) 293 { 294 return intel_de_read(display, 295 HDCP_STATUS(display, cpu_transcoder, port)) & 296 HDCP_STATUS_ENC; 297 } 298 299 static bool intel_hdcp2_in_use(struct intel_display *display, 300 enum transcoder cpu_transcoder, enum port port) 301 { 302 return intel_de_read(display, 303 HDCP2_STATUS(display, cpu_transcoder, port)) & 304 LINK_ENCRYPTION_STATUS; 305 } 306 307 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, 308 const struct intel_hdcp_shim *shim) 309 { 310 int ret, read_ret; 311 bool ksv_ready; 312 313 /* Poll for ksv list ready (spec says max time allowed is 5s) */ 314 ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port, 315 &ksv_ready), 316 read_ret || ksv_ready, 5 * 1000 * 1000, 1000, 317 100 * 1000); 318 if (ret) 319 return ret; 320 if (read_ret) 321 return read_ret; 322 if (!ksv_ready) 323 return -ETIMEDOUT; 324 325 return 0; 326 } 327 328 static bool hdcp_key_loadable(struct intel_display *display) 329 { 330 struct drm_i915_private *i915 = to_i915(display->drm); 331 enum i915_power_well_id id; 332 intel_wakeref_t wakeref; 333 bool enabled = false; 334 335 /* 336 * On HSW and BDW, Display HW loads the Key as soon as Display resumes. 337 * On all BXT+, SW can load the keys only when the PW#1 is turned on. 338 */ 339 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 340 id = HSW_DISP_PW_GLOBAL; 341 else 342 id = SKL_DISP_PW_1; 343 344 /* PG1 (power well #1) needs to be enabled */ 345 with_intel_runtime_pm(&i915->runtime_pm, wakeref) 346 enabled = intel_display_power_well_is_enabled(i915, id); 347 348 /* 349 * Another req for hdcp key loadability is enabled state of pll for 350 * cdclk. Without active crtc we wont land here. So we are assuming that 351 * cdclk is already on. 
352 */ 353 354 return enabled; 355 } 356 357 static void intel_hdcp_clear_keys(struct intel_display *display) 358 { 359 intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); 360 intel_de_write(display, HDCP_KEY_STATUS, 361 HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); 362 } 363 364 static int intel_hdcp_load_keys(struct intel_display *display) 365 { 366 struct drm_i915_private *i915 = to_i915(display->drm); 367 int ret; 368 u32 val; 369 370 val = intel_de_read(display, HDCP_KEY_STATUS); 371 if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) 372 return 0; 373 374 /* 375 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes 376 * out of reset. So if Key is not already loaded, its an error state. 377 */ 378 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 379 if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) 380 return -ENXIO; 381 382 /* 383 * Initiate loading the HDCP key from fuses. 384 * 385 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display 386 * version 9 platforms (minus BXT) differ in the key load trigger 387 * process from other platforms. These platforms use the GT Driver 388 * Mailbox interface. 
389 */ 390 if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) { 391 ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); 392 if (ret) { 393 drm_err(display->drm, 394 "Failed to initiate HDCP key load (%d)\n", 395 ret); 396 return ret; 397 } 398 } else { 399 intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); 400 } 401 402 /* Wait for the keys to load (500us) */ 403 ret = intel_de_wait_custom(display, HDCP_KEY_STATUS, 404 HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 405 10, 1, &val); 406 if (ret) 407 return ret; 408 else if (!(val & HDCP_KEY_LOAD_STATUS)) 409 return -ENXIO; 410 411 /* Send Aksv over to PCH display for use in authentication */ 412 intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); 413 414 return 0; 415 } 416 417 /* Returns updated SHA-1 index */ 418 static int intel_write_sha_text(struct intel_display *display, u32 sha_text) 419 { 420 intel_de_write(display, HDCP_SHA_TEXT, sha_text); 421 if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { 422 drm_err(display->drm, "Timed out waiting for SHA1 ready\n"); 423 return -ETIMEDOUT; 424 } 425 return 0; 426 } 427 428 static 429 u32 intel_hdcp_get_repeater_ctl(struct intel_display *display, 430 enum transcoder cpu_transcoder, enum port port) 431 { 432 if (DISPLAY_VER(display) >= 12) { 433 switch (cpu_transcoder) { 434 case TRANSCODER_A: 435 return HDCP_TRANSA_REP_PRESENT | 436 HDCP_TRANSA_SHA1_M0; 437 case TRANSCODER_B: 438 return HDCP_TRANSB_REP_PRESENT | 439 HDCP_TRANSB_SHA1_M0; 440 case TRANSCODER_C: 441 return HDCP_TRANSC_REP_PRESENT | 442 HDCP_TRANSC_SHA1_M0; 443 case TRANSCODER_D: 444 return HDCP_TRANSD_REP_PRESENT | 445 HDCP_TRANSD_SHA1_M0; 446 default: 447 drm_err(display->drm, "Unknown transcoder %d\n", 448 cpu_transcoder); 449 return 0; 450 } 451 } 452 453 switch (port) { 454 case PORT_A: 455 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; 456 case PORT_B: 457 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0; 458 case PORT_C: 459 return 
HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0; 460 case PORT_D: 461 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0; 462 case PORT_E: 463 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; 464 default: 465 drm_err(display->drm, "Unknown port %d\n", port); 466 return 0; 467 } 468 } 469 470 static 471 int intel_hdcp_validate_v_prime(struct intel_connector *connector, 472 const struct intel_hdcp_shim *shim, 473 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) 474 { 475 struct intel_display *display = to_intel_display(connector); 476 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 477 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 478 enum port port = dig_port->base.port; 479 u32 vprime, sha_text, sha_leftovers, rep_ctl; 480 int ret, i, j, sha_idx; 481 482 /* Process V' values from the receiver */ 483 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { 484 ret = shim->read_v_prime_part(dig_port, i, &vprime); 485 if (ret) 486 return ret; 487 intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime); 488 } 489 490 /* 491 * We need to write the concatenation of all device KSVs, BINFO (DP) || 492 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte 493 * stream is written via the HDCP_SHA_TEXT register in 32-bit 494 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This 495 * index will keep track of our progress through the 64 bytes as well as 496 * helping us work the 40-bit KSVs through our 32-bit register. 
497 * 498 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian 499 */ 500 sha_idx = 0; 501 sha_text = 0; 502 sha_leftovers = 0; 503 rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port); 504 intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 505 for (i = 0; i < num_downstream; i++) { 506 unsigned int sha_empty; 507 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; 508 509 /* Fill up the empty slots in sha_text and write it out */ 510 sha_empty = sizeof(sha_text) - sha_leftovers; 511 for (j = 0; j < sha_empty; j++) { 512 u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8); 513 sha_text |= ksv[j] << off; 514 } 515 516 ret = intel_write_sha_text(display, sha_text); 517 if (ret < 0) 518 return ret; 519 520 /* Programming guide writes this every 64 bytes */ 521 sha_idx += sizeof(sha_text); 522 if (!(sha_idx % 64)) 523 intel_de_write(display, HDCP_REP_CTL, 524 rep_ctl | HDCP_SHA1_TEXT_32); 525 526 /* Store the leftover bytes from the ksv in sha_text */ 527 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; 528 sha_text = 0; 529 for (j = 0; j < sha_leftovers; j++) 530 sha_text |= ksv[sha_empty + j] << 531 ((sizeof(sha_text) - j - 1) * 8); 532 533 /* 534 * If we still have room in sha_text for more data, continue. 535 * Otherwise, write it out immediately. 536 */ 537 if (sizeof(sha_text) > sha_leftovers) 538 continue; 539 540 ret = intel_write_sha_text(display, sha_text); 541 if (ret < 0) 542 return ret; 543 sha_leftovers = 0; 544 sha_text = 0; 545 sha_idx += sizeof(sha_text); 546 } 547 548 /* 549 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many 550 * bytes are leftover from the last ksv, we might be able to fit them 551 * all in sha_text (first 2 cases), or we might need to split them up 552 * into 2 writes (last 2 cases). 
553 */ 554 if (sha_leftovers == 0) { 555 /* Write 16 bits of text, 16 bits of M0 */ 556 intel_de_write(display, HDCP_REP_CTL, 557 rep_ctl | HDCP_SHA1_TEXT_16); 558 ret = intel_write_sha_text(display, 559 bstatus[0] << 8 | bstatus[1]); 560 if (ret < 0) 561 return ret; 562 sha_idx += sizeof(sha_text); 563 564 /* Write 32 bits of M0 */ 565 intel_de_write(display, HDCP_REP_CTL, 566 rep_ctl | HDCP_SHA1_TEXT_0); 567 ret = intel_write_sha_text(display, 0); 568 if (ret < 0) 569 return ret; 570 sha_idx += sizeof(sha_text); 571 572 /* Write 16 bits of M0 */ 573 intel_de_write(display, HDCP_REP_CTL, 574 rep_ctl | HDCP_SHA1_TEXT_16); 575 ret = intel_write_sha_text(display, 0); 576 if (ret < 0) 577 return ret; 578 sha_idx += sizeof(sha_text); 579 580 } else if (sha_leftovers == 1) { 581 /* Write 24 bits of text, 8 bits of M0 */ 582 intel_de_write(display, HDCP_REP_CTL, 583 rep_ctl | HDCP_SHA1_TEXT_24); 584 sha_text |= bstatus[0] << 16 | bstatus[1] << 8; 585 /* Only 24-bits of data, must be in the LSB */ 586 sha_text = (sha_text & 0xffffff00) >> 8; 587 ret = intel_write_sha_text(display, sha_text); 588 if (ret < 0) 589 return ret; 590 sha_idx += sizeof(sha_text); 591 592 /* Write 32 bits of M0 */ 593 intel_de_write(display, HDCP_REP_CTL, 594 rep_ctl | HDCP_SHA1_TEXT_0); 595 ret = intel_write_sha_text(display, 0); 596 if (ret < 0) 597 return ret; 598 sha_idx += sizeof(sha_text); 599 600 /* Write 24 bits of M0 */ 601 intel_de_write(display, HDCP_REP_CTL, 602 rep_ctl | HDCP_SHA1_TEXT_8); 603 ret = intel_write_sha_text(display, 0); 604 if (ret < 0) 605 return ret; 606 sha_idx += sizeof(sha_text); 607 608 } else if (sha_leftovers == 2) { 609 /* Write 32 bits of text */ 610 intel_de_write(display, HDCP_REP_CTL, 611 rep_ctl | HDCP_SHA1_TEXT_32); 612 sha_text |= bstatus[0] << 8 | bstatus[1]; 613 ret = intel_write_sha_text(display, sha_text); 614 if (ret < 0) 615 return ret; 616 sha_idx += sizeof(sha_text); 617 618 /* Write 64 bits of M0 */ 619 intel_de_write(display, HDCP_REP_CTL, 620 
rep_ctl | HDCP_SHA1_TEXT_0); 621 for (i = 0; i < 2; i++) { 622 ret = intel_write_sha_text(display, 0); 623 if (ret < 0) 624 return ret; 625 sha_idx += sizeof(sha_text); 626 } 627 628 /* 629 * Terminate the SHA-1 stream by hand. For the other leftover 630 * cases this is appended by the hardware. 631 */ 632 intel_de_write(display, HDCP_REP_CTL, 633 rep_ctl | HDCP_SHA1_TEXT_32); 634 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; 635 ret = intel_write_sha_text(display, sha_text); 636 if (ret < 0) 637 return ret; 638 sha_idx += sizeof(sha_text); 639 } else if (sha_leftovers == 3) { 640 /* Write 32 bits of text (filled from LSB) */ 641 intel_de_write(display, HDCP_REP_CTL, 642 rep_ctl | HDCP_SHA1_TEXT_32); 643 sha_text |= bstatus[0]; 644 ret = intel_write_sha_text(display, sha_text); 645 if (ret < 0) 646 return ret; 647 sha_idx += sizeof(sha_text); 648 649 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ 650 intel_de_write(display, HDCP_REP_CTL, 651 rep_ctl | HDCP_SHA1_TEXT_8); 652 ret = intel_write_sha_text(display, bstatus[1]); 653 if (ret < 0) 654 return ret; 655 sha_idx += sizeof(sha_text); 656 657 /* Write 32 bits of M0 */ 658 intel_de_write(display, HDCP_REP_CTL, 659 rep_ctl | HDCP_SHA1_TEXT_0); 660 ret = intel_write_sha_text(display, 0); 661 if (ret < 0) 662 return ret; 663 sha_idx += sizeof(sha_text); 664 665 /* Write 8 bits of M0 */ 666 intel_de_write(display, HDCP_REP_CTL, 667 rep_ctl | HDCP_SHA1_TEXT_24); 668 ret = intel_write_sha_text(display, 0); 669 if (ret < 0) 670 return ret; 671 sha_idx += sizeof(sha_text); 672 } else { 673 drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n", 674 sha_leftovers); 675 return -EINVAL; 676 } 677 678 intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 679 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ 680 while ((sha_idx % 64) < (64 - sizeof(sha_text))) { 681 ret = intel_write_sha_text(display, 0); 682 if (ret < 0) 683 return ret; 684 sha_idx += sizeof(sha_text); 685 
} 686 687 /* 688 * Last write gets the length of the concatenation in bits. That is: 689 * - 5 bytes per device 690 * - 10 bytes for BINFO/BSTATUS(2), M0(8) 691 */ 692 sha_text = (num_downstream * 5 + 10) * 8; 693 ret = intel_write_sha_text(display, sha_text); 694 if (ret < 0) 695 return ret; 696 697 /* Tell the HW we're done with the hash and wait for it to ACK */ 698 intel_de_write(display, HDCP_REP_CTL, 699 rep_ctl | HDCP_SHA1_COMPLETE_HASH); 700 if (intel_de_wait_for_set(display, HDCP_REP_CTL, 701 HDCP_SHA1_COMPLETE, 1)) { 702 drm_err(display->drm, "Timed out waiting for SHA1 complete\n"); 703 return -ETIMEDOUT; 704 } 705 if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { 706 drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n"); 707 return -ENXIO; 708 } 709 710 return 0; 711 } 712 713 /* Implements Part 2 of the HDCP authorization procedure */ 714 static 715 int intel_hdcp_auth_downstream(struct intel_connector *connector) 716 { 717 struct intel_display *display = to_intel_display(connector); 718 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 719 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 720 u8 bstatus[2], num_downstream, *ksv_fifo; 721 int ret, i, tries = 3; 722 723 ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); 724 if (ret) { 725 drm_dbg_kms(display->drm, 726 "KSV list failed to become ready (%d)\n", ret); 727 return ret; 728 } 729 730 ret = shim->read_bstatus(dig_port, bstatus); 731 if (ret) 732 return ret; 733 734 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 735 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { 736 drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n"); 737 return -EPERM; 738 } 739 740 /* 741 * When repeater reports 0 device count, HDCP1.4 spec allows disabling 742 * the HDCP encryption. That implies that repeater can't have its own 743 * display. 
As there is no consumption of encrypted content in the 744 * repeater with 0 downstream devices, we are failing the 745 * authentication. 746 */ 747 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); 748 if (num_downstream == 0) { 749 drm_dbg_kms(display->drm, 750 "Repeater with zero downstream devices\n"); 751 return -EINVAL; 752 } 753 754 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); 755 if (!ksv_fifo) { 756 drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n"); 757 return -ENOMEM; 758 } 759 760 ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo); 761 if (ret) 762 goto err; 763 764 if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo, 765 num_downstream) > 0) { 766 drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n"); 767 ret = -EPERM; 768 goto err; 769 } 770 771 /* 772 * When V prime mismatches, DP Spec mandates re-read of 773 * V prime atleast twice. 774 */ 775 for (i = 0; i < tries; i++) { 776 ret = intel_hdcp_validate_v_prime(connector, shim, 777 ksv_fifo, num_downstream, 778 bstatus); 779 if (!ret) 780 break; 781 } 782 783 if (i == tries) { 784 drm_dbg_kms(display->drm, 785 "V Prime validation failed.(%d)\n", ret); 786 goto err; 787 } 788 789 drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n", 790 num_downstream); 791 ret = 0; 792 err: 793 kfree(ksv_fifo); 794 return ret; 795 } 796 797 /* Implements Part 1 of the HDCP authorization procedure */ 798 static int intel_hdcp_auth(struct intel_connector *connector) 799 { 800 struct intel_display *display = to_intel_display(connector); 801 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 802 struct intel_hdcp *hdcp = &connector->hdcp; 803 const struct intel_hdcp_shim *shim = hdcp->shim; 804 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 805 enum port port = dig_port->base.port; 806 unsigned long r0_prime_gen_start; 807 int ret, i, tries = 2; 808 union { 809 u32 reg[2]; 810 u8 shim[DRM_HDCP_AN_LEN]; 811 } an; 812 union { 
813 u32 reg[2]; 814 u8 shim[DRM_HDCP_KSV_LEN]; 815 } bksv; 816 union { 817 u32 reg; 818 u8 shim[DRM_HDCP_RI_LEN]; 819 } ri; 820 bool repeater_present, hdcp_capable; 821 822 /* 823 * Detects whether the display is HDCP capable. Although we check for 824 * valid Bksv below, the HDCP over DP spec requires that we check 825 * whether the display supports HDCP before we write An. For HDMI 826 * displays, this is not necessary. 827 */ 828 if (shim->hdcp_get_capability) { 829 ret = shim->hdcp_get_capability(dig_port, &hdcp_capable); 830 if (ret) 831 return ret; 832 if (!hdcp_capable) { 833 drm_dbg_kms(display->drm, 834 "Panel is not HDCP capable\n"); 835 return -EINVAL; 836 } 837 } 838 839 /* Initialize An with 2 random values and acquire it */ 840 for (i = 0; i < 2; i++) 841 intel_de_write(display, 842 HDCP_ANINIT(display, cpu_transcoder, port), 843 get_random_u32()); 844 intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 845 HDCP_CONF_CAPTURE_AN); 846 847 /* Wait for An to be acquired */ 848 if (intel_de_wait_for_set(display, 849 HDCP_STATUS(display, cpu_transcoder, port), 850 HDCP_STATUS_AN_READY, 1)) { 851 drm_err(display->drm, "Timed out waiting for An\n"); 852 return -ETIMEDOUT; 853 } 854 855 an.reg[0] = intel_de_read(display, 856 HDCP_ANLO(display, cpu_transcoder, port)); 857 an.reg[1] = intel_de_read(display, 858 HDCP_ANHI(display, cpu_transcoder, port)); 859 ret = shim->write_an_aksv(dig_port, an.shim); 860 if (ret) 861 return ret; 862 863 r0_prime_gen_start = jiffies; 864 865 memset(&bksv, 0, sizeof(bksv)); 866 867 ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim); 868 if (ret < 0) 869 return ret; 870 871 if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) { 872 drm_err(display->drm, "BKSV is revoked\n"); 873 return -EPERM; 874 } 875 876 intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port), 877 bksv.reg[0]); 878 intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port), 879 bksv.reg[1]); 880 881 ret = 
shim->repeater_present(dig_port, &repeater_present); 882 if (ret) 883 return ret; 884 if (repeater_present) 885 intel_de_write(display, HDCP_REP_CTL, 886 intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port)); 887 888 ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); 889 if (ret) 890 return ret; 891 892 intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 893 HDCP_CONF_AUTH_AND_ENC); 894 895 /* Wait for R0 ready */ 896 if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & 897 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { 898 drm_err(display->drm, "Timed out waiting for R0 ready\n"); 899 return -ETIMEDOUT; 900 } 901 902 /* 903 * Wait for R0' to become available. The spec says 100ms from Aksv, but 904 * some monitors can take longer than this. We'll set the timeout at 905 * 300ms just to be sure. 906 * 907 * On DP, there's an R0_READY bit available but no such bit 908 * exists on HDMI. Since the upper-bound is the same, we'll just do 909 * the stupid thing instead of polling on one and not the other. 910 */ 911 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300); 912 913 tries = 3; 914 915 /* 916 * DP HDCP Spec mandates the two more reattempt to read R0, incase 917 * of R0 mismatch. 
 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(display,
			       HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(display,
					  HDCP_STATUS(display, cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(display,
				  HDCP_STATUS(display, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}

/*
 * Tear down HDCP 1.4 on a connector: stop per-stream encryption first (and
 * bail out early while other MST streams on the port still use HDCP), then
 * clear HDCP_CONF, wait for the status register to clear, remove this
 * transcoder/port from HDCP_REP_CTL and drop HDCP signalling via the shim.
 * Returns 0 on success or a negative errno.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
	/* ~0 mask: wait for every status bit to clear, not just ENC */
	if (intel_de_wait_for_clear(display,
				    HDCP_STATUS(display, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
						   port);
	intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(display->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(display->drm, "HDCP is disabled\n");
	return 0;
}

/*
 * Enable HDCP 1.4: make sure the key-load hardware is usable, load the HDCP
 * keys (up to KEY_LOAD_TRIES attempts, clearing them between attempts), then
 * run authentication. On an auth failure the spec expects reauthentication,
 * so up to three full auth attempts are made, tearing HDCP down in between.
 */
static int intel_hdcp1_enable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
		    connector->base.base.id, connector->base.name);

	if (!hdcp_key_loadable(display)) {
		drm_err(display->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(display);
		if (!ret)
			break;
		intel_hdcp_clear_keys(display);
	}
	if (ret) {
		drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* In case of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(display->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}

/* Map an embedded intel_hdcp back to its containing connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}

/*
 * Update the cached content-protection value and keep the per-port count of
 * HDCP-enabled streams in sync (MST). Callers must hold hdcp->mutex; when the
 * value actually changes, dig_port->hdcp_mutex must be held too. If
 * update_property is set, prop_work is queued to push the new value to the
 * connector property; a connector reference is taken for the async work.
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex));

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		drm_connector_get(&connector->base);
		if
(!queue_work(i915->unordered_wq, &hdcp->prop_work))
			/* work already queued: drop the extra reference now */
			drm_connector_put(&connector->base);
	}
}

/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware says encryption already dropped: report and go to DESIRED */
	if (drm_WARN_ON(display->drm,
			!intel_hdcp_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
			connector->base.base.id, connector->base.name,
			intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Link still protected per the shim: keep/restore ENABLED */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
		    connector->base.base.id, connector->base.name);

	/* Link check failed: tear down and drop back to DESIRED for re-auth */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED,
				true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}

/*
 * Async worker that pushes the cached hdcp->value to the connector's
 * content-protection property under connection_mutex, then drops the
 * connector reference taken by intel_hdcp_update_value().
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct intel_display *display = to_intel_display(connector);

	drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	drm_connector_put(&connector->base);
}

/* HDCP needs platform support and, before display version 12, ports A..D. */
bool is_hdcp_supported(struct intel_display *display, enum port port)
{
	return DISPLAY_RUNTIME_INFO(display)->has_hdcp &&
	       (DISPLAY_VER(display) >= 12 || port < PORT_E);
}

/*
 * Ask the HDCP firmware arbiter to initiate an HDCP2.2 session for this port
 * (AKE_Init). All arbiter calls are serialized under display->hdcp.hdcp_mutex.
 */
static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
		       struct hdcp2_ake_init *ake_data)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret =
arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
	if (ret)
		drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/*
 * Hand the receiver certificate (AKE_Send_Cert) to the firmware arbiter for
 * verification and get back the masked km message to send downstream;
 * *paired reports whether pairing info is already stored, *msg_sz the size
 * of the message placed in ek_pub_km.
 */
static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
				struct hdcp2_ake_send_cert *rx_cert,
				bool *paired,
				struct hdcp2_ake_no_stored_km *ek_pub_km,
				size_t *msg_sz)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
							    rx_cert, paired,
							    ek_pub_km, msg_sz);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Verify rx_cert failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Have the firmware arbiter verify the receiver's H' (AKE_Send_H_prime). */
static int hdcp2_verify_hprime(struct intel_connector *connector,
			       struct hdcp2_ake_send_hprime *rx_hprime)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Store the receiver's pairing info (AKE_Send_Pairing_Info) in the arbiter. */
static int
hdcp2_store_pairing_info(struct intel_connector *connector,
			 struct hdcp2_ake_send_pairing_info *pairing_info)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Store pairing info failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Ask the firmware arbiter to start a locality check (LC_Init). */
static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
		      struct hdcp2_lc_init *lc_init)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Have the firmware arbiter verify L' (LC_Send_L_prime). */
static int
hdcp2_verify_lprime(struct intel_connector *connector,
		    struct hdcp2_lc_send_lprime *rx_lprime)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Get the session key message (SKE_Send_Eks) from the firmware arbiter. */
static int hdcp2_prepare_skey(struct intel_connector *connector,
			      struct hdcp2_ske_send_eks *ske_data)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Get session key failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/*
 * Pass the repeater's receiver-ID list to the firmware arbiter for topology
 * verification and get back the RepeaterAuth_Send_Ack message.
 */
static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
				      struct hdcp2_rep_send_receiverid_list
				      *rep_topology,
				      struct hdcp2_rep_send_ack *rep_send_ack)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
							    data,
							    rep_topology,
							    rep_send_ack);
	if (ret < 0)
		drm_dbg_kms(display->drm,
			    "Verify rep topology failed. %d\n", ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Have the firmware arbiter verify M' (RepeaterAuth_Stream_Ready). */
static int
hdcp2_verify_mprime(struct intel_connector *connector,
		    struct hdcp2_rep_stream_ready *stream_ready)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Tell the firmware arbiter authentication passed; enable port auth. */
static int hdcp2_authenticate_port(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
	if (ret < 0)
		drm_dbg_kms(display->drm, "Enable hdcp auth failed. %d\n",
			    ret);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Close the firmware arbiter's HDCP session for this port. */
static int hdcp2_close_session(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&display->hdcp.hdcp_mutex);
	arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
					       &dig_port->hdcp_port_data);
	mutex_unlock(&display->hdcp.hdcp_mutex);

	return ret;
}

/* Deauthentication is simply closing the firmware session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}

/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port =
		intel_attached_dig_port(connector);
	struct
intel_hdcp *hdcp = &connector->hdcp;
	/* One message buffer is live at a time, so overlay them in a union */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret, i, max_retries;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	if (intel_encoder_is_dp(&dig_port->base) ||
	    intel_encoder_is_mst(&dig_port->base))
		max_retries = 10;
	else
		max_retries = 1;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	/*
	 * Retry the first read and write to downstream at least 10 times
	 * with a 50ms delay if not hdcp2 capable for DP/DPMST encoders
	 * (dock decides to stop advertising hdcp2 capability for some reason).
	 * The reason being that during suspend resume dock usually keeps the
	 * HDCP2 registers inaccessible causing AUX error. This wouldn't be a
	 * big problem if the userspace just kept retrying with some delay while
	 * it continues to play low value content but most userspace applications
	 * end up throwing an error when it receives one from KMD. This makes
	 * sure we give the dock and the sink devices to complete its power cycle
	 * and then try HDCP authentication. The values of 10 and delay of 50ms
	 * was decided based on multiple trial and errors.
	 */
	for (i = 0; i < max_retries; i++) {
		if (!intel_hdcp2_get_capability(connector)) {
			msleep(50);
			continue;
		}

		ret = shim->write_2_2_msg(connector, &msgs.ake_init,
					  sizeof(msgs.ake_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
					 &msgs.send_cert, sizeof(msgs.send_cert));
		if (ret > 0)
			break;
	}

	/*
	 * NOTE(review): if the capability check above fails on every retry,
	 * ret still holds the hdcp2_prepare_ake_init() result (0) here and
	 * msgs.send_cert is never read from the sink — confirm that path is
	 * unreachable or guard it explicitly.
	 */
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers whose ID appears on the SRM revocation list */
	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(display->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}

/*
 * Locality check: run LC_Init/LC_Send_L_prime and verify L', retrying up to
 * HDCP2_LC_RETRY_CNT times. Returns the last attempt's result.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(connector, &msgs.lc_init,
					  sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}

/* Session key exchange: fetch SKE_Send_Eks from firmware, send it downstream. */
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct
hdcp2_ske_send_eks send_eks; 1661 int ret; 1662 1663 ret = hdcp2_prepare_skey(connector, &send_eks); 1664 if (ret < 0) 1665 return ret; 1666 1667 ret = hdcp->shim->write_2_2_msg(connector, &send_eks, 1668 sizeof(send_eks)); 1669 if (ret < 0) 1670 return ret; 1671 1672 return 0; 1673 } 1674 1675 static 1676 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector) 1677 { 1678 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1679 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1680 struct intel_hdcp *hdcp = &connector->hdcp; 1681 union { 1682 struct hdcp2_rep_stream_manage stream_manage; 1683 struct hdcp2_rep_stream_ready stream_ready; 1684 } msgs; 1685 const struct intel_hdcp_shim *shim = hdcp->shim; 1686 int ret, streams_size_delta, i; 1687 1688 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) 1689 return -ERANGE; 1690 1691 /* Prepare RepeaterAuth_Stream_Manage msg */ 1692 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; 1693 drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); 1694 1695 msgs.stream_manage.k = cpu_to_be16(data->k); 1696 1697 for (i = 0; i < data->k; i++) { 1698 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id; 1699 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type; 1700 } 1701 1702 streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) * 1703 sizeof(struct hdcp2_streamid_type); 1704 /* Send it to Repeater */ 1705 ret = shim->write_2_2_msg(connector, &msgs.stream_manage, 1706 sizeof(msgs.stream_manage) - streams_size_delta); 1707 if (ret < 0) 1708 goto out; 1709 1710 ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY, 1711 &msgs.stream_ready, sizeof(msgs.stream_ready)); 1712 if (ret < 0) 1713 goto out; 1714 1715 data->seq_num_m = hdcp->seq_num_m; 1716 1717 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); 1718 1719 out: 1720 hdcp->seq_num_m++; 1721 1722 return ret; 1723 } 1724 1725 static 1726 
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	/* Read the repeater's RepeaterAuth_Send_ReceiverID_List */
	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(display->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(display->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(display->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Full HDCP2.2 downstream authentication: AKE, locality check, SKE,
 * optional stream-type configuration, and repeater topology auth when the
 * receiver reported itself as a repeater.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/*
 * Enable per-stream HDCP2.2 encryption once the link reports encrypted; on a
 * non-encrypted link, deauthenticate the port and reset auth bookkeeping so
 * the caller can re-attempt from scratch.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	      LINK_ENCRYPTION_STATUS)) {
		drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}

/*
 * Turn on HDCP2.2 link encryption: assert signalling through the shim (if it
 * provides a hook), request encryption once the link reports authenticated,
 * and wait for the encryption status bit.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(display->drm,
		    intel_de_read(display,
HDCP2_STATUS(display, cpu_transcoder, port)) & 1905 LINK_ENCRYPTION_STATUS); 1906 if (hdcp->shim->toggle_signalling) { 1907 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, 1908 true); 1909 if (ret) { 1910 drm_err(display->drm, 1911 "Failed to enable HDCP signalling. %d\n", 1912 ret); 1913 return ret; 1914 } 1915 } 1916 1917 if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & 1918 LINK_AUTH_STATUS) 1919 /* Link is Authenticated. Now set for Encryption */ 1920 intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port), 1921 0, CTL_LINK_ENCRYPTION_REQ); 1922 1923 ret = intel_de_wait_for_set(display, 1924 HDCP2_STATUS(display, cpu_transcoder, 1925 port), 1926 LINK_ENCRYPTION_STATUS, 1927 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); 1928 dig_port->hdcp_auth_status = true; 1929 1930 return ret; 1931 } 1932 1933 static int hdcp2_disable_encryption(struct intel_connector *connector) 1934 { 1935 struct intel_display *display = to_intel_display(connector); 1936 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1937 struct intel_hdcp *hdcp = &connector->hdcp; 1938 enum port port = dig_port->base.port; 1939 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 1940 int ret; 1941 1942 drm_WARN_ON(display->drm, 1943 !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & 1944 LINK_ENCRYPTION_STATUS)); 1945 1946 intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port), 1947 CTL_LINK_ENCRYPTION_REQ, 0); 1948 1949 ret = intel_de_wait_for_clear(display, 1950 HDCP2_STATUS(display, cpu_transcoder, 1951 port), 1952 LINK_ENCRYPTION_STATUS, 1953 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); 1954 if (ret == -ETIMEDOUT) 1955 drm_dbg_kms(display->drm, "Disable Encryption Timedout"); 1956 1957 if (hdcp->shim->toggle_signalling) { 1958 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, 1959 false); 1960 if (ret) { 1961 drm_err(display->drm, 1962 "Failed to disable HDCP signalling. 
%d\n", 1963 ret); 1964 return ret; 1965 } 1966 } 1967 1968 return ret; 1969 } 1970 1971 static int 1972 hdcp2_propagate_stream_management_info(struct intel_connector *connector) 1973 { 1974 struct intel_display *display = to_intel_display(connector); 1975 int i, tries = 3, ret; 1976 1977 if (!connector->hdcp.is_repeater) 1978 return 0; 1979 1980 for (i = 0; i < tries; i++) { 1981 ret = _hdcp2_propagate_stream_management_info(connector); 1982 if (!ret) 1983 break; 1984 1985 /* Lets restart the auth incase of seq_num_m roll over */ 1986 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { 1987 drm_dbg_kms(display->drm, 1988 "seq_num_m roll over.(%d)\n", ret); 1989 break; 1990 } 1991 1992 drm_dbg_kms(display->drm, 1993 "HDCP2 stream management %d of %d Failed.(%d)\n", 1994 i + 1, tries, ret); 1995 } 1996 1997 return ret; 1998 } 1999 2000 static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, 2001 struct intel_connector *connector) 2002 { 2003 struct intel_display *display = to_intel_display(connector); 2004 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2005 int ret = 0, i, tries = 3; 2006 2007 for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) { 2008 ret = hdcp2_authenticate_sink(connector); 2009 if (!ret) { 2010 ret = intel_hdcp_prepare_streams(state, connector); 2011 if (ret) { 2012 drm_dbg_kms(display->drm, 2013 "Prepare stream failed.(%d)\n", 2014 ret); 2015 break; 2016 } 2017 2018 ret = hdcp2_propagate_stream_management_info(connector); 2019 if (ret) { 2020 drm_dbg_kms(display->drm, 2021 "Stream management failed.(%d)\n", 2022 ret); 2023 break; 2024 } 2025 2026 ret = hdcp2_authenticate_port(connector); 2027 if (!ret) 2028 break; 2029 drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n", 2030 ret); 2031 } 2032 2033 /* Clearing the mei hdcp session */ 2034 drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", 2035 i + 1, tries, ret); 2036 if (hdcp2_deauthenticate_port(connector) < 0) 2037 
			drm_dbg_kms(display->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(display->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}

/*
 * Enable HDCP2.2 on a connector: apply the line-rekeying workaround, then
 * run the full authenticate-and-encrypt sequence and mark the connector
 * encrypted on success.
 */
static int _intel_hdcp2_enable(struct intel_atomic_state *state,
			       struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);

	ret = hdcp2_authenticate_and_encrypt(state, connector);
	if (ret) {
		drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}

/*
 * Disable HDCP2.2: stop per-stream encryption first. Unless this is link
 * recovery, bail out while other MST streams on the port still use HDCP.
 * Then disable link encryption, close the firmware session and reset the
 * per-port auth bookkeeping.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}

/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp_mutex); 2139 cpu_transcoder = hdcp->cpu_transcoder; 2140 2141 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ 2142 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 2143 !hdcp->hdcp2_encrypted) { 2144 ret = -EINVAL; 2145 goto out; 2146 } 2147 2148 if (drm_WARN_ON(display->drm, 2149 !intel_hdcp2_in_use(display, cpu_transcoder, port))) { 2150 drm_err(display->drm, 2151 "HDCP2.2 link stopped the encryption, %x\n", 2152 intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port))); 2153 ret = -ENXIO; 2154 _intel_hdcp2_disable(connector, true); 2155 intel_hdcp_update_value(connector, 2156 DRM_MODE_CONTENT_PROTECTION_DESIRED, 2157 true); 2158 goto out; 2159 } 2160 2161 ret = hdcp->shim->check_2_2_link(dig_port, connector); 2162 if (ret == HDCP_LINK_PROTECTED) { 2163 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 2164 intel_hdcp_update_value(connector, 2165 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2166 true); 2167 } 2168 goto out; 2169 } 2170 2171 if (ret == HDCP_TOPOLOGY_CHANGE) { 2172 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2173 goto out; 2174 2175 drm_dbg_kms(display->drm, 2176 "HDCP2.2 Downstream topology change\n"); 2177 } else { 2178 drm_dbg_kms(display->drm, 2179 "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n", 2180 connector->base.base.id, connector->base.name); 2181 } 2182 2183 ret = _intel_hdcp2_disable(connector, true); 2184 if (ret) { 2185 drm_err(display->drm, 2186 "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n", 2187 connector->base.base.id, connector->base.name, ret); 2188 intel_hdcp_update_value(connector, 2189 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2190 goto out; 2191 } 2192 2193 intel_hdcp_update_value(connector, 2194 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2195 out: 2196 mutex_unlock(&dig_port->hdcp_mutex); 2197 mutex_unlock(&hdcp->mutex); 2198 return ret; 2199 } 2200 2201 static void intel_hdcp_check_work(struct work_struct *work) 2202 { 2203 
struct intel_hdcp *hdcp = container_of(to_delayed_work(work), 2204 struct intel_hdcp, 2205 check_work); 2206 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 2207 struct intel_display *display = to_intel_display(connector); 2208 struct drm_i915_private *i915 = to_i915(display->drm); 2209 2210 if (drm_connector_is_unregistered(&connector->base)) 2211 return; 2212 2213 if (!intel_hdcp2_check_link(connector)) 2214 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2215 DRM_HDCP2_CHECK_PERIOD_MS); 2216 else if (!intel_hdcp_check_link(connector)) 2217 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2218 DRM_HDCP_CHECK_PERIOD_MS); 2219 } 2220 2221 static int i915_hdcp_component_bind(struct device *drv_kdev, 2222 struct device *mei_kdev, void *data) 2223 { 2224 struct intel_display *display = to_intel_display(drv_kdev); 2225 2226 drm_dbg(display->drm, "I915 HDCP comp bind\n"); 2227 mutex_lock(&display->hdcp.hdcp_mutex); 2228 display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data; 2229 display->hdcp.arbiter->hdcp_dev = mei_kdev; 2230 mutex_unlock(&display->hdcp.hdcp_mutex); 2231 2232 return 0; 2233 } 2234 2235 static void i915_hdcp_component_unbind(struct device *drv_kdev, 2236 struct device *mei_kdev, void *data) 2237 { 2238 struct intel_display *display = to_intel_display(drv_kdev); 2239 2240 drm_dbg(display->drm, "I915 HDCP comp unbind\n"); 2241 mutex_lock(&display->hdcp.hdcp_mutex); 2242 display->hdcp.arbiter = NULL; 2243 mutex_unlock(&display->hdcp.hdcp_mutex); 2244 } 2245 2246 static const struct component_ops i915_hdcp_ops = { 2247 .bind = i915_hdcp_component_bind, 2248 .unbind = i915_hdcp_component_unbind, 2249 }; 2250 2251 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port) 2252 { 2253 switch (port) { 2254 case PORT_A: 2255 return HDCP_DDI_A; 2256 case PORT_B ... 
PORT_F: 2257 return (enum hdcp_ddi)port; 2258 default: 2259 return HDCP_DDI_INVALID_PORT; 2260 } 2261 } 2262 2263 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder) 2264 { 2265 switch (cpu_transcoder) { 2266 case TRANSCODER_A ... TRANSCODER_D: 2267 return (enum hdcp_transcoder)(cpu_transcoder | 0x10); 2268 default: /* eDP, DSI TRANSCODERS are non HDCP capable */ 2269 return HDCP_INVALID_TRANSCODER; 2270 } 2271 } 2272 2273 static int initialize_hdcp_port_data(struct intel_connector *connector, 2274 struct intel_digital_port *dig_port, 2275 const struct intel_hdcp_shim *shim) 2276 { 2277 struct intel_display *display = to_intel_display(connector); 2278 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 2279 enum port port = dig_port->base.port; 2280 2281 if (DISPLAY_VER(display) < 12) 2282 data->hdcp_ddi = intel_get_hdcp_ddi_index(port); 2283 else 2284 /* 2285 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled 2286 * with zero(INVALID PORT index). 2287 */ 2288 data->hdcp_ddi = HDCP_DDI_INVALID_PORT; 2289 2290 /* 2291 * As associated transcoder is set and modified at modeset, here hdcp_transcoder 2292 * is initialized to zero (invalid transcoder index). This will be 2293 * retained for <Gen12 forever. 
2294 */ 2295 data->hdcp_transcoder = HDCP_INVALID_TRANSCODER; 2296 2297 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; 2298 data->protocol = (u8)shim->protocol; 2299 2300 if (!data->streams) 2301 data->streams = kcalloc(INTEL_NUM_PIPES(display), 2302 sizeof(struct hdcp2_streamid_type), 2303 GFP_KERNEL); 2304 if (!data->streams) { 2305 drm_err(display->drm, "Out of Memory\n"); 2306 return -ENOMEM; 2307 } 2308 2309 return 0; 2310 } 2311 2312 static bool is_hdcp2_supported(struct intel_display *display) 2313 { 2314 struct drm_i915_private *i915 = to_i915(display->drm); 2315 2316 if (intel_hdcp_gsc_cs_required(display)) 2317 return true; 2318 2319 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) 2320 return false; 2321 2322 return (DISPLAY_VER(display) >= 10 || 2323 IS_KABYLAKE(i915) || 2324 IS_COFFEELAKE(i915) || 2325 IS_COMETLAKE(i915)); 2326 } 2327 2328 void intel_hdcp_component_init(struct intel_display *display) 2329 { 2330 int ret; 2331 2332 if (!is_hdcp2_supported(display)) 2333 return; 2334 2335 mutex_lock(&display->hdcp.hdcp_mutex); 2336 drm_WARN_ON(display->drm, display->hdcp.comp_added); 2337 2338 display->hdcp.comp_added = true; 2339 mutex_unlock(&display->hdcp.hdcp_mutex); 2340 if (intel_hdcp_gsc_cs_required(display)) 2341 ret = intel_hdcp_gsc_init(display); 2342 else 2343 ret = component_add_typed(display->drm->dev, &i915_hdcp_ops, 2344 I915_COMPONENT_HDCP); 2345 2346 if (ret < 0) { 2347 drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n", 2348 ret); 2349 mutex_lock(&display->hdcp.hdcp_mutex); 2350 display->hdcp.comp_added = false; 2351 mutex_unlock(&display->hdcp.hdcp_mutex); 2352 return; 2353 } 2354 } 2355 2356 static void intel_hdcp2_init(struct intel_connector *connector, 2357 struct intel_digital_port *dig_port, 2358 const struct intel_hdcp_shim *shim) 2359 { 2360 struct intel_display *display = to_intel_display(connector); 2361 struct intel_hdcp *hdcp = &connector->hdcp; 2362 int ret; 2363 2364 ret = initialize_hdcp_port_data(connector, dig_port, 
shim); 2365 if (ret) { 2366 drm_dbg_kms(display->drm, "Mei hdcp data init failed\n"); 2367 return; 2368 } 2369 2370 hdcp->hdcp2_supported = true; 2371 } 2372 2373 int intel_hdcp_init(struct intel_connector *connector, 2374 struct intel_digital_port *dig_port, 2375 const struct intel_hdcp_shim *shim) 2376 { 2377 struct intel_display *display = to_intel_display(connector); 2378 struct intel_hdcp *hdcp = &connector->hdcp; 2379 int ret; 2380 2381 if (!shim) 2382 return -EINVAL; 2383 2384 if (is_hdcp2_supported(display)) 2385 intel_hdcp2_init(connector, dig_port, shim); 2386 2387 ret = drm_connector_attach_content_protection_property(&connector->base, 2388 hdcp->hdcp2_supported); 2389 if (ret) { 2390 hdcp->hdcp2_supported = false; 2391 kfree(dig_port->hdcp_port_data.streams); 2392 return ret; 2393 } 2394 2395 hdcp->shim = shim; 2396 mutex_init(&hdcp->mutex); 2397 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); 2398 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); 2399 init_waitqueue_head(&hdcp->cp_irq_queue); 2400 2401 return 0; 2402 } 2403 2404 static int _intel_hdcp_enable(struct intel_atomic_state *state, 2405 struct intel_encoder *encoder, 2406 const struct intel_crtc_state *pipe_config, 2407 const struct drm_connector_state *conn_state) 2408 { 2409 struct intel_display *display = to_intel_display(encoder); 2410 struct drm_i915_private *i915 = to_i915(display->drm); 2411 struct intel_connector *connector = 2412 to_intel_connector(conn_state->connector); 2413 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2414 struct intel_hdcp *hdcp = &connector->hdcp; 2415 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; 2416 int ret = -EINVAL; 2417 2418 if (!hdcp->shim) 2419 return -ENOENT; 2420 2421 if (!connector->encoder) { 2422 drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n", 2423 connector->base.base.id, connector->base.name); 2424 return -ENODEV; 2425 } 2426 2427 mutex_lock(&hdcp->mutex); 2428 
mutex_lock(&dig_port->hdcp_mutex); 2429 drm_WARN_ON(display->drm, 2430 hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); 2431 hdcp->content_type = (u8)conn_state->hdcp_content_type; 2432 2433 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { 2434 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder; 2435 hdcp->stream_transcoder = pipe_config->cpu_transcoder; 2436 } else { 2437 hdcp->cpu_transcoder = pipe_config->cpu_transcoder; 2438 hdcp->stream_transcoder = INVALID_TRANSCODER; 2439 } 2440 2441 if (DISPLAY_VER(display) >= 12) 2442 dig_port->hdcp_port_data.hdcp_transcoder = 2443 intel_get_hdcp_transcoder(hdcp->cpu_transcoder); 2444 2445 /* 2446 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup 2447 * is capable of HDCP2.2, it is preferred to use HDCP2.2. 2448 */ 2449 if (intel_hdcp2_get_capability(connector)) { 2450 ret = _intel_hdcp2_enable(state, connector); 2451 if (!ret) 2452 check_link_interval = 2453 DRM_HDCP2_CHECK_PERIOD_MS; 2454 } 2455 2456 /* 2457 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will 2458 * be attempted. 
2459 */ 2460 if (ret && intel_hdcp_get_capability(connector) && 2461 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { 2462 ret = intel_hdcp1_enable(connector); 2463 } 2464 2465 if (!ret) { 2466 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2467 check_link_interval); 2468 intel_hdcp_update_value(connector, 2469 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2470 true); 2471 } 2472 2473 mutex_unlock(&dig_port->hdcp_mutex); 2474 mutex_unlock(&hdcp->mutex); 2475 return ret; 2476 } 2477 2478 void intel_hdcp_enable(struct intel_atomic_state *state, 2479 struct intel_encoder *encoder, 2480 const struct intel_crtc_state *crtc_state, 2481 const struct drm_connector_state *conn_state) 2482 { 2483 struct intel_connector *connector = 2484 to_intel_connector(conn_state->connector); 2485 struct intel_hdcp *hdcp = &connector->hdcp; 2486 2487 /* 2488 * Enable hdcp if it's desired or if userspace is enabled and 2489 * driver set its state to undesired 2490 */ 2491 if (conn_state->content_protection == 2492 DRM_MODE_CONTENT_PROTECTION_DESIRED || 2493 (conn_state->content_protection == 2494 DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value == 2495 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2496 _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2497 } 2498 2499 int intel_hdcp_disable(struct intel_connector *connector) 2500 { 2501 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2502 struct intel_hdcp *hdcp = &connector->hdcp; 2503 int ret = 0; 2504 2505 if (!hdcp->shim) 2506 return -ENOENT; 2507 2508 mutex_lock(&hdcp->mutex); 2509 mutex_lock(&dig_port->hdcp_mutex); 2510 2511 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2512 goto out; 2513 2514 intel_hdcp_update_value(connector, 2515 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); 2516 if (hdcp->hdcp2_encrypted) 2517 ret = _intel_hdcp2_disable(connector, false); 2518 else if (hdcp->hdcp_encrypted) 2519 ret = _intel_hdcp_disable(connector); 2520 2521 out: 2522 
mutex_unlock(&dig_port->hdcp_mutex); 2523 mutex_unlock(&hdcp->mutex); 2524 cancel_delayed_work_sync(&hdcp->check_work); 2525 return ret; 2526 } 2527 2528 void intel_hdcp_update_pipe(struct intel_atomic_state *state, 2529 struct intel_encoder *encoder, 2530 const struct intel_crtc_state *crtc_state, 2531 const struct drm_connector_state *conn_state) 2532 { 2533 struct intel_connector *connector = 2534 to_intel_connector(conn_state->connector); 2535 struct intel_hdcp *hdcp = &connector->hdcp; 2536 bool content_protection_type_changed, desired_and_not_enabled = false; 2537 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2538 2539 if (!connector->hdcp.shim) 2540 return; 2541 2542 content_protection_type_changed = 2543 (conn_state->hdcp_content_type != hdcp->content_type && 2544 conn_state->content_protection != 2545 DRM_MODE_CONTENT_PROTECTION_UNDESIRED); 2546 2547 /* 2548 * During the HDCP encryption session if Type change is requested, 2549 * disable the HDCP and reenable it with new TYPE value. 2550 */ 2551 if (conn_state->content_protection == 2552 DRM_MODE_CONTENT_PROTECTION_UNDESIRED || 2553 content_protection_type_changed) 2554 intel_hdcp_disable(connector); 2555 2556 /* 2557 * Mark the hdcp state as DESIRED after the hdcp disable of type 2558 * change procedure. 
2559 */ 2560 if (content_protection_type_changed) { 2561 mutex_lock(&hdcp->mutex); 2562 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 2563 drm_connector_get(&connector->base); 2564 if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) 2565 drm_connector_put(&connector->base); 2566 mutex_unlock(&hdcp->mutex); 2567 } 2568 2569 if (conn_state->content_protection == 2570 DRM_MODE_CONTENT_PROTECTION_DESIRED) { 2571 mutex_lock(&hdcp->mutex); 2572 /* Avoid enabling hdcp, if it already ENABLED */ 2573 desired_and_not_enabled = 2574 hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; 2575 mutex_unlock(&hdcp->mutex); 2576 /* 2577 * If HDCP already ENABLED and CP property is DESIRED, schedule 2578 * prop_work to update correct CP property to user space. 2579 */ 2580 if (!desired_and_not_enabled && !content_protection_type_changed) { 2581 drm_connector_get(&connector->base); 2582 if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) 2583 drm_connector_put(&connector->base); 2584 2585 } 2586 } 2587 2588 if (desired_and_not_enabled || content_protection_type_changed) 2589 _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2590 } 2591 2592 void intel_hdcp_component_fini(struct intel_display *display) 2593 { 2594 mutex_lock(&display->hdcp.hdcp_mutex); 2595 if (!display->hdcp.comp_added) { 2596 mutex_unlock(&display->hdcp.hdcp_mutex); 2597 return; 2598 } 2599 2600 display->hdcp.comp_added = false; 2601 mutex_unlock(&display->hdcp.hdcp_mutex); 2602 2603 if (intel_hdcp_gsc_cs_required(display)) 2604 intel_hdcp_gsc_fini(display); 2605 else 2606 component_del(display->drm->dev, &i915_hdcp_ops); 2607 } 2608 2609 void intel_hdcp_cleanup(struct intel_connector *connector) 2610 { 2611 struct intel_hdcp *hdcp = &connector->hdcp; 2612 2613 if (!hdcp->shim) 2614 return; 2615 2616 /* 2617 * If the connector is registered, it's possible userspace could kick 2618 * off another HDCP enable, which would re-spawn the workers. 
2619 */ 2620 drm_WARN_ON(connector->base.dev, 2621 connector->base.registration_state == DRM_CONNECTOR_REGISTERED); 2622 2623 /* 2624 * Now that the connector is not registered, check_work won't be run, 2625 * but cancel any outstanding instances of it 2626 */ 2627 cancel_delayed_work_sync(&hdcp->check_work); 2628 2629 /* 2630 * We don't cancel prop_work in the same way as check_work since it 2631 * requires connection_mutex which could be held while calling this 2632 * function. Instead, we rely on the connector references grabbed before 2633 * scheduling prop_work to ensure the connector is alive when prop_work 2634 * is run. So if we're in the destroy path (which is where this 2635 * function should be called), we're "guaranteed" that prop_work is not 2636 * active (tl;dr This Should Never Happen). 2637 */ 2638 drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); 2639 2640 mutex_lock(&hdcp->mutex); 2641 hdcp->shim = NULL; 2642 mutex_unlock(&hdcp->mutex); 2643 } 2644 2645 void intel_hdcp_atomic_check(struct drm_connector *connector, 2646 struct drm_connector_state *old_state, 2647 struct drm_connector_state *new_state) 2648 { 2649 u64 old_cp = old_state->content_protection; 2650 u64 new_cp = new_state->content_protection; 2651 struct drm_crtc_state *crtc_state; 2652 2653 if (!new_state->crtc) { 2654 /* 2655 * If the connector is being disabled with CP enabled, mark it 2656 * desired so it's re-enabled when the connector is brought back 2657 */ 2658 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) 2659 new_state->content_protection = 2660 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2661 return; 2662 } 2663 2664 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 2665 new_state->crtc); 2666 /* 2667 * Fix the HDCP uapi content protection state in case of modeset. 2668 * FIXME: As per HDCP content protection property uapi doc, an uevent() 2669 * need to be sent if there is transition from ENABLED->DESIRED. 
2670 */ 2671 if (drm_atomic_crtc_needs_modeset(crtc_state) && 2672 (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED && 2673 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2674 new_state->content_protection = 2675 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2676 2677 /* 2678 * Nothing to do if the state didn't change, or HDCP was activated since 2679 * the last commit. And also no change in hdcp content type. 2680 */ 2681 if (old_cp == new_cp || 2682 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && 2683 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) { 2684 if (old_state->hdcp_content_type == 2685 new_state->hdcp_content_type) 2686 return; 2687 } 2688 2689 crtc_state->mode_changed = true; 2690 } 2691 2692 /* Handles the CP_IRQ raised from the DP HDCP sink */ 2693 void intel_hdcp_handle_cp_irq(struct intel_connector *connector) 2694 { 2695 struct intel_hdcp *hdcp = &connector->hdcp; 2696 struct intel_display *display = to_intel_display(connector); 2697 struct drm_i915_private *i915 = to_i915(display->drm); 2698 2699 if (!hdcp->shim) 2700 return; 2701 2702 atomic_inc(&connector->hdcp.cp_irq_count); 2703 wake_up_all(&connector->hdcp.cp_irq_queue); 2704 2705 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0); 2706 } 2707