/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
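 *
 * As a rough internal sketch (not driver-facing API), a sideband request body
 * is packed into a raw byte stream before it is transmitted to a branch
 * device; the port number below is purely illustrative::
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_ENUM_PATH_RESOURCES,
 *		.u.port_num.port_number = 1,
 *	};
 *	struct drm_dp_sideband_msg_tx txmsg = { 0 };
 *
 *	drm_dp_encode_sideband_req(&req, &txmsg);
 *	// txmsg.msg now holds the encoded request, txmsg.cur_len its length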
 */
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, u8 start_slot, u8 num_slots);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 guid_t *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	if (hdr->msg_len < 1)		/* min space for body CRC */
		return false;

	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					  struct drm_dp_sideband_msg_hdr *hdr,
					  u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
				struct drm_dp_sideband_msg_rx *raw,
				struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * NOTE: It's my impression from reading the spec that the below parsing
	 * is correct. However I noticed while testing with an HDCP 1.4 display
	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
	 * would expect both bits to be set. So keep the parsing following the
	 * spec, but beware reality might not match the spec (at least for some
	 * configurations).
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		import_guid(&msg->u.nak.guid, &raw->msg[1]);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(mgr, raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool
drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
		    idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
							  struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way for this, change to
		 * poll-waiting for the MST reply interrupt if we didn't receive
		 * it for 50 msec. This would cater for cases where the HPD
		 * pulse signal got lost somewhere, even though the sink raised
		 * the corresponding MST interrupt correctly. One example is the
		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
		 * filters out short pulses with a duration less than ~540 usec.
		 *
		 * The poll period is 50 msec to avoid missing an interrupt
		 * after the sink has cleared it (after a 110msec timeout
		 * since it raised the interrupt).
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
					 msecs_to_jiffies(50) :
					 wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
			    txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
						       DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs to, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
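 *
 * A minimal usage sketch (the my_connector structure and callback placement
 * below are hypothetical; only the two helper calls are real API)::
 *
 *	// in &drm_dp_mst_topology_cbs.add_connector: keep the port's memory
 *	// alive for as long as the driver's connector needs it
 *	my_connector->port = port;
 *	drm_dp_mst_get_port_malloc(port);
 *
 *	// in the connector's destroy path: drop that reference again
 *	drm_dp_mst_put_port_malloc(my_connector->port);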
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_device *drm,
			    struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);

		drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose it */
	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
				    mstb, "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
				    port, "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
				 struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_atomic_payload *payload;

	list_for_each_entry(payload, &state->payloads, next)
		if (payload->port == port)
			return payload;

	return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
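 *
 * A rough sketch of how these two calls are paired internally (illustrative
 * only; both functions are static to this file and not driver API)::
 *
 *	if (!drm_dp_mst_topology_try_get_mstb(mstb))
 *		return;	// mstb has already been removed from the topology
 *	// ... safely inspect or walk mstb here ...
 *	drm_dp_mst_topology_put_mstb(mstb);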
1801 * 1802 * See also: 1803 * drm_dp_mst_topology_try_get_mstb() 1804 * drm_dp_mst_topology_get_mstb() 1805 */ 1806 static void 1807 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) 1808 { 1809 topology_ref_history_lock(mstb->mgr); 1810 1811 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); 1812 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT); 1813 1814 topology_ref_history_unlock(mstb->mgr); 1815 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); 1816 } 1817 1818 static void drm_dp_destroy_port(struct kref *kref) 1819 { 1820 struct drm_dp_mst_port *port = 1821 container_of(kref, struct drm_dp_mst_port, topology_kref); 1822 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 1823 1824 drm_dp_mst_dump_port_topology_history(port); 1825 1826 /* There's nothing that needs locking to destroy an input port yet */ 1827 if (port->input) { 1828 drm_dp_mst_put_port_malloc(port); 1829 return; 1830 } 1831 1832 drm_edid_free(port->cached_edid); 1833 1834 /* 1835 * we can't destroy the connector here, as we might be holding the 1836 * mode_config.mutex from an EDID retrieval 1837 */ 1838 mutex_lock(&mgr->delayed_destroy_lock); 1839 list_add(&port->next, &mgr->destroy_port_list); 1840 mutex_unlock(&mgr->delayed_destroy_lock); 1841 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); 1842 } 1843 1844 /** 1845 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a 1846 * port unless it's zero 1847 * @port: &struct drm_dp_mst_port to increment the topology refcount of 1848 * 1849 * Attempts to grab a topology reference to @port, if it hasn't yet been 1850 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached 1851 * 0). Holding a topology reference implies that a malloc reference will be 1852 * held to @port as long as the user holds the topology reference. 1853 * 1854 * Care should be taken to ensure that the user has at least one malloc 1855 * reference to @port. If you already have a topology reference to @port, you 1856 * should use drm_dp_mst_topology_get_port() instead. 1857 * 1858 * See also: 1859 * drm_dp_mst_topology_get_port() 1860 * drm_dp_mst_topology_put_port() 1861 * 1862 * Returns: 1863 * * 1: A topology reference was grabbed successfully 1864 * * 0: @port is no longer in the topology, no reference was grabbed 1865 */ 1866 static int __must_check 1867 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) 1868 { 1869 int ret; 1870 1871 topology_ref_history_lock(port->mgr); 1872 ret = kref_get_unless_zero(&port->topology_kref); 1873 if (ret) { 1874 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); 1875 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); 1876 } 1877 1878 topology_ref_history_unlock(port->mgr); 1879 return ret; 1880 } 1881 1882 /** 1883 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port 1884 * @port: The &struct drm_dp_mst_port to increment the topology refcount of 1885 * 1886 * Increments &drm_dp_mst_port.topology_refcount without checking whether or 1887 * not it's already reached 0. This is only valid to use in scenarios where 1888 * you are already guaranteed to have at least one active topology reference 1889 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used. 
1890 * 1891 * See also: 1892 * drm_dp_mst_topology_try_get_port() 1893 * drm_dp_mst_topology_put_port() 1894 */ 1895 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) 1896 { 1897 topology_ref_history_lock(port->mgr); 1898 1899 WARN_ON(kref_read(&port->topology_kref) == 0); 1900 kref_get(&port->topology_kref); 1901 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); 1902 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); 1903 1904 topology_ref_history_unlock(port->mgr); 1905 } 1906 1907 /** 1908 * drm_dp_mst_topology_put_port() - release a topology reference to a port 1909 * @port: The &struct drm_dp_mst_port to release the topology reference from 1910 * 1911 * Releases a topology reference from @port by decrementing 1912 * &drm_dp_mst_port.topology_kref. 1913 * 1914 * See also: 1915 * drm_dp_mst_topology_try_get_port() 1916 * drm_dp_mst_topology_get_port() 1917 */ 1918 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) 1919 { 1920 topology_ref_history_lock(port->mgr); 1921 1922 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); 1923 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); 1924 1925 topology_ref_history_unlock(port->mgr); 1926 kref_put(&port->topology_kref, drm_dp_destroy_port); 1927 } 1928 1929 static struct drm_dp_mst_branch * 1930 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb, 1931 struct drm_dp_mst_branch *to_find) 1932 { 1933 struct drm_dp_mst_port *port; 1934 struct drm_dp_mst_branch *rmstb; 1935 1936 if (to_find == mstb) 1937 return mstb; 1938 1939 list_for_each_entry(port, &mstb->ports, next) { 1940 if (port->mstb) { 1941 rmstb = drm_dp_mst_topology_get_mstb_validated_locked( 1942 port->mstb, to_find); 1943 if (rmstb) 1944 return rmstb; 1945 } 1946 } 1947 return NULL; 1948 } 1949 1950 static struct drm_dp_mst_branch * 1951 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr, 1952 struct drm_dp_mst_branch *mstb) 1953 { 1954 struct drm_dp_mst_branch *rmstb = NULL; 1955 1956 mutex_lock(&mgr->lock); 1957 if (mgr->mst_primary) { 1958 rmstb = drm_dp_mst_topology_get_mstb_validated_locked( 1959 mgr->mst_primary, mstb); 1960 1961 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb)) 1962 rmstb = NULL; 1963 } 1964 mutex_unlock(&mgr->lock); 1965 return rmstb; 1966 } 1967 1968 static struct drm_dp_mst_port * 1969 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb, 1970 struct drm_dp_mst_port *to_find) 1971 { 1972 struct drm_dp_mst_port *port, *mport; 1973 1974 list_for_each_entry(port, &mstb->ports, next) { 1975 if (port == to_find) 1976 return port; 1977 1978 if (port->mstb) { 1979 mport = drm_dp_mst_topology_get_port_validated_locked( 1980 port->mstb, to_find); 1981 if (mport) 1982 return mport; 1983 } 1984 } 1985 return NULL; 1986 } 1987 1988 static struct drm_dp_mst_port * 1989 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, 1990 struct drm_dp_mst_port *port) 1991 { 1992 struct drm_dp_mst_port *rport = NULL; 1993 1994 mutex_lock(&mgr->lock); 1995 if (mgr->mst_primary) { 1996 rport = drm_dp_mst_topology_get_port_validated_locked( 1997 mgr->mst_primary, port); 1998 1999 if (rport && !drm_dp_mst_topology_try_get_port(rport)) 2000 rport = NULL; 2001 } 2002 mutex_unlock(&mgr->lock); 2003 return rport; 2004 } 2005 2006 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) 2007 { 2008 struct drm_dp_mst_port *port; 2009 
int ret; 2010 2011 list_for_each_entry(port, &mstb->ports, next) { 2012 if (port->port_num == port_num) { 2013 ret = drm_dp_mst_topology_try_get_port(port); 2014 return ret ? port : NULL; 2015 } 2016 } 2017 2018 return NULL; 2019 } 2020 2021 /* 2022 * calculate a new RAD for this MST branch device 2023 * if parent has an LCT of 2 then it has 1 nibble of RAD, 2024 * if parent has an LCT of 3 then it has 2 nibbles of RAD, 2025 */ 2026 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, 2027 u8 *rad) 2028 { 2029 int parent_lct = port->parent->lct; 2030 int shift = 4; 2031 int idx = (parent_lct - 1) / 2; 2032 2033 if (parent_lct > 1) { 2034 memcpy(rad, port->parent->rad, idx + 1); 2035 shift = (parent_lct % 2) ? 4 : 0; 2036 } else 2037 rad[0] = 0; 2038 2039 rad[idx] |= port->port_num << shift; 2040 return parent_lct + 1; 2041 } 2042 2043 static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs) 2044 { 2045 switch (pdt) { 2046 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2047 case DP_PEER_DEVICE_SST_SINK: 2048 return true; 2049 case DP_PEER_DEVICE_MST_BRANCHING: 2050 /* For sst branch device */ 2051 if (!mcs) 2052 return true; 2053 2054 return false; 2055 } 2056 return true; 2057 } 2058 2059 static int 2060 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, 2061 bool new_mcs) 2062 { 2063 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 2064 struct drm_dp_mst_branch *mstb; 2065 u8 rad[8], lct; 2066 int ret = 0; 2067 2068 if (port->pdt == new_pdt && port->mcs == new_mcs) 2069 return 0; 2070 2071 /* Teardown the old pdt, if there is one */ 2072 if (port->pdt != DP_PEER_DEVICE_NONE) { 2073 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 2074 /* 2075 * If the new PDT would also have an i2c bus, 2076 * don't bother with reregistering it 2077 */ 2078 if (new_pdt != DP_PEER_DEVICE_NONE && 2079 drm_dp_mst_is_end_device(new_pdt, new_mcs)) { 2080 port->pdt = new_pdt; 2081 port->mcs = new_mcs; 2082 return 0; 2083 } 2084 2085 /* remove i2c over sideband */ 2086 drm_dp_mst_unregister_i2c_bus(port); 2087 } else { 2088 mutex_lock(&mgr->lock); 2089 drm_dp_mst_topology_put_mstb(port->mstb); 2090 port->mstb = NULL; 2091 mutex_unlock(&mgr->lock); 2092 } 2093 } 2094 2095 port->pdt = new_pdt; 2096 port->mcs = new_mcs; 2097 2098 if (port->pdt != DP_PEER_DEVICE_NONE) { 2099 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 2100 /* add i2c over sideband */ 2101 ret = drm_dp_mst_register_i2c_bus(port); 2102 } else { 2103 lct = drm_dp_calculate_rad(port, rad); 2104 mstb = drm_dp_add_mst_branch_device(lct, rad); 2105 if (!mstb) { 2106 ret = -ENOMEM; 2107 drm_err(mgr->dev, "Failed to create MSTB for port %p", port); 2108 goto out; 2109 } 2110 2111 mutex_lock(&mgr->lock); 2112 port->mstb = mstb; 2113 mstb->mgr = port->mgr; 2114 mstb->port_parent = port; 2115 2116 /* 2117 * Make sure this port's memory allocation stays 2118 * around until its child MSTB releases it 2119 */ 2120 drm_dp_mst_get_port_malloc(port); 2121 mutex_unlock(&mgr->lock); 2122 2123 /* And make sure we send a link address for this */ 2124 ret = 1; 2125 } 2126 } 2127 2128 out: 2129 if (ret < 0) 2130 port->pdt = DP_PEER_DEVICE_NONE; 2131 return ret; 2132 } 2133 2134 /** 2135 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband 2136 * @aux: Fake sideband AUX CH 2137 * @offset: address of the (first) register to read 2138 * @buffer: buffer to store the register values 2139 * @size: number of bytes in @buffer 2140 * 2141 * Performs the same functionality for remote devices via 2142 * sideband messaging as 
drm_dp_dpcd_read() does for local 2143 * devices via actual AUX CH. 2144 * 2145 * Return: Number of bytes read, or negative error code on failure. 2146 */ 2147 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, 2148 unsigned int offset, void *buffer, size_t size) 2149 { 2150 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, 2151 aux); 2152 2153 return drm_dp_send_dpcd_read(port->mgr, port, 2154 offset, size, buffer); 2155 } 2156 2157 /** 2158 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband 2159 * @aux: Fake sideband AUX CH 2160 * @offset: address of the (first) register to write 2161 * @buffer: buffer containing the values to write 2162 * @size: number of bytes in @buffer 2163 * 2164 * Performs the same functionality for remote devices via 2165 * sideband messaging as drm_dp_dpcd_write() does for local 2166 * devices via actual AUX CH. 2167 * 2168 * Return: number of bytes written on success, negative error code on failure. 2169 */ 2170 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, 2171 unsigned int offset, void *buffer, size_t size) 2172 { 2173 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, 2174 aux); 2175 2176 return drm_dp_send_dpcd_write(port->mgr, port, 2177 offset, size, buffer); 2178 } 2179 2180 static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid) 2181 { 2182 int ret = 0; 2183 2184 guid_copy(&mstb->guid, guid); 2185 2186 if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) { 2187 u8 buf[UUID_SIZE]; 2188 2189 export_guid(buf, &mstb->guid); 2190 2191 if (mstb->port_parent) { 2192 ret = drm_dp_send_dpcd_write(mstb->mgr, 2193 mstb->port_parent, 2194 DP_GUID, sizeof(buf), buf); 2195 } else { 2196 ret = drm_dp_dpcd_write(mstb->mgr->aux, 2197 DP_GUID, buf, sizeof(buf)); 2198 } 2199 } 2200 2201 if (ret < 16 && ret > 0) 2202 return -EPROTO; 2203 2204 return ret == 16 ? 0 : ret; 2205 } 2206 2207 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, 2208 int pnum, 2209 char *proppath, 2210 size_t proppath_size) 2211 { 2212 int i; 2213 char temp[8]; 2214 2215 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); 2216 for (i = 0; i < (mstb->lct - 1); i++) { 2217 int shift = (i % 2) ? 0 : 4; 2218 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; 2219 2220 snprintf(temp, sizeof(temp), "-%d", port_num); 2221 strlcat(proppath, temp, proppath_size); 2222 } 2223 snprintf(temp, sizeof(temp), "-%d", pnum); 2224 strlcat(proppath, temp, proppath_size); 2225 } 2226 2227 /** 2228 * drm_dp_mst_connector_late_register() - Late MST connector registration 2229 * @connector: The MST connector 2230 * @port: The MST port for this connector 2231 * 2232 * Helper to register the remote aux device for this MST port. Drivers should 2233 * call this from their mst connector's late_register hook to enable MST aux 2234 * devices. 2235 * 2236 * Return: 0 on success, negative error code on failure. 
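 *
 * A minimal sketch of the intended call site, assuming a hypothetical driver
 * connector wrapper (my_mst_connector and to_my_mst_connector() are
 * illustrative names, not part of this helper's API)::
 *
 *	static int my_mst_connector_late_register(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *mst_conn = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector,
 *							  mst_conn->port);
 *	}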
2237 */ 2238 int drm_dp_mst_connector_late_register(struct drm_connector *connector, 2239 struct drm_dp_mst_port *port) 2240 { 2241 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n", 2242 port->aux.name, connector->kdev->kobj.name); 2243 2244 port->aux.dev = connector->kdev; 2245 return drm_dp_aux_register_devnode(&port->aux); 2246 } 2247 EXPORT_SYMBOL(drm_dp_mst_connector_late_register); 2248 2249 /** 2250 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration 2251 * @connector: The MST connector 2252 * @port: The MST port for this connector 2253 * 2254 * Helper to unregister the remote aux device for this MST port, registered by 2255 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst 2256 * connector's early_unregister hook. 2257 */ 2258 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, 2259 struct drm_dp_mst_port *port) 2260 { 2261 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n", 2262 port->aux.name, connector->kdev->kobj.name); 2263 drm_dp_aux_unregister_devnode(&port->aux); 2264 } 2265 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); 2266 2267 static void 2268 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, 2269 struct drm_dp_mst_port *port) 2270 { 2271 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 2272 char proppath[255]; 2273 int ret; 2274 2275 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); 2276 port->connector = mgr->cbs->add_connector(mgr, port, proppath); 2277 if (!port->connector) { 2278 ret = -ENOMEM; 2279 goto error; 2280 } 2281 2282 if (port->pdt != DP_PEER_DEVICE_NONE && 2283 drm_dp_mst_is_end_device(port->pdt, port->mcs) && 2284 drm_dp_mst_port_is_logical(port)) 2285 port->cached_edid = drm_edid_read_ddc(port->connector, 2286 &port->aux.ddc); 2287 2288 drm_connector_register(port->connector); 2289 return; 2290 2291 error: 2292 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret); 2293 } 2294 2295 /* 2296 * Drop a topology reference, and unlink the port from the in-memory topology 2297 * layout 2298 */ 2299 static void 2300 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, 2301 struct drm_dp_mst_port *port) 2302 { 2303 mutex_lock(&mgr->lock); 2304 port->parent->num_ports--; 2305 list_del(&port->next); 2306 mutex_unlock(&mgr->lock); 2307 drm_dp_mst_topology_put_port(port); 2308 } 2309 2310 static struct drm_dp_mst_port * 2311 drm_dp_mst_add_port(struct drm_device *dev, 2312 struct drm_dp_mst_topology_mgr *mgr, 2313 struct drm_dp_mst_branch *mstb, u8 port_number) 2314 { 2315 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL); 2316 2317 if (!port) 2318 return NULL; 2319 2320 kref_init(&port->topology_kref); 2321 kref_init(&port->malloc_kref); 2322 port->parent = mstb; 2323 port->port_num = port_number; 2324 port->mgr = mgr; 2325 port->aux.name = "DPMST"; 2326 port->aux.dev = dev->dev; 2327 port->aux.is_remote = true; 2328 2329 /* initialize the MST downstream port's AUX crc work queue */ 2330 port->aux.drm_dev = dev; 2331 drm_dp_remote_aux_init(&port->aux); 2332 2333 /* 2334 * Make sure the memory allocation for our parent branch stays 2335 * around until our own memory allocation is released 2336 */ 2337 drm_dp_mst_get_mstb_malloc(mstb); 2338 2339 return port; 2340 } 2341 2342 static int 2343 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, 2344 struct drm_device *dev, 2345 struct drm_dp_link_addr_reply_port *port_msg) 2346 { 2347 struct 
drm_dp_mst_topology_mgr *mgr = mstb->mgr; 2348 struct drm_dp_mst_port *port; 2349 int ret; 2350 u8 new_pdt = DP_PEER_DEVICE_NONE; 2351 bool new_mcs = 0; 2352 bool created = false, send_link_addr = false, changed = false; 2353 2354 port = drm_dp_get_port(mstb, port_msg->port_number); 2355 if (!port) { 2356 port = drm_dp_mst_add_port(dev, mgr, mstb, 2357 port_msg->port_number); 2358 if (!port) 2359 return -ENOMEM; 2360 created = true; 2361 changed = true; 2362 } else if (!port->input && port_msg->input_port && port->connector) { 2363 /* Since port->connector can't be changed here, we create a 2364 * new port if input_port changes from 0 to 1 2365 */ 2366 drm_dp_mst_topology_unlink_port(mgr, port); 2367 drm_dp_mst_topology_put_port(port); 2368 port = drm_dp_mst_add_port(dev, mgr, mstb, 2369 port_msg->port_number); 2370 if (!port) 2371 return -ENOMEM; 2372 changed = true; 2373 created = true; 2374 } else if (port->input && !port_msg->input_port) { 2375 changed = true; 2376 } else if (port->connector) { 2377 /* We're updating a port that's exposed to userspace, so do it 2378 * under lock 2379 */ 2380 drm_modeset_lock(&mgr->base.lock, NULL); 2381 2382 changed = port->ddps != port_msg->ddps || 2383 (port->ddps && 2384 (port->ldps != port_msg->legacy_device_plug_status || 2385 port->dpcd_rev != port_msg->dpcd_revision || 2386 port->mcs != port_msg->mcs || 2387 port->pdt != port_msg->peer_device_type || 2388 port->num_sdp_stream_sinks != 2389 port_msg->num_sdp_stream_sinks)); 2390 } 2391 2392 port->input = port_msg->input_port; 2393 if (!port->input) 2394 new_pdt = port_msg->peer_device_type; 2395 new_mcs = port_msg->mcs; 2396 port->ddps = port_msg->ddps; 2397 port->ldps = port_msg->legacy_device_plug_status; 2398 port->dpcd_rev = port_msg->dpcd_revision; 2399 port->num_sdp_streams = port_msg->num_sdp_streams; 2400 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; 2401 2402 /* manage mstb port lists with mgr lock - take a reference 2403 for this list */ 2404 if (created) { 2405 mutex_lock(&mgr->lock); 2406 drm_dp_mst_topology_get_port(port); 2407 list_add(&port->next, &mstb->ports); 2408 mstb->num_ports++; 2409 mutex_unlock(&mgr->lock); 2410 } 2411 2412 /* 2413 * Reprobe PBN caps on both hotplug, and when re-probing the link 2414 * for our parent mstb 2415 */ 2416 if (port->ddps && !port->input) { 2417 ret = drm_dp_send_enum_path_resources(mgr, mstb, 2418 port); 2419 if (ret == 1) 2420 changed = true; 2421 } else { 2422 port->full_pbn = 0; 2423 } 2424 2425 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); 2426 if (ret == 1) { 2427 send_link_addr = true; 2428 } else if (ret < 0) { 2429 drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret); 2430 goto fail; 2431 } 2432 2433 /* 2434 * If this port wasn't just created, then we're reprobing because 2435 * we're coming out of suspend. 
In this case, always resend the link 2436 * address if there's an MSTB on this port 2437 */ 2438 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING && 2439 port->mcs) 2440 send_link_addr = true; 2441 2442 if (port->connector) 2443 drm_modeset_unlock(&mgr->base.lock); 2444 else if (!port->input) 2445 drm_dp_mst_port_add_connector(mstb, port); 2446 2447 if (send_link_addr && port->mstb) { 2448 ret = drm_dp_send_link_address(mgr, port->mstb); 2449 if (ret == 1) /* MSTB below us changed */ 2450 changed = true; 2451 else if (ret < 0) 2452 goto fail_put; 2453 } 2454 2455 /* put reference to this port */ 2456 drm_dp_mst_topology_put_port(port); 2457 return changed; 2458 2459 fail: 2460 drm_dp_mst_topology_unlink_port(mgr, port); 2461 if (port->connector) 2462 drm_modeset_unlock(&mgr->base.lock); 2463 fail_put: 2464 drm_dp_mst_topology_put_port(port); 2465 return ret; 2466 } 2467 2468 static int 2469 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, 2470 struct drm_dp_connection_status_notify *conn_stat) 2471 { 2472 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 2473 struct drm_dp_mst_port *port; 2474 int old_ddps, ret; 2475 u8 new_pdt; 2476 bool new_mcs; 2477 bool dowork = false, create_connector = false; 2478 2479 port = drm_dp_get_port(mstb, conn_stat->port_number); 2480 if (!port) 2481 return 0; 2482 2483 if (port->connector) { 2484 if (!port->input && conn_stat->input_port) { 2485 /* 2486 * We can't remove a connector from an already exposed 2487 * port, so just throw the port out and make sure we 2488 * reprobe the link address of it's parent MSTB 2489 */ 2490 drm_dp_mst_topology_unlink_port(mgr, port); 2491 mstb->link_address_sent = false; 2492 dowork = true; 2493 goto out; 2494 } 2495 2496 /* Locking is only needed if the port's exposed to userspace */ 2497 drm_modeset_lock(&mgr->base.lock, NULL); 2498 } else if (port->input && !conn_stat->input_port) { 2499 create_connector = true; 2500 /* Reprobe link address so we get num_sdp_streams */ 2501 mstb->link_address_sent = false; 2502 dowork = true; 2503 } 2504 2505 old_ddps = port->ddps; 2506 port->input = conn_stat->input_port; 2507 port->ldps = conn_stat->legacy_device_plug_status; 2508 port->ddps = conn_stat->displayport_device_plug_status; 2509 2510 if (old_ddps != port->ddps) { 2511 if (port->ddps && !port->input) 2512 drm_dp_send_enum_path_resources(mgr, mstb, port); 2513 else 2514 port->full_pbn = 0; 2515 } 2516 2517 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; 2518 new_mcs = conn_stat->message_capability_status; 2519 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); 2520 if (ret == 1) { 2521 dowork = true; 2522 } else if (ret < 0) { 2523 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret); 2524 dowork = false; 2525 } 2526 2527 if (port->connector) 2528 drm_modeset_unlock(&mgr->base.lock); 2529 else if (create_connector) 2530 drm_dp_mst_port_add_connector(mstb, port); 2531 2532 out: 2533 drm_dp_mst_topology_put_port(port); 2534 return dowork; 2535 } 2536 2537 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, 2538 u8 lct, u8 *rad) 2539 { 2540 struct drm_dp_mst_branch *mstb; 2541 struct drm_dp_mst_port *port; 2542 int i, ret; 2543 /* find the port by iterating down */ 2544 2545 mutex_lock(&mgr->lock); 2546 mstb = mgr->mst_primary; 2547 2548 if (!mstb) 2549 goto out; 2550 2551 for (i = 0; i < lct - 1; i++) { 2552 int shift = (i % 2) ? 
0 : 4; 2553 int port_num = (rad[i / 2] >> shift) & 0xf; 2554 2555 list_for_each_entry(port, &mstb->ports, next) { 2556 if (port->port_num == port_num) { 2557 mstb = port->mstb; 2558 if (!mstb) { 2559 drm_err(mgr->dev, 2560 "failed to lookup MSTB with lct %d, rad %02x\n", 2561 lct, rad[0]); 2562 goto out; 2563 } 2564 2565 break; 2566 } 2567 } 2568 } 2569 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2570 if (!ret) 2571 mstb = NULL; 2572 out: 2573 mutex_unlock(&mgr->lock); 2574 return mstb; 2575 } 2576 2577 static struct drm_dp_mst_branch * 2578 get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb, 2579 const guid_t *guid) 2580 { 2581 struct drm_dp_mst_branch *found_mstb; 2582 struct drm_dp_mst_port *port; 2583 2584 if (!mstb) 2585 return NULL; 2586 2587 if (guid_equal(&mstb->guid, guid)) 2588 return mstb; 2589 2590 list_for_each_entry(port, &mstb->ports, next) { 2591 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); 2592 2593 if (found_mstb) 2594 return found_mstb; 2595 } 2596 2597 return NULL; 2598 } 2599 2600 static struct drm_dp_mst_branch * 2601 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, 2602 const guid_t *guid) 2603 { 2604 struct drm_dp_mst_branch *mstb; 2605 int ret; 2606 2607 /* find the port by iterating down */ 2608 mutex_lock(&mgr->lock); 2609 2610 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); 2611 if (mstb) { 2612 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2613 if (!ret) 2614 mstb = NULL; 2615 } 2616 2617 mutex_unlock(&mgr->lock); 2618 return mstb; 2619 } 2620 2621 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 2622 struct drm_dp_mst_branch *mstb) 2623 { 2624 struct drm_dp_mst_port *port; 2625 int ret; 2626 bool changed = false; 2627 2628 if (!mstb->link_address_sent) { 2629 ret = drm_dp_send_link_address(mgr, mstb); 2630 if (ret == 1) 2631 changed = true; 2632 else if (ret < 0) 2633 return ret; 2634 } 2635 2636 list_for_each_entry(port, &mstb->ports, next) { 2637 if (port->input || !port->ddps || !port->mstb) 2638 continue; 2639 2640 ret = drm_dp_check_and_send_link_address(mgr, port->mstb); 2641 if (ret == 1) 2642 changed = true; 2643 else if (ret < 0) 2644 return ret; 2645 } 2646 2647 return changed; 2648 } 2649 2650 static void drm_dp_mst_link_probe_work(struct work_struct *work) 2651 { 2652 struct drm_dp_mst_topology_mgr *mgr = 2653 container_of(work, struct drm_dp_mst_topology_mgr, work); 2654 struct drm_device *dev = mgr->dev; 2655 struct drm_dp_mst_branch *mstb; 2656 int ret; 2657 bool clear_payload_id_table; 2658 2659 mutex_lock(&mgr->probe_lock); 2660 2661 mutex_lock(&mgr->lock); 2662 clear_payload_id_table = !mgr->payload_id_table_cleared; 2663 mgr->payload_id_table_cleared = true; 2664 2665 mstb = mgr->mst_primary; 2666 if (mstb) { 2667 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2668 if (!ret) 2669 mstb = NULL; 2670 } 2671 mutex_unlock(&mgr->lock); 2672 if (!mstb) { 2673 mutex_unlock(&mgr->probe_lock); 2674 return; 2675 } 2676 2677 /* 2678 * Certain branch devices seem to incorrectly report an available_pbn 2679 * of 0 on downstream sinks, even after clearing the 2680 * DP_PAYLOAD_ALLOCATE_* registers in 2681 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C 2682 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make 2683 * things work again. 
2684 */ 2685 if (clear_payload_id_table) { 2686 drm_dbg_kms(dev, "Clearing payload ID table\n"); 2687 drm_dp_send_clear_payload_id_table(mgr, mstb); 2688 } 2689 2690 ret = drm_dp_check_and_send_link_address(mgr, mstb); 2691 drm_dp_mst_topology_put_mstb(mstb); 2692 2693 mutex_unlock(&mgr->probe_lock); 2694 if (ret > 0) 2695 drm_kms_helper_hotplug_event(dev); 2696 } 2697 2698 static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr) 2699 { 2700 queue_work(system_long_wq, &mgr->work); 2701 } 2702 2703 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 2704 guid_t *guid) 2705 { 2706 if (!guid_is_null(guid)) 2707 return true; 2708 2709 guid_gen(guid); 2710 2711 return false; 2712 } 2713 2714 static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, 2715 u8 port_num, u32 offset, u8 num_bytes) 2716 { 2717 struct drm_dp_sideband_msg_req_body req; 2718 2719 req.req_type = DP_REMOTE_DPCD_READ; 2720 req.u.dpcd_read.port_number = port_num; 2721 req.u.dpcd_read.dpcd_address = offset; 2722 req.u.dpcd_read.num_bytes = num_bytes; 2723 drm_dp_encode_sideband_req(&req, msg); 2724 } 2725 2726 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, 2727 bool up, u8 *msg, int len) 2728 { 2729 int ret; 2730 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; 2731 int tosend, total, offset; 2732 int retries = 0; 2733 2734 retry: 2735 total = len; 2736 offset = 0; 2737 do { 2738 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); 2739 2740 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, 2741 &msg[offset], 2742 tosend); 2743 if (ret != tosend) { 2744 if (ret == -EIO && retries < 5) { 2745 retries++; 2746 goto retry; 2747 } 2748 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); 2749 2750 return -EIO; 2751 } 2752 offset += tosend; 2753 total -= tosend; 2754 } while (total > 0); 2755 return 0; 2756 } 2757 2758 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, 2759 struct drm_dp_sideband_msg_tx *txmsg) 2760 { 2761 struct drm_dp_mst_branch *mstb = txmsg->dst; 2762 u8 req_type; 2763 2764 req_type = txmsg->msg[0] & 0x7f; 2765 if (req_type == DP_CONNECTION_STATUS_NOTIFY || 2766 req_type == DP_RESOURCE_STATUS_NOTIFY || 2767 req_type == DP_CLEAR_PAYLOAD_ID_TABLE) 2768 hdr->broadcast = 1; 2769 else 2770 hdr->broadcast = 0; 2771 hdr->path_msg = txmsg->path_msg; 2772 if (hdr->broadcast) { 2773 hdr->lct = 1; 2774 hdr->lcr = 6; 2775 } else { 2776 hdr->lct = mstb->lct; 2777 hdr->lcr = mstb->lct - 1; 2778 } 2779 2780 memcpy(hdr->rad, mstb->rad, hdr->lct / 2); 2781 2782 return 0; 2783 } 2784 /* 2785 * process a single block of the next message in the sideband queue 2786 */ 2787 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 2788 struct drm_dp_sideband_msg_tx *txmsg, 2789 bool up) 2790 { 2791 u8 chunk[48]; 2792 struct drm_dp_sideband_msg_hdr hdr; 2793 int len, space, idx, tosend; 2794 int ret; 2795 2796 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT) 2797 return 0; 2798 2799 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); 2800 2801 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) 2802 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; 2803 2804 /* make hdr from dst mst */ 2805 ret = set_hdr_from_dst_qlock(&hdr, txmsg); 2806 if (ret < 0) 2807 return ret; 2808 2809 /* amount left to send in this message */ 2810 len = txmsg->cur_len - txmsg->cur_offset; 2811 2812 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ 2813 space = 48 - 1 - 
drm_dp_calc_sb_hdr_size(&hdr); 2814 2815 tosend = min(len, space); 2816 if (len == txmsg->cur_len) 2817 hdr.somt = 1; 2818 if (space >= len) 2819 hdr.eomt = 1; 2820 2821 2822 hdr.msg_len = tosend + 1; 2823 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); 2824 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); 2825 /* add crc at end */ 2826 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); 2827 idx += tosend + 1; 2828 2829 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); 2830 if (ret) { 2831 if (drm_debug_enabled(DRM_UT_DP)) { 2832 struct drm_printer p = drm_dbg_printer(mgr->dev, 2833 DRM_UT_DP, 2834 DBG_PREFIX); 2835 2836 drm_printf(&p, "sideband msg failed to send\n"); 2837 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); 2838 } 2839 return ret; 2840 } 2841 2842 txmsg->cur_offset += tosend; 2843 if (txmsg->cur_offset == txmsg->cur_len) { 2844 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; 2845 return 1; 2846 } 2847 return 0; 2848 } 2849 2850 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 2851 { 2852 struct drm_dp_sideband_msg_tx *txmsg; 2853 int ret; 2854 2855 WARN_ON(!mutex_is_locked(&mgr->qlock)); 2856 2857 /* construct a chunk from the first msg in the tx_msg queue */ 2858 if (list_empty(&mgr->tx_msg_downq)) 2859 return; 2860 2861 txmsg = list_first_entry(&mgr->tx_msg_downq, 2862 struct drm_dp_sideband_msg_tx, next); 2863 ret = process_single_tx_qlock(mgr, txmsg, false); 2864 if (ret < 0) { 2865 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret); 2866 list_del(&txmsg->next); 2867 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 2868 wake_up_all(&mgr->tx_waitq); 2869 } 2870 } 2871 2872 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, 2873 struct drm_dp_sideband_msg_tx *txmsg) 2874 { 2875 mutex_lock(&mgr->qlock); 2876 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); 2877 2878 if (drm_debug_enabled(DRM_UT_DP)) { 2879 struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP, 2880 DBG_PREFIX); 2881 2882 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); 2883 } 2884 2885 if (list_is_singular(&mgr->tx_msg_downq)) 2886 process_single_down_tx_qlock(mgr); 2887 mutex_unlock(&mgr->qlock); 2888 } 2889 2890 static void 2891 drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr, 2892 struct drm_dp_link_address_ack_reply *reply) 2893 { 2894 struct drm_dp_link_addr_reply_port *port_reply; 2895 int i; 2896 2897 for (i = 0; i < reply->nports; i++) { 2898 port_reply = &reply->ports[i]; 2899 drm_dbg_kms(mgr->dev, 2900 "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", 2901 i, 2902 port_reply->input_port, 2903 port_reply->peer_device_type, 2904 port_reply->port_number, 2905 port_reply->dpcd_revision, 2906 port_reply->mcs, 2907 port_reply->ddps, 2908 port_reply->legacy_device_plug_status, 2909 port_reply->num_sdp_streams, 2910 port_reply->num_sdp_stream_sinks); 2911 } 2912 } 2913 2914 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 2915 struct drm_dp_mst_branch *mstb) 2916 { 2917 struct drm_dp_sideband_msg_tx *txmsg; 2918 struct drm_dp_link_address_ack_reply *reply; 2919 struct drm_dp_mst_port *port, *tmp; 2920 int i, ret, port_mask = 0; 2921 bool changed = false; 2922 2923 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 2924 if (!txmsg) 2925 return -ENOMEM; 2926 2927 txmsg->dst = mstb; 2928 build_link_address(txmsg); 2929 2930 mstb->link_address_sent = true; 2931 drm_dp_queue_down_tx(mgr, txmsg); 2932 2933 /* FIXME: Actually do some real error handling here */ 2934 ret = 
drm_dp_mst_wait_tx_reply(mstb, txmsg); 2935 if (ret < 0) { 2936 drm_err(mgr->dev, "Sending link address failed with %d\n", ret); 2937 goto out; 2938 } 2939 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 2940 drm_err(mgr->dev, "link address NAK received\n"); 2941 ret = -EIO; 2942 goto out; 2943 } 2944 2945 reply = &txmsg->reply.u.link_addr; 2946 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports); 2947 drm_dp_dump_link_address(mgr, reply); 2948 2949 ret = drm_dp_check_mstb_guid(mstb, &reply->guid); 2950 if (ret) { 2951 char buf[64]; 2952 2953 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf)); 2954 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret); 2955 goto out; 2956 } 2957 2958 for (i = 0; i < reply->nports; i++) { 2959 port_mask |= BIT(reply->ports[i].port_number); 2960 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, 2961 &reply->ports[i]); 2962 if (ret == 1) 2963 changed = true; 2964 else if (ret < 0) 2965 goto out; 2966 } 2967 2968 /* Prune any ports that are currently a part of mstb in our in-memory 2969 * topology, but were not seen in this link address. Usually this 2970 * means that they were removed while the topology was out of sync, 2971 * e.g. during suspend/resume 2972 */ 2973 mutex_lock(&mgr->lock); 2974 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { 2975 if (port_mask & BIT(port->port_num)) 2976 continue; 2977 2978 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n", 2979 port->port_num); 2980 list_del(&port->next); 2981 drm_dp_mst_topology_put_port(port); 2982 changed = true; 2983 } 2984 mutex_unlock(&mgr->lock); 2985 2986 out: 2987 if (ret < 0) 2988 mstb->link_address_sent = false; 2989 kfree(txmsg); 2990 return ret < 0 ? ret : changed; 2991 } 2992 2993 static void 2994 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr, 2995 struct drm_dp_mst_branch *mstb) 2996 { 2997 struct drm_dp_sideband_msg_tx *txmsg; 2998 int ret; 2999 3000 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3001 if (!txmsg) 3002 return; 3003 3004 txmsg->dst = mstb; 3005 build_clear_payload_id_table(txmsg); 3006 3007 drm_dp_queue_down_tx(mgr, txmsg); 3008 3009 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3010 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3011 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n"); 3012 3013 kfree(txmsg); 3014 } 3015 3016 static int 3017 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 3018 struct drm_dp_mst_branch *mstb, 3019 struct drm_dp_mst_port *port) 3020 { 3021 struct drm_dp_enum_path_resources_ack_reply *path_res; 3022 struct drm_dp_sideband_msg_tx *txmsg; 3023 int ret; 3024 3025 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3026 if (!txmsg) 3027 return -ENOMEM; 3028 3029 txmsg->dst = mstb; 3030 build_enum_path_resources(txmsg, port->port_num); 3031 3032 drm_dp_queue_down_tx(mgr, txmsg); 3033 3034 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3035 if (ret > 0) { 3036 ret = 0; 3037 path_res = &txmsg->reply.u.path_resources; 3038 3039 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 3040 drm_dbg_kms(mgr->dev, "enum path resources nak received\n"); 3041 } else { 3042 if (port->port_num != path_res->port_number) 3043 DRM_ERROR("got incorrect port in response\n"); 3044 3045 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n", 3046 path_res->port_number, 3047 path_res->full_payload_bw_number, 3048 path_res->avail_payload_bw_number); 3049 3050 /* 3051 * If something changed, make sure we send a 3052 * hotplug 3053 */ 
3054 if (port->full_pbn != path_res->full_payload_bw_number || 3055 port->fec_capable != path_res->fec_capable) 3056 ret = 1; 3057 3058 port->full_pbn = path_res->full_payload_bw_number; 3059 port->fec_capable = path_res->fec_capable; 3060 } 3061 } 3062 3063 kfree(txmsg); 3064 return ret; 3065 } 3066 3067 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) 3068 { 3069 if (!mstb->port_parent) 3070 return NULL; 3071 3072 if (mstb->port_parent->mstb != mstb) 3073 return mstb->port_parent; 3074 3075 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); 3076 } 3077 3078 /* 3079 * Searches upwards in the topology starting from mstb to try to find the 3080 * closest available parent of mstb that's still connected to the rest of the 3081 * topology. This can be used in order to perform operations like releasing 3082 * payloads, where the branch device which owned the payload may no longer be 3083 * around and thus would require that the payload on the last living relative 3084 * be freed instead. 3085 */ 3086 static struct drm_dp_mst_branch * 3087 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, 3088 struct drm_dp_mst_branch *mstb, 3089 int *port_num) 3090 { 3091 struct drm_dp_mst_branch *rmstb = NULL; 3092 struct drm_dp_mst_port *found_port; 3093 3094 mutex_lock(&mgr->lock); 3095 if (!mgr->mst_primary) 3096 goto out; 3097 3098 do { 3099 found_port = drm_dp_get_last_connected_port_to_mstb(mstb); 3100 if (!found_port) 3101 break; 3102 3103 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { 3104 rmstb = found_port->parent; 3105 *port_num = found_port->port_num; 3106 } else { 3107 /* Search again, starting from this parent */ 3108 mstb = found_port->parent; 3109 } 3110 } while (!rmstb); 3111 out: 3112 mutex_unlock(&mgr->lock); 3113 return rmstb; 3114 } 3115 3116 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, 3117 struct drm_dp_mst_port *port, 3118 int id, 3119 int pbn) 3120 { 3121 struct drm_dp_sideband_msg_tx *txmsg; 3122 struct drm_dp_mst_branch *mstb; 3123 int ret, port_num; 3124 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 3125 int i; 3126 3127 port_num = port->port_num; 3128 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3129 if (!mstb) { 3130 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, 3131 port->parent, 3132 &port_num); 3133 3134 if (!mstb) 3135 return -EINVAL; 3136 } 3137 3138 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3139 if (!txmsg) { 3140 ret = -ENOMEM; 3141 goto fail_put; 3142 } 3143 3144 for (i = 0; i < port->num_sdp_streams; i++) 3145 sinks[i] = i; 3146 3147 txmsg->dst = mstb; 3148 build_allocate_payload(txmsg, port_num, 3149 id, 3150 pbn, port->num_sdp_streams, sinks); 3151 3152 drm_dp_queue_down_tx(mgr, txmsg); 3153 3154 /* 3155 * FIXME: there is a small chance that between getting the last 3156 * connected mstb and sending the payload message, the last connected 3157 * mstb could also be removed from the topology. In the future, this 3158 * needs to be fixed by restarting the 3159 * drm_dp_get_last_connected_port_and_mstb() search in the event of a 3160 * timeout if the topology is still connected to the system. 
3161 */ 3162 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3163 if (ret > 0) { 3164 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3165 ret = -EINVAL; 3166 else 3167 ret = 0; 3168 } 3169 kfree(txmsg); 3170 fail_put: 3171 drm_dp_mst_topology_put_mstb(mstb); 3172 return ret; 3173 } 3174 3175 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, 3176 struct drm_dp_mst_port *port, bool power_up) 3177 { 3178 struct drm_dp_sideband_msg_tx *txmsg; 3179 int ret; 3180 3181 port = drm_dp_mst_topology_get_port_validated(mgr, port); 3182 if (!port) 3183 return -EINVAL; 3184 3185 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3186 if (!txmsg) { 3187 drm_dp_mst_topology_put_port(port); 3188 return -ENOMEM; 3189 } 3190 3191 txmsg->dst = port->parent; 3192 build_power_updown_phy(txmsg, port->port_num, power_up); 3193 drm_dp_queue_down_tx(mgr, txmsg); 3194 3195 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg); 3196 if (ret > 0) { 3197 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3198 ret = -EINVAL; 3199 else 3200 ret = 0; 3201 } 3202 kfree(txmsg); 3203 drm_dp_mst_topology_put_port(port); 3204 3205 return ret; 3206 } 3207 EXPORT_SYMBOL(drm_dp_send_power_updown_phy); 3208 3209 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, 3210 struct drm_dp_mst_port *port, 3211 struct drm_dp_query_stream_enc_status_ack_reply *status) 3212 { 3213 struct drm_dp_mst_topology_state *state; 3214 struct drm_dp_mst_atomic_payload *payload; 3215 struct drm_dp_sideband_msg_tx *txmsg; 3216 u8 nonce[7]; 3217 int ret; 3218 3219 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3220 if (!txmsg) 3221 return -ENOMEM; 3222 3223 port = drm_dp_mst_topology_get_port_validated(mgr, port); 3224 if (!port) { 3225 ret = -EINVAL; 3226 goto out_get_port; 3227 } 3228 3229 get_random_bytes(nonce, sizeof(nonce)); 3230 3231 drm_modeset_lock(&mgr->base.lock, NULL); 3232 state = to_drm_dp_mst_topology_state(mgr->base.state); 3233 payload = drm_atomic_get_mst_payload_state(state, port); 3234 3235 /* 3236 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message 3237 * transaction at the MST Branch device directly connected to the 3238 * Source" 3239 */ 3240 txmsg->dst = mgr->mst_primary; 3241 3242 build_query_stream_enc_status(txmsg, payload->vcpi, nonce); 3243 3244 drm_dp_queue_down_tx(mgr, txmsg); 3245 3246 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg); 3247 if (ret < 0) { 3248 goto out; 3249 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 3250 drm_dbg_kms(mgr->dev, "query encryption status nak received\n"); 3251 ret = -ENXIO; 3252 goto out; 3253 } 3254 3255 ret = 0; 3256 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status)); 3257 3258 out: 3259 drm_modeset_unlock(&mgr->base.lock); 3260 drm_dp_mst_topology_put_port(port); 3261 out_get_port: 3262 kfree(txmsg); 3263 return ret; 3264 } 3265 EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status); 3266 3267 static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr, 3268 struct drm_dp_mst_atomic_payload *payload) 3269 { 3270 return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 3271 payload->time_slots); 3272 } 3273 3274 static int drm_dp_create_payload_to_remote(struct drm_dp_mst_topology_mgr *mgr, 3275 struct drm_dp_mst_atomic_payload *payload) 3276 { 3277 int ret; 3278 struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); 3279 3280 if (!port) 3281 return -EIO; 3282 3283 ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, 
payload->pbn); 3284 drm_dp_mst_topology_put_port(port); 3285 return ret; 3286 } 3287 3288 static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_mgr *mgr, 3289 struct drm_dp_mst_topology_state *mst_state, 3290 struct drm_dp_mst_atomic_payload *payload) 3291 { 3292 drm_dbg_kms(mgr->dev, "\n"); 3293 3294 /* it's okay for these to fail */ 3295 if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE) { 3296 drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0); 3297 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP; 3298 } 3299 3300 if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) 3301 drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0); 3302 } 3303 3304 /** 3305 * drm_dp_add_payload_part1() - Execute payload update part 1 3306 * @mgr: Manager to use. 3307 * @mst_state: The MST atomic state 3308 * @payload: The payload to write 3309 * 3310 * Determines the starting time slot for the given payload, and programs the VCPI for this payload 3311 * into the DPCD of DPRX. After calling this, the driver should generate ACT and payload packets. 3312 * 3313 * Returns: 0 on success, error code on failure. 3314 */ 3315 int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr, 3316 struct drm_dp_mst_topology_state *mst_state, 3317 struct drm_dp_mst_atomic_payload *payload) 3318 { 3319 struct drm_dp_mst_port *port; 3320 int ret; 3321 3322 /* Update mst mgr info */ 3323 if (mgr->payload_count == 0) 3324 mgr->next_start_slot = mst_state->start_slot; 3325 3326 payload->vc_start_slot = mgr->next_start_slot; 3327 3328 mgr->payload_count++; 3329 mgr->next_start_slot += payload->time_slots; 3330 3331 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL; 3332 3333 /* Allocate payload to immediate downstream facing port */ 3334 port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); 3335 if (!port) { 3336 drm_dbg_kms(mgr->dev, 3337 "VCPI %d for port %p not in topology, not creating a payload to remote\n", 3338 payload->vcpi, payload->port); 3339 return -EIO; 3340 } 3341 3342 ret = drm_dp_create_payload_at_dfp(mgr, payload); 3343 if (ret < 0) { 3344 drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n", 3345 payload->port, ret); 3346 goto put_port; 3347 } 3348 3349 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP; 3350 3351 put_port: 3352 drm_dp_mst_topology_put_port(port); 3353 3354 return ret; 3355 } 3356 EXPORT_SYMBOL(drm_dp_add_payload_part1); 3357 3358 /** 3359 * drm_dp_remove_payload_part1() - Remove an MST payload along the virtual channel 3360 * @mgr: Manager to use. 3361 * @mst_state: The MST atomic state 3362 * @payload: The payload to remove 3363 * 3364 * Removes a payload along the virtual channel if it was successfully allocated. 3365 * After calling this, the driver should set HW to generate ACT and then switch to new 3366 * payload allocation state. 
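 *
 * A hedged sketch of how the two removal steps might be sequenced in a
 * driver's disable path; mst_state, payload, old_payload, new_payload and
 * my_driver_stop_stream_and_wait_for_act() are illustrative placeholders
 * taken from the driver, not helpers provided here::
 *
 *	drm_dp_remove_payload_part1(mgr, mst_state, payload);
 *	my_driver_stop_stream_and_wait_for_act(mgr);
 *	drm_dp_remove_payload_part2(mgr, mst_state, old_payload, new_payload);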
3367 */ 3368 void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr, 3369 struct drm_dp_mst_topology_state *mst_state, 3370 struct drm_dp_mst_atomic_payload *payload) 3371 { 3372 /* Remove remote payload allocation */ 3373 bool send_remove = false; 3374 3375 mutex_lock(&mgr->lock); 3376 send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary); 3377 mutex_unlock(&mgr->lock); 3378 3379 if (send_remove) 3380 drm_dp_destroy_payload_at_remote_and_dfp(mgr, mst_state, payload); 3381 else 3382 drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n", 3383 payload->vcpi); 3384 3385 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL; 3386 } 3387 EXPORT_SYMBOL(drm_dp_remove_payload_part1); 3388 3389 /** 3390 * drm_dp_remove_payload_part2() - Remove an MST payload locally 3391 * @mgr: Manager to use. 3392 * @mst_state: The MST atomic state 3393 * @old_payload: The payload with its old state 3394 * @new_payload: The payload with its latest state 3395 * 3396 * Updates the starting time slots of all other payloads which would have been shifted towards 3397 * the start of the payload ID table as a result of removing a payload. The driver should call this 3398 * function whenever it removes a payload from its HW. It is independent of the result of payload 3399 * allocation/deallocation at branch devices along the virtual channel. 3400 */ 3401 void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr, 3402 struct drm_dp_mst_topology_state *mst_state, 3403 const struct drm_dp_mst_atomic_payload *old_payload, 3404 struct drm_dp_mst_atomic_payload *new_payload) 3405 { 3406 struct drm_dp_mst_atomic_payload *pos; 3407 3408 /* Remove local payload allocation */ 3409 list_for_each_entry(pos, &mst_state->payloads, next) { 3410 if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot) 3411 pos->vc_start_slot -= old_payload->time_slots; 3412 } 3413 new_payload->vc_start_slot = -1; 3414 3415 mgr->payload_count--; 3416 mgr->next_start_slot -= old_payload->time_slots; 3417 3418 if (new_payload->delete) 3419 drm_dp_mst_put_port_malloc(new_payload->port); 3420 3421 new_payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE; 3422 } 3423 EXPORT_SYMBOL(drm_dp_remove_payload_part2); 3424 /** 3425 * drm_dp_add_payload_part2() - Execute payload update part 2 3426 * @mgr: Manager to use. 3427 * @payload: The payload to update 3428 * 3429 * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this 3430 * function will send the sideband messages to finish allocating this payload. 3431 * 3432 * Returns: 0 on success, negative error code on failure.
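 *
 * A minimal sketch of the two-part allocation flow from a driver's enable
 * path, assuming @payload was looked up with
 * drm_atomic_get_mst_payload_state() and that the driver enables the stream
 * and handles ACT in between; my_driver_start_stream_and_wait_for_act() is
 * an illustrative placeholder for that driver-specific step::
 *
 *	ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
 *	if (!ret) {
 *		my_driver_start_stream_and_wait_for_act(mgr);
 *		ret = drm_dp_add_payload_part2(mgr, payload);
 *	}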
3433 */ 3434 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, 3435 struct drm_dp_mst_atomic_payload *payload) 3436 { 3437 int ret = 0; 3438 3439 /* Skip failed payloads */ 3440 if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) { 3441 drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", 3442 payload->port->connector->name); 3443 return -EIO; 3444 } 3445 3446 /* Allocate payload to remote end */ 3447 ret = drm_dp_create_payload_to_remote(mgr, payload); 3448 if (ret < 0) 3449 drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n", 3450 payload->port, ret); 3451 else 3452 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE; 3453 3454 return ret; 3455 } 3456 EXPORT_SYMBOL(drm_dp_add_payload_part2); 3457 3458 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, 3459 struct drm_dp_mst_port *port, 3460 int offset, int size, u8 *bytes) 3461 { 3462 int ret = 0; 3463 struct drm_dp_sideband_msg_tx *txmsg; 3464 struct drm_dp_mst_branch *mstb; 3465 3466 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3467 if (!mstb) 3468 return -EINVAL; 3469 3470 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3471 if (!txmsg) { 3472 ret = -ENOMEM; 3473 goto fail_put; 3474 } 3475 3476 build_dpcd_read(txmsg, port->port_num, offset, size); 3477 txmsg->dst = port->parent; 3478 3479 drm_dp_queue_down_tx(mgr, txmsg); 3480 3481 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3482 if (ret < 0) 3483 goto fail_free; 3484 3485 if (txmsg->reply.reply_type == 1) { 3486 drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", 3487 mstb, port->port_num, offset, size); 3488 ret = -EIO; 3489 goto fail_free; 3490 } 3491 3492 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) { 3493 ret = -EPROTO; 3494 goto fail_free; 3495 } 3496 3497 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes, 3498 size); 3499 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret); 3500 3501 fail_free: 3502 kfree(txmsg); 3503 fail_put: 3504 drm_dp_mst_topology_put_mstb(mstb); 3505 3506 return ret; 3507 } 3508 3509 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, 3510 struct drm_dp_mst_port *port, 3511 int offset, int size, u8 *bytes) 3512 { 3513 int ret; 3514 struct drm_dp_sideband_msg_tx *txmsg; 3515 struct drm_dp_mst_branch *mstb; 3516 3517 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3518 if (!mstb) 3519 return -EINVAL; 3520 3521 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3522 if (!txmsg) { 3523 ret = -ENOMEM; 3524 goto fail_put; 3525 } 3526 3527 build_dpcd_write(txmsg, port->port_num, offset, size, bytes); 3528 txmsg->dst = mstb; 3529 3530 drm_dp_queue_down_tx(mgr, txmsg); 3531 3532 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3533 if (ret > 0) { 3534 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3535 ret = -EIO; 3536 else 3537 ret = size; 3538 } 3539 3540 kfree(txmsg); 3541 fail_put: 3542 drm_dp_mst_topology_put_mstb(mstb); 3543 return ret; 3544 } 3545 3546 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) 3547 { 3548 struct drm_dp_sideband_msg_reply_body reply; 3549 3550 reply.reply_type = DP_SIDEBAND_REPLY_ACK; 3551 reply.req_type = req_type; 3552 drm_dp_encode_sideband_reply(&reply, msg); 3553 return 0; 3554 } 3555 3556 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, 3557 struct drm_dp_mst_branch *mstb, 3558 int req_type, bool broadcast) 3559 
{ 3560 struct drm_dp_sideband_msg_tx *txmsg; 3561 3562 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3563 if (!txmsg) 3564 return -ENOMEM; 3565 3566 txmsg->dst = mstb; 3567 drm_dp_encode_up_ack_reply(txmsg, req_type); 3568 3569 mutex_lock(&mgr->qlock); 3570 /* construct a chunk from the first msg in the tx_msg queue */ 3571 process_single_tx_qlock(mgr, txmsg, true); 3572 mutex_unlock(&mgr->qlock); 3573 3574 kfree(txmsg); 3575 return 0; 3576 } 3577 3578 /** 3579 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link 3580 * @mgr: The &drm_dp_mst_topology_mgr to use 3581 * @link_rate: link rate in 10kbits/s units 3582 * @link_lane_count: lane count 3583 * 3584 * Calculate the total bandwidth of a MultiStream Transport link. The returned 3585 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to 3586 * convert the number of PBNs required for a given stream to the number of 3587 * timeslots this stream requires in each MTP. 3588 * 3589 * Returns the BW / timeslot value in 20.12 fixed point format. 3590 */ 3591 fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, 3592 int link_rate, int link_lane_count) 3593 { 3594 int ch_coding_efficiency = 3595 drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate)); 3596 fixed20_12 ret; 3597 3598 if (link_rate == 0 || link_lane_count == 0) 3599 drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n", 3600 link_rate, link_lane_count); 3601 3602 /* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ 3603 ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count, 3604 ch_coding_efficiency), 3605 (1000000ULL * 8 * 5400) >> 12); 3606 3607 return ret; 3608 } 3609 EXPORT_SYMBOL(drm_dp_get_vc_payload_bw); 3610 3611 /** 3612 * drm_dp_read_mst_cap() - Read the sink's MST mode capability 3613 * @aux: The DP AUX channel to use 3614 * @dpcd: A cached copy of the DPCD capabilities for this sink 3615 * 3616 * Returns: enum drm_dp_mst_mode to indicate MST mode capability 3617 */ 3618 enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, 3619 const u8 dpcd[DP_RECEIVER_CAP_SIZE]) 3620 { 3621 u8 mstm_cap; 3622 3623 if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) 3624 return DRM_DP_SST; 3625 3626 if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1) 3627 return DRM_DP_SST; 3628 3629 if (mstm_cap & DP_MST_CAP) 3630 return DRM_DP_MST; 3631 3632 if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG) 3633 return DRM_DP_SST_SIDEBAND_MSG; 3634 3635 return DRM_DP_SST; 3636 } 3637 EXPORT_SYMBOL(drm_dp_read_mst_cap); 3638 3639 /** 3640 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager 3641 * @mgr: manager to set state for 3642 * @mst_state: true to enable MST on this connector - false to disable. 3643 * 3644 * This is called by the driver when it detects an MST capable device plugged 3645 * into a DP MST capable port, or when a DP MST capable device is unplugged. 
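 *
 * A minimal sketch of a detect-time call site, assuming the driver has
 * already probed the sink with drm_dp_read_mst_cap() (mst_detected below is
 * an illustrative driver-side flag, not part of this helper's API)::
 *
 *	ret = drm_dp_mst_topology_mgr_set_mst(mgr, mst_detected);
 *	if (ret)
 *		drm_dbg_kms(mgr->dev, "failed to change MST state: %d\n", ret);
 *
 * Returns: 0 on success, or a negative error code on failure.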
3646 */ 3647 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) 3648 { 3649 int ret = 0; 3650 struct drm_dp_mst_branch *mstb = NULL; 3651 3652 mutex_lock(&mgr->lock); 3653 if (mst_state == mgr->mst_state) 3654 goto out_unlock; 3655 3656 mgr->mst_state = mst_state; 3657 /* set the device into MST mode */ 3658 if (mst_state) { 3659 WARN_ON(mgr->mst_primary); 3660 3661 /* get dpcd info */ 3662 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); 3663 if (ret < 0) { 3664 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", 3665 mgr->aux->name, ret); 3666 goto out_unlock; 3667 } 3668 3669 /* add initial branch device at LCT 1 */ 3670 mstb = drm_dp_add_mst_branch_device(1, NULL); 3671 if (mstb == NULL) { 3672 ret = -ENOMEM; 3673 goto out_unlock; 3674 } 3675 mstb->mgr = mgr; 3676 3677 /* give this the main reference */ 3678 mgr->mst_primary = mstb; 3679 drm_dp_mst_topology_get_mstb(mgr->mst_primary); 3680 3681 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 3682 DP_MST_EN | 3683 DP_UP_REQ_EN | 3684 DP_UPSTREAM_IS_SRC); 3685 if (ret < 0) 3686 goto out_unlock; 3687 3688 /* Write reset payload */ 3689 drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f); 3690 3691 drm_dp_mst_queue_probe_work(mgr); 3692 3693 ret = 0; 3694 } else { 3695 /* disable MST on the device */ 3696 mstb = mgr->mst_primary; 3697 mgr->mst_primary = NULL; 3698 /* this can fail if the device is gone */ 3699 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); 3700 ret = 0; 3701 mgr->payload_id_table_cleared = false; 3702 3703 mgr->reset_rx_state = true; 3704 } 3705 3706 out_unlock: 3707 mutex_unlock(&mgr->lock); 3708 if (mstb) 3709 drm_dp_mst_topology_put_mstb(mstb); 3710 return ret; 3711 3712 } 3713 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); 3714 3715 static void 3716 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) 3717 { 3718 struct drm_dp_mst_port *port; 3719 3720 /* The link address will need to be re-sent on resume */ 3721 mstb->link_address_sent = false; 3722 3723 list_for_each_entry(port, &mstb->ports, next) 3724 if (port->mstb) 3725 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); 3726 } 3727 3728 /** 3729 * drm_dp_mst_topology_queue_probe - Queue a topology probe 3730 * @mgr: manager to probe 3731 * 3732 * Queue a work to probe the MST topology. Drivers should call this only to 3733 * sync the topology's HW->SW state after the MST link's parameters have 3734 * changed in a way the state could have become out-of-sync. This is the case 3735 * for instance when the link rate between the source and first downstream 3736 * branch device has switched between UHBR and non-UHBR rates. Except for those 3737 * cases - for instance when a sink gets plugged into or unplugged from a port - the SW 3738 * state will get updated automatically via MST UP message notifications. 3739 */ 3740 void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr) 3741 { 3742 mutex_lock(&mgr->lock); 3743 3744 if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary)) 3745 goto out_unlock; 3746 3747 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); 3748 drm_dp_mst_queue_probe_work(mgr); 3749 3750 out_unlock: 3751 mutex_unlock(&mgr->lock); 3752 } 3753 EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe); 3754 3755 /** 3756 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager 3757 * @mgr: manager to suspend 3758 * 3759 * This function tells the MST device that we can't handle UP messages 3760 * anymore. This should stop it from sending any since we are suspended.
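 *
 * A hedged sketch of the expected pairing with the resume side, assuming a
 * system suspend/resume path where the driver later calls
 * drm_atomic_helper_resume() (hence sync = true); tearing MST back down on a
 * failed resume is one common reaction, not a requirement of this helper::
 *
 *	drm_dp_mst_topology_mgr_suspend(mgr);
 *
 * and on resume::
 *
 *	if (drm_dp_mst_topology_mgr_resume(mgr, true))
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);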
3761 */ 3762 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) 3763 { 3764 mutex_lock(&mgr->lock); 3765 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 3766 DP_MST_EN | DP_UPSTREAM_IS_SRC); 3767 mutex_unlock(&mgr->lock); 3768 flush_work(&mgr->up_req_work); 3769 flush_work(&mgr->work); 3770 flush_work(&mgr->delayed_destroy_work); 3771 3772 mutex_lock(&mgr->lock); 3773 if (mgr->mst_state && mgr->mst_primary) 3774 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); 3775 mutex_unlock(&mgr->lock); 3776 } 3777 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); 3778 3779 /** 3780 * drm_dp_mst_topology_mgr_resume() - resume the MST manager 3781 * @mgr: manager to resume 3782 * @sync: whether or not to perform topology reprobing synchronously 3783 * 3784 * This will fetch DPCD and see if the device is still there, 3785 * if it is, it will rewrite the MSTM control bits, and return. 3786 * 3787 * If the device fails this returns -1, and the driver should do 3788 * a full MST reprobe, in case we were undocked. 3789 * 3790 * During system resume (where it is assumed that the driver will be calling 3791 * drm_atomic_helper_resume()) this function should be called beforehand with 3792 * @sync set to true. In contexts like runtime resume where the driver is not 3793 * expected to be calling drm_atomic_helper_resume(), this function should be 3794 * called with @sync set to false in order to avoid deadlocking. 3795 * 3796 * Returns: -1 if the MST topology was removed while we were suspended, 0 3797 * otherwise. 3798 */ 3799 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, 3800 bool sync) 3801 { 3802 u8 buf[UUID_SIZE]; 3803 guid_t guid; 3804 int ret; 3805 3806 mutex_lock(&mgr->lock); 3807 if (!mgr->mst_primary) 3808 goto out_fail; 3809 3810 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 3811 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 3812 goto out_fail; 3813 } 3814 3815 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 3816 DP_MST_EN | 3817 DP_UP_REQ_EN | 3818 DP_UPSTREAM_IS_SRC); 3819 if (ret < 0) { 3820 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 3821 goto out_fail; 3822 } 3823 3824 /* Some hubs forget their guids after they resume */ 3825 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); 3826 if (ret != sizeof(buf)) { 3827 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 3828 goto out_fail; 3829 } 3830 3831 import_guid(&guid, buf); 3832 3833 ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid); 3834 if (ret) { 3835 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n"); 3836 goto out_fail; 3837 } 3838 3839 /* 3840 * For the final step of resuming the topology, we need to bring the 3841 * state of our in-memory topology back into sync with reality. 
So, 3842 * restart the probing process as if we're probing a new hub 3843 */ 3844 drm_dp_mst_queue_probe_work(mgr); 3845 mutex_unlock(&mgr->lock); 3846 3847 if (sync) { 3848 drm_dbg_kms(mgr->dev, 3849 "Waiting for link probe work to finish re-syncing topology...\n"); 3850 flush_work(&mgr->work); 3851 } 3852 3853 return 0; 3854 3855 out_fail: 3856 mutex_unlock(&mgr->lock); 3857 return -1; 3858 } 3859 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); 3860 3861 static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg) 3862 { 3863 memset(msg, 0, sizeof(*msg)); 3864 } 3865 3866 static bool 3867 drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, 3868 struct drm_dp_mst_branch **mstb) 3869 { 3870 int len; 3871 u8 replyblock[32]; 3872 int replylen, curreply; 3873 int ret; 3874 u8 hdrlen; 3875 struct drm_dp_sideband_msg_hdr hdr; 3876 struct drm_dp_sideband_msg_rx *msg = 3877 up ? &mgr->up_req_recv : &mgr->down_rep_recv; 3878 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : 3879 DP_SIDEBAND_MSG_DOWN_REP_BASE; 3880 3881 if (!up) 3882 *mstb = NULL; 3883 3884 len = min(mgr->max_dpcd_transaction_bytes, 16); 3885 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); 3886 if (ret != len) { 3887 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); 3888 return false; 3889 } 3890 3891 ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen); 3892 if (ret == false) { 3893 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 3894 1, replyblock, len, false); 3895 drm_dbg_kms(mgr->dev, "ERROR: failed header\n"); 3896 return false; 3897 } 3898 3899 if (!up) { 3900 /* Caller is responsible for giving back this reference */ 3901 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad); 3902 if (!*mstb) { 3903 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct); 3904 return false; 3905 } 3906 } 3907 3908 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) { 3909 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]); 3910 return false; 3911 } 3912 3913 replylen = min(msg->curchunk_len, (u8)(len - hdrlen)); 3914 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen); 3915 if (!ret) { 3916 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]); 3917 return false; 3918 } 3919 3920 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len; 3921 curreply = len; 3922 while (replylen > 0) { 3923 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); 3924 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 3925 replyblock, len); 3926 if (ret != len) { 3927 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", 3928 len, ret); 3929 return false; 3930 } 3931 3932 ret = drm_dp_sideband_append_payload(msg, replyblock, len); 3933 if (!ret) { 3934 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n"); 3935 return false; 3936 } 3937 3938 curreply += len; 3939 replylen -= len; 3940 } 3941 return true; 3942 } 3943 3944 static int get_msg_request_type(u8 data) 3945 { 3946 return data & 0x7f; 3947 } 3948 3949 static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr, 3950 const struct drm_dp_sideband_msg_tx *txmsg, 3951 const struct drm_dp_sideband_msg_rx *rxmsg) 3952 { 3953 const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr; 3954 const struct drm_dp_mst_branch *mstb = txmsg->dst; 3955 int tx_req_type = get_msg_request_type(txmsg->msg[0]); 3956 int rx_req_type = get_msg_request_type(rxmsg->msg[0]); 3957 char rad_str[64]; 3958 3959 if 
(tx_req_type == rx_req_type) 3960 return true; 3961 3962 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str)); 3963 drm_dbg_kms(mgr->dev, 3964 "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n", 3965 mstb, hdr->seqno, mstb->lct, rad_str, 3966 drm_dp_mst_req_type_str(rx_req_type), rx_req_type, 3967 drm_dp_mst_req_type_str(tx_req_type), tx_req_type); 3968 3969 return false; 3970 } 3971 3972 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 3973 { 3974 struct drm_dp_sideband_msg_tx *txmsg; 3975 struct drm_dp_mst_branch *mstb = NULL; 3976 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv; 3977 3978 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb)) 3979 goto out_clear_reply; 3980 3981 /* Multi-packet message transmission, don't clear the reply */ 3982 if (!msg->have_eomt) 3983 goto out; 3984 3985 /* find the message */ 3986 mutex_lock(&mgr->qlock); 3987 3988 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, 3989 struct drm_dp_sideband_msg_tx, next); 3990 3991 /* Were we actually expecting a response, and from this mstb? */ 3992 if (!txmsg || txmsg->dst != mstb) { 3993 struct drm_dp_sideband_msg_hdr *hdr; 3994 3995 hdr = &msg->initial_hdr; 3996 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n", 3997 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]); 3998 3999 mutex_unlock(&mgr->qlock); 4000 4001 goto out_clear_reply; 4002 } 4003 4004 if (!verify_rx_request_type(mgr, txmsg, msg)) { 4005 mutex_unlock(&mgr->qlock); 4006 4007 goto out_clear_reply; 4008 } 4009 4010 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply); 4011 4012 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 4013 drm_dbg_kms(mgr->dev, 4014 "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", 4015 txmsg->reply.req_type, 4016 drm_dp_mst_req_type_str(txmsg->reply.req_type), 4017 txmsg->reply.u.nak.reason, 4018 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), 4019 txmsg->reply.u.nak.nak_data); 4020 } 4021 4022 txmsg->state = DRM_DP_SIDEBAND_TX_RX; 4023 list_del(&txmsg->next); 4024 4025 mutex_unlock(&mgr->qlock); 4026 4027 wake_up_all(&mgr->tx_waitq); 4028 4029 out_clear_reply: 4030 reset_msg_rx_state(msg); 4031 out: 4032 if (mstb) 4033 drm_dp_mst_topology_put_mstb(mstb); 4034 4035 return 0; 4036 } 4037 4038 static inline bool 4039 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, 4040 struct drm_dp_pending_up_req *up_req) 4041 { 4042 struct drm_dp_mst_branch *mstb = NULL; 4043 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; 4044 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; 4045 bool hotplug = false, dowork = false; 4046 4047 if (hdr->broadcast) { 4048 const guid_t *guid = NULL; 4049 4050 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) 4051 guid = &msg->u.conn_stat.guid; 4052 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) 4053 guid = &msg->u.resource_stat.guid; 4054 4055 if (guid) 4056 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); 4057 } else { 4058 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); 4059 } 4060 4061 if (!mstb) { 4062 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct); 4063 return false; 4064 } 4065 4066 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ 4067 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { 4068 dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); 4069 hotplug = true; 4070 } 4071 4072 drm_dp_mst_topology_put_mstb(mstb); 4073 4074 
if (dowork) 4075 queue_work(system_long_wq, &mgr->work); 4076 return hotplug; 4077 } 4078 4079 static void drm_dp_mst_up_req_work(struct work_struct *work) 4080 { 4081 struct drm_dp_mst_topology_mgr *mgr = 4082 container_of(work, struct drm_dp_mst_topology_mgr, 4083 up_req_work); 4084 struct drm_dp_pending_up_req *up_req; 4085 bool send_hotplug = false; 4086 4087 mutex_lock(&mgr->probe_lock); 4088 while (true) { 4089 mutex_lock(&mgr->up_req_lock); 4090 up_req = list_first_entry_or_null(&mgr->up_req_list, 4091 struct drm_dp_pending_up_req, 4092 next); 4093 if (up_req) 4094 list_del(&up_req->next); 4095 mutex_unlock(&mgr->up_req_lock); 4096 4097 if (!up_req) 4098 break; 4099 4100 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req); 4101 kfree(up_req); 4102 } 4103 mutex_unlock(&mgr->probe_lock); 4104 4105 if (send_hotplug) 4106 drm_kms_helper_hotplug_event(mgr->dev); 4107 } 4108 4109 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 4110 { 4111 struct drm_dp_pending_up_req *up_req; 4112 struct drm_dp_mst_branch *mst_primary; 4113 int ret = 0; 4114 4115 if (!drm_dp_get_one_sb_msg(mgr, true, NULL)) 4116 goto out_clear_reply; 4117 4118 if (!mgr->up_req_recv.have_eomt) 4119 return 0; 4120 4121 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); 4122 if (!up_req) { 4123 ret = -ENOMEM; 4124 goto out_clear_reply; 4125 } 4126 4127 INIT_LIST_HEAD(&up_req->next); 4128 4129 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg); 4130 4131 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && 4132 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { 4133 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", 4134 up_req->msg.req_type); 4135 kfree(up_req); 4136 goto out_clear_reply; 4137 } 4138 4139 mutex_lock(&mgr->lock); 4140 mst_primary = mgr->mst_primary; 4141 if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) { 4142 mutex_unlock(&mgr->lock); 4143 kfree(up_req); 4144 goto out_clear_reply; 4145 } 4146 mutex_unlock(&mgr->lock); 4147 4148 drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type, 4149 false); 4150 4151 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { 4152 const struct drm_dp_connection_status_notify *conn_stat = 4153 &up_req->msg.u.conn_stat; 4154 bool handle_csn; 4155 4156 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", 4157 conn_stat->port_number, 4158 conn_stat->legacy_device_plug_status, 4159 conn_stat->displayport_device_plug_status, 4160 conn_stat->message_capability_status, 4161 conn_stat->input_port, 4162 conn_stat->peer_device_type); 4163 4164 mutex_lock(&mgr->probe_lock); 4165 handle_csn = mst_primary->link_address_sent; 4166 mutex_unlock(&mgr->probe_lock); 4167 4168 if (!handle_csn) { 4169 drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. 
Skip it."); 4170 kfree(up_req); 4171 goto out_put_primary; 4172 } 4173 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 4174 const struct drm_dp_resource_status_notify *res_stat = 4175 &up_req->msg.u.resource_stat; 4176 4177 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n", 4178 res_stat->port_number, 4179 res_stat->available_pbn); 4180 } 4181 4182 up_req->hdr = mgr->up_req_recv.initial_hdr; 4183 mutex_lock(&mgr->up_req_lock); 4184 list_add_tail(&up_req->next, &mgr->up_req_list); 4185 mutex_unlock(&mgr->up_req_lock); 4186 queue_work(system_long_wq, &mgr->up_req_work); 4187 4188 out_put_primary: 4189 drm_dp_mst_topology_put_mstb(mst_primary); 4190 out_clear_reply: 4191 reset_msg_rx_state(&mgr->up_req_recv); 4192 return ret; 4193 } 4194 4195 static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr) 4196 { 4197 mutex_lock(&mgr->lock); 4198 if (mgr->reset_rx_state) { 4199 mgr->reset_rx_state = false; 4200 reset_msg_rx_state(&mgr->down_rep_recv); 4201 reset_msg_rx_state(&mgr->up_req_recv); 4202 } 4203 mutex_unlock(&mgr->lock); 4204 } 4205 4206 /** 4207 * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event 4208 * @mgr: manager to notify irq for. 4209 * @esi: 4 bytes from SINK_COUNT_ESI 4210 * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI 4211 * @handled: whether the hpd interrupt was consumed or not 4212 * 4213 * This should be called from the driver when it detects a HPD IRQ, 4214 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The 4215 * topology manager will process the sideband messages received 4216 * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the 4217 * corresponding flags that Driver has to ack the DP receiver later. 4218 * 4219 * Note that driver shall also call 4220 * drm_dp_mst_hpd_irq_send_new_request() if the 'handled' is set 4221 * after calling this function, to try to kick off a new request in 4222 * the queue if the previous message transaction is completed. 4223 * 4224 * See also: 4225 * drm_dp_mst_hpd_irq_send_new_request() 4226 */ 4227 int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi, 4228 u8 *ack, bool *handled) 4229 { 4230 int ret = 0; 4231 int sc; 4232 *handled = false; 4233 sc = DP_GET_SINK_COUNT(esi[0]); 4234 4235 if (sc != mgr->sink_count) { 4236 mgr->sink_count = sc; 4237 *handled = true; 4238 } 4239 4240 update_msg_rx_state(mgr); 4241 4242 if (esi[1] & DP_DOWN_REP_MSG_RDY) { 4243 ret = drm_dp_mst_handle_down_rep(mgr); 4244 *handled = true; 4245 ack[1] |= DP_DOWN_REP_MSG_RDY; 4246 } 4247 4248 if (esi[1] & DP_UP_REQ_MSG_RDY) { 4249 ret |= drm_dp_mst_handle_up_req(mgr); 4250 *handled = true; 4251 ack[1] |= DP_UP_REQ_MSG_RDY; 4252 } 4253 4254 return ret; 4255 } 4256 EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event); 4257 4258 /** 4259 * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request 4260 * @mgr: manager to notify irq for. 4261 * 4262 * This should be called from the driver when mst irq event is handled 4263 * and acked. Note that new down request should only be sent when 4264 * previous message transaction is completed. Source is not supposed to generate 4265 * interleaved message transactions. 
4266 */ 4267 void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr) 4268 { 4269 struct drm_dp_sideband_msg_tx *txmsg; 4270 bool kick = true; 4271 4272 mutex_lock(&mgr->qlock); 4273 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, 4274 struct drm_dp_sideband_msg_tx, next); 4275 /* If last transaction is not completed yet*/ 4276 if (!txmsg || 4277 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || 4278 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) 4279 kick = false; 4280 mutex_unlock(&mgr->qlock); 4281 4282 if (kick) 4283 drm_dp_mst_kick_tx(mgr); 4284 } 4285 EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request); 4286 /** 4287 * drm_dp_mst_detect_port() - get connection status for an MST port 4288 * @connector: DRM connector for this port 4289 * @ctx: The acquisition context to use for grabbing locks 4290 * @mgr: manager for this port 4291 * @port: pointer to a port 4292 * 4293 * This returns the current connection state for a port. 4294 */ 4295 int 4296 drm_dp_mst_detect_port(struct drm_connector *connector, 4297 struct drm_modeset_acquire_ctx *ctx, 4298 struct drm_dp_mst_topology_mgr *mgr, 4299 struct drm_dp_mst_port *port) 4300 { 4301 int ret; 4302 4303 /* we need to search for the port in the mgr in case it's gone */ 4304 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4305 if (!port) 4306 return connector_status_disconnected; 4307 4308 ret = drm_modeset_lock(&mgr->base.lock, ctx); 4309 if (ret) 4310 goto out; 4311 4312 ret = connector_status_disconnected; 4313 4314 if (!port->ddps) 4315 goto out; 4316 4317 switch (port->pdt) { 4318 case DP_PEER_DEVICE_NONE: 4319 break; 4320 case DP_PEER_DEVICE_MST_BRANCHING: 4321 if (!port->mcs) 4322 ret = connector_status_connected; 4323 break; 4324 4325 case DP_PEER_DEVICE_SST_SINK: 4326 ret = connector_status_connected; 4327 /* for logical ports - cache the EDID */ 4328 if (drm_dp_mst_port_is_logical(port) && !port->cached_edid) 4329 port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc); 4330 break; 4331 case DP_PEER_DEVICE_DP_LEGACY_CONV: 4332 if (port->ldps) 4333 ret = connector_status_connected; 4334 break; 4335 } 4336 out: 4337 drm_dp_mst_topology_put_port(port); 4338 return ret; 4339 } 4340 EXPORT_SYMBOL(drm_dp_mst_detect_port); 4341 4342 /** 4343 * drm_dp_mst_edid_read() - get EDID for an MST port 4344 * @connector: toplevel connector to get EDID for 4345 * @mgr: manager for this port 4346 * @port: unverified pointer to a port. 4347 * 4348 * This returns an EDID for the port connected to a connector, 4349 * It validates the pointer still exists so the caller doesn't require a 4350 * reference. 4351 */ 4352 const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector, 4353 struct drm_dp_mst_topology_mgr *mgr, 4354 struct drm_dp_mst_port *port) 4355 { 4356 const struct drm_edid *drm_edid; 4357 4358 /* we need to search for the port in the mgr in case it's gone */ 4359 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4360 if (!port) 4361 return NULL; 4362 4363 if (port->cached_edid) 4364 drm_edid = drm_edid_dup(port->cached_edid); 4365 else 4366 drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc); 4367 4368 drm_dp_mst_topology_put_port(port); 4369 4370 return drm_edid; 4371 } 4372 EXPORT_SYMBOL(drm_dp_mst_edid_read); 4373 4374 /** 4375 * drm_dp_mst_get_edid() - get EDID for an MST port 4376 * @connector: toplevel connector to get EDID for 4377 * @mgr: manager for this port 4378 * @port: unverified pointer to a port. 
4379 *
4380 * This function is deprecated; please use drm_dp_mst_edid_read() instead.
4381 *
4382 * This returns an EDID for the port connected to a connector.
4383 * It validates the pointer still exists so the caller doesn't require a
4384 * reference.
4385 */
4386 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
4387 struct drm_dp_mst_topology_mgr *mgr,
4388 struct drm_dp_mst_port *port)
4389 {
4390 const struct drm_edid *drm_edid;
4391 struct edid *edid;
4392
4393 drm_edid = drm_dp_mst_edid_read(connector, mgr, port);
4394
4395 edid = drm_edid_duplicate(drm_edid_raw(drm_edid));
4396
4397 drm_edid_free(drm_edid);
4398
4399 return edid;
4400 }
4401 EXPORT_SYMBOL(drm_dp_mst_get_edid);
4402
4403 /**
4404 * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
4405 * @state: global atomic state
4406 * @mgr: MST topology manager for the port
4407 * @port: port to find time slots for
4408 * @pbn: bandwidth required for the mode in PBN
4409 *
4410 * Allocates time slots to @port, replacing any previous time slot allocations it may
4411 * have had. Any atomic drivers which support MST must call this function in
4412 * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
4413 * change the current time slot allocation for the new state, and ensure the MST
4414 * atomic state is added whenever the state of payloads in the topology changes.
4415 *
4416 * Allocations set by this function are not checked against the bandwidth
4417 * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4418 *
4419 * Additionally, it is OK to call this function multiple times on the same
4420 * @port as needed. It is not OK, however, to call this function and
4421 * drm_dp_atomic_release_time_slots() in the same atomic check phase.
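 *
 * A minimal sketch of the expected usage from an encoder's atomic_check()
 * (hypothetical driver code; DSC and error handling omitted)::
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp << 4);
 *	slots = drm_dp_atomic_find_time_slots(state, &dp->mst_mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;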
4422 * 4423 * See also: 4424 * drm_dp_atomic_release_time_slots() 4425 * drm_dp_mst_atomic_check() 4426 * 4427 * Returns: 4428 * Total slots in the atomic state assigned for this port, or a negative error 4429 * code if the port no longer exists 4430 */ 4431 int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, 4432 struct drm_dp_mst_topology_mgr *mgr, 4433 struct drm_dp_mst_port *port, int pbn) 4434 { 4435 struct drm_dp_mst_topology_state *topology_state; 4436 struct drm_dp_mst_atomic_payload *payload = NULL; 4437 struct drm_connector_state *conn_state; 4438 int prev_slots = 0, prev_bw = 0, req_slots; 4439 4440 topology_state = drm_atomic_get_mst_topology_state(state, mgr); 4441 if (IS_ERR(topology_state)) 4442 return PTR_ERR(topology_state); 4443 4444 conn_state = drm_atomic_get_new_connector_state(state, port->connector); 4445 topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc); 4446 4447 /* Find the current allocation for this port, if any */ 4448 payload = drm_atomic_get_mst_payload_state(topology_state, port); 4449 if (payload) { 4450 prev_slots = payload->time_slots; 4451 prev_bw = payload->pbn; 4452 4453 /* 4454 * This should never happen, unless the driver tries 4455 * releasing and allocating the same timeslot allocation, 4456 * which is an error 4457 */ 4458 if (drm_WARN_ON(mgr->dev, payload->delete)) { 4459 drm_err(mgr->dev, 4460 "cannot allocate and release time slots on [MST PORT:%p] in the same state\n", 4461 port); 4462 return -EINVAL; 4463 } 4464 } 4465 4466 req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full); 4467 4468 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n", 4469 port->connector->base.id, port->connector->name, 4470 port, prev_slots, req_slots); 4471 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", 4472 port->connector->base.id, port->connector->name, 4473 port, prev_bw, pbn); 4474 4475 /* Add the new allocation to the state, note the VCPI isn't assigned until the end */ 4476 if (!payload) { 4477 payload = kzalloc(sizeof(*payload), GFP_KERNEL); 4478 if (!payload) 4479 return -ENOMEM; 4480 4481 drm_dp_mst_get_port_malloc(port); 4482 payload->port = port; 4483 payload->vc_start_slot = -1; 4484 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE; 4485 list_add(&payload->next, &topology_state->payloads); 4486 } 4487 payload->time_slots = req_slots; 4488 payload->pbn = pbn; 4489 4490 return req_slots; 4491 } 4492 EXPORT_SYMBOL(drm_dp_atomic_find_time_slots); 4493 4494 /** 4495 * drm_dp_atomic_release_time_slots() - Release allocated time slots 4496 * @state: global atomic state 4497 * @mgr: MST topology manager for the port 4498 * @port: The port to release the time slots from 4499 * 4500 * Releases any time slots that have been allocated to a port in the atomic 4501 * state. Any atomic drivers which support MST must call this function 4502 * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback. 4503 * This helper will check whether time slots would be released by the new state and 4504 * respond accordingly, along with ensuring the MST state is always added to the 4505 * atomic state whenever a new state would modify the state of payloads on the 4506 * topology. 4507 * 4508 * It is OK to call this even if @port has been removed from the system. 4509 * Additionally, it is OK to call this function multiple times on the same 4510 * @port as needed. 
It is not OK, however, to call this function and
4511 * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
4512 * phase.
4513 *
4514 * See also:
4515 * drm_dp_atomic_find_time_slots()
4516 * drm_dp_mst_atomic_check()
4517 *
4518 * Returns:
4519 * 0 on success, negative error code otherwise
4520 */
4521 int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
4522 struct drm_dp_mst_topology_mgr *mgr,
4523 struct drm_dp_mst_port *port)
4524 {
4525 struct drm_dp_mst_topology_state *topology_state;
4526 struct drm_dp_mst_atomic_payload *payload;
4527 struct drm_connector_state *old_conn_state, *new_conn_state;
4528 bool update_payload = true;
4529
4530 old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
4531 if (!old_conn_state->crtc)
4532 return 0;
4533
4534 /* If the CRTC isn't disabled by this state, don't release its payload */
4535 new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
4536 if (new_conn_state->crtc) {
4537 struct drm_crtc_state *crtc_state =
4538 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
4539
4540 /* No modeset means no payload changes, so it's safe to not pull in the MST state */
4541 if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
4542 return 0;
4543
4544 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
4545 update_payload = false;
4546 }
4547
4548 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4549 if (IS_ERR(topology_state))
4550 return PTR_ERR(topology_state);
4551
4552 topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
4553 if (!update_payload)
4554 return 0;
4555
4556 payload = drm_atomic_get_mst_payload_state(topology_state, port);
4557 if (WARN_ON(!payload)) {
4558 drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
4559 port, &topology_state->base);
4560 return -EINVAL;
4561 }
4562
4563 if (new_conn_state->crtc)
4564 return 0;
4565
4566 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
4567 if (!payload->delete) {
4568 payload->pbn = 0;
4569 payload->delete = true;
4570 topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
4571 }
4572
4573 return 0;
4574 }
4575 EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
4576
4577 /**
4578 * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
4579 * @state: global atomic state
4580 *
4581 * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
4582 * currently assigned to an MST topology. Drivers must call this hook from their
4583 * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
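 *
 * A sketch of the expected wiring (hypothetical driver code; the
 * foo_mode_config_helpers name is illustrative only)::
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
 *	};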
4584 * 4585 * Returns: 4586 * 0 if all CRTC commits were retrieved successfully, negative error code otherwise 4587 */ 4588 int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state) 4589 { 4590 struct drm_dp_mst_topology_mgr *mgr; 4591 struct drm_dp_mst_topology_state *mst_state; 4592 struct drm_crtc *crtc; 4593 struct drm_crtc_state *crtc_state; 4594 int i, j, commit_idx, num_commit_deps; 4595 4596 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 4597 if (!mst_state->pending_crtc_mask) 4598 continue; 4599 4600 num_commit_deps = hweight32(mst_state->pending_crtc_mask); 4601 mst_state->commit_deps = kmalloc_array(num_commit_deps, 4602 sizeof(*mst_state->commit_deps), GFP_KERNEL); 4603 if (!mst_state->commit_deps) 4604 return -ENOMEM; 4605 mst_state->num_commit_deps = num_commit_deps; 4606 4607 commit_idx = 0; 4608 for_each_new_crtc_in_state(state, crtc, crtc_state, j) { 4609 if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) { 4610 mst_state->commit_deps[commit_idx++] = 4611 drm_crtc_commit_get(crtc_state->commit); 4612 } 4613 } 4614 } 4615 4616 return 0; 4617 } 4618 EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit); 4619 4620 /** 4621 * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies, 4622 * prepare new MST state for commit 4623 * @state: global atomic state 4624 * 4625 * Goes through any MST topologies in this atomic state, and waits for any pending commits which 4626 * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before 4627 * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing 4628 * with eachother by forcing them to be executed sequentially in situations where the only resources 4629 * the modeset objects in these commits share are an MST topology. 4630 * 4631 * This function also prepares the new MST state for commit by performing some state preparation 4632 * which can't be done until this point, such as reading back the final VC start slots (which are 4633 * determined at commit-time) from the previous state. 4634 * 4635 * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(), 4636 * or whatever their equivalent of that is. 
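 *
 * A sketch of the expected ordering in a driver's commit tail (hypothetical
 * code)::
 *
 *	drm_atomic_helper_wait_for_dependencies(state);
 *	drm_dp_mst_atomic_wait_for_dependencies(state);
 *	// ... program the hardware and enable the payloads ...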
4637 */ 4638 void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state) 4639 { 4640 struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state; 4641 struct drm_dp_mst_topology_mgr *mgr; 4642 struct drm_dp_mst_atomic_payload *old_payload, *new_payload; 4643 int i, j, ret; 4644 4645 for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) { 4646 for (j = 0; j < old_mst_state->num_commit_deps; j++) { 4647 ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]); 4648 if (ret < 0) 4649 drm_err(state->dev, "Failed to wait for %s: %d\n", 4650 old_mst_state->commit_deps[j]->crtc->name, ret); 4651 } 4652 4653 /* Now that previous state is committed, it's safe to copy over the start slot 4654 * and allocation status assignments 4655 */ 4656 list_for_each_entry(old_payload, &old_mst_state->payloads, next) { 4657 if (old_payload->delete) 4658 continue; 4659 4660 new_payload = drm_atomic_get_mst_payload_state(new_mst_state, 4661 old_payload->port); 4662 new_payload->vc_start_slot = old_payload->vc_start_slot; 4663 new_payload->payload_allocation_status = 4664 old_payload->payload_allocation_status; 4665 } 4666 } 4667 } 4668 EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies); 4669 4670 /** 4671 * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating 4672 * in SST mode 4673 * @new_conn_state: The new connector state of the &drm_connector 4674 * @mgr: The MST topology manager for the &drm_connector 4675 * 4676 * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to 4677 * serialize non-blocking commits happening on the real DP connector of an MST topology switching 4678 * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's 4679 * MST topology will never share the same &drm_encoder. 4680 * 4681 * This function takes care of this serialization issue, by checking a root MST connector's atomic 4682 * state to determine if it is about to have a modeset - and then pulling in the MST topology state 4683 * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask. 4684 * 4685 * Drivers implementing MST must call this function from the 4686 * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of 4687 * driving MST sinks. 
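 *
 * A sketch of such a root connector atomic_check (hypothetical driver code;
 * foo_dp_from_connector() stands in for the driver's own lookup)::
 *
 *	static int foo_dp_connector_atomic_check(struct drm_connector *connector,
 *						 struct drm_atomic_state *state)
 *	{
 *		struct drm_connector_state *conn_state =
 *			drm_atomic_get_new_connector_state(state, connector);
 *		struct foo_dp *dp = foo_dp_from_connector(connector);
 *
 *		return drm_dp_mst_root_conn_atomic_check(conn_state, &dp->mst_mgr);
 *	}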
4688 *
4689 * Returns:
4690 * 0 on success, negative error code otherwise
4691 */
4692 int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
4693 struct drm_dp_mst_topology_mgr *mgr)
4694 {
4695 struct drm_atomic_state *state = new_conn_state->state;
4696 struct drm_connector_state *old_conn_state =
4697 drm_atomic_get_old_connector_state(state, new_conn_state->connector);
4698 struct drm_crtc_state *crtc_state;
4699 struct drm_dp_mst_topology_state *mst_state = NULL;
4700
4701 if (new_conn_state->crtc) {
4702 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
4703 if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
4704 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4705 if (IS_ERR(mst_state))
4706 return PTR_ERR(mst_state);
4707
4708 mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
4709 }
4710 }
4711
4712 if (old_conn_state->crtc) {
4713 crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
4714 if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
4715 if (!mst_state) {
4716 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4717 if (IS_ERR(mst_state))
4718 return PTR_ERR(mst_state);
4719 }
4720
4721 mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
4722 }
4723 }
4724
4725 return 0;
4726 }
4727 EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
4728
4729 /**
4730 * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
4731 * @mst_state: mst_state to update
4732 * @link_encoding_cap: the encoding format on the link
4733 */
4734 void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
4735 {
4736 if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
4737 mst_state->total_avail_slots = 64;
4738 mst_state->start_slot = 0;
4739 } else {
4740 mst_state->total_avail_slots = 63;
4741 mst_state->start_slot = 1;
4742 }
4743
4744 DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
4745 (link_encoding_cap == DP_CAP_ANSI_128B132B) ?
"128b/132b":"8b/10b", 4746 mst_state); 4747 } 4748 EXPORT_SYMBOL(drm_dp_mst_update_slots); 4749 4750 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, 4751 int id, u8 start_slot, u8 num_slots) 4752 { 4753 u8 payload_alloc[3], status; 4754 int ret; 4755 int retries = 0; 4756 4757 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, 4758 DP_PAYLOAD_TABLE_UPDATED); 4759 4760 payload_alloc[0] = id; 4761 payload_alloc[1] = start_slot; 4762 payload_alloc[2] = num_slots; 4763 4764 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); 4765 if (ret != 3) { 4766 drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret); 4767 goto fail; 4768 } 4769 4770 retry: 4771 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 4772 if (ret < 0) { 4773 drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret); 4774 goto fail; 4775 } 4776 4777 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) { 4778 retries++; 4779 if (retries < 20) { 4780 usleep_range(10000, 20000); 4781 goto retry; 4782 } 4783 drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n", 4784 status); 4785 ret = -EINVAL; 4786 goto fail; 4787 } 4788 ret = 0; 4789 fail: 4790 return ret; 4791 } 4792 4793 static int do_get_act_status(struct drm_dp_aux *aux) 4794 { 4795 int ret; 4796 u8 status; 4797 4798 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 4799 if (ret < 0) 4800 return ret; 4801 4802 return status; 4803 } 4804 4805 /** 4806 * drm_dp_check_act_status() - Polls for ACT handled status. 4807 * @mgr: manager to use 4808 * 4809 * Tries waiting for the MST hub to finish updating it's payload table by 4810 * polling for the ACT handled bit for up to 3 seconds (yes-some hubs really 4811 * take that long). 4812 * 4813 * Returns: 4814 * 0 if the ACT was handled in time, negative error code on failure. 4815 */ 4816 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) 4817 { 4818 /* 4819 * There doesn't seem to be any recommended retry count or timeout in 4820 * the MST specification. Since some hubs have been observed to take 4821 * over 1 second to update their payload allocations under certain 4822 * conditions, we use a rather large timeout value. 4823 */ 4824 const int timeout_ms = 3000; 4825 int ret, status; 4826 4827 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, 4828 status & DP_PAYLOAD_ACT_HANDLED || status < 0, 4829 200, timeout_ms * USEC_PER_MSEC); 4830 if (ret < 0 && status >= 0) { 4831 drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n", 4832 timeout_ms, status); 4833 return -EINVAL; 4834 } else if (status < 0) { 4835 /* 4836 * Failure here isn't unexpected - the hub may have 4837 * just been unplugged 4838 */ 4839 drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status); 4840 return status; 4841 } 4842 4843 return 0; 4844 } 4845 EXPORT_SYMBOL(drm_dp_check_act_status); 4846 4847 /** 4848 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. 4849 * @clock: dot clock 4850 * @bpp: bpp as .4 binary fixed point 4851 * 4852 * This uses the formula in the spec to calculate the PBN value for a mode. 
4853 */ 4854 int drm_dp_calc_pbn_mode(int clock, int bpp) 4855 { 4856 /* 4857 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on 4858 * common multiplier to render an integer PBN for all link rate/lane 4859 * counts combinations 4860 * calculate 4861 * peak_kbps = clock * bpp / 16 4862 * peak_kbps *= SSC overhead / 1000000 4863 * peak_kbps /= 8 convert to Kbytes 4864 * peak_kBps *= (64/54) / 1000 convert to PBN 4865 */ 4866 /* 4867 * TODO: Use the actual link and mode parameters to calculate 4868 * the overhead. For now it's assumed that these are 4869 * 4 link lanes, 4096 hactive pixels, which don't add any 4870 * significant data padding overhead and that there is no DSC 4871 * or FEC overhead. 4872 */ 4873 int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp, 4874 DRM_DP_BW_OVERHEAD_MST | 4875 DRM_DP_BW_OVERHEAD_SSC_REF_CLK); 4876 4877 return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4), 4878 1000000ULL * 8 * 54 * 1000); 4879 } 4880 EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 4881 4882 /* we want to kick the TX after we've ack the up/down IRQs. */ 4883 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) 4884 { 4885 queue_work(system_long_wq, &mgr->tx_work); 4886 } 4887 4888 /* 4889 * Helper function for parsing DP device types into convenient strings 4890 * for use with dp_mst_topology 4891 */ 4892 static const char *pdt_to_string(u8 pdt) 4893 { 4894 switch (pdt) { 4895 case DP_PEER_DEVICE_NONE: 4896 return "NONE"; 4897 case DP_PEER_DEVICE_SOURCE_OR_SST: 4898 return "SOURCE OR SST"; 4899 case DP_PEER_DEVICE_MST_BRANCHING: 4900 return "MST BRANCHING"; 4901 case DP_PEER_DEVICE_SST_SINK: 4902 return "SST SINK"; 4903 case DP_PEER_DEVICE_DP_LEGACY_CONV: 4904 return "DP LEGACY CONV"; 4905 default: 4906 return "ERR"; 4907 } 4908 } 4909 4910 static void drm_dp_mst_dump_mstb(struct seq_file *m, 4911 struct drm_dp_mst_branch *mstb) 4912 { 4913 struct drm_dp_mst_port *port; 4914 int tabs = mstb->lct; 4915 char prefix[10]; 4916 int i; 4917 4918 for (i = 0; i < tabs; i++) 4919 prefix[i] = '\t'; 4920 prefix[i] = '\0'; 4921 4922 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports); 4923 list_for_each_entry(port, &mstb->ports, next) { 4924 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n", 4925 prefix, 4926 port->port_num, 4927 port, 4928 port->input ? "input" : "output", 4929 pdt_to_string(port->pdt), 4930 port->ddps, 4931 port->ldps, 4932 port->num_sdp_streams, 4933 port->num_sdp_stream_sinks, 4934 port->fec_capable ? "true" : "false", 4935 port->connector); 4936 if (port->mstb) 4937 drm_dp_mst_dump_mstb(m, port->mstb); 4938 } 4939 } 4940 4941 #define DP_PAYLOAD_TABLE_SIZE 64 4942 4943 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 4944 char *buf) 4945 { 4946 int i; 4947 4948 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { 4949 if (drm_dp_dpcd_read(mgr->aux, 4950 DP_PAYLOAD_TABLE_UPDATE_STATUS + i, 4951 &buf[i], 16) != 16) 4952 return false; 4953 } 4954 return true; 4955 } 4956 4957 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr, 4958 struct drm_dp_mst_port *port, char *name, 4959 int namelen) 4960 { 4961 struct edid *mst_edid; 4962 4963 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); 4964 drm_edid_get_monitor_name(mst_edid, name, namelen); 4965 kfree(mst_edid); 4966 } 4967 4968 /** 4969 * drm_dp_mst_dump_topology(): dump topology to seq file. 4970 * @m: seq_file to dump output to 4971 * @mgr: manager to dump current topology for. 
4972 * 4973 * helper to dump MST topology to a seq file for debugfs. 4974 */ 4975 void drm_dp_mst_dump_topology(struct seq_file *m, 4976 struct drm_dp_mst_topology_mgr *mgr) 4977 { 4978 struct drm_dp_mst_topology_state *state; 4979 struct drm_dp_mst_atomic_payload *payload; 4980 int i, ret; 4981 4982 static const char *const status[] = { 4983 "None", 4984 "Local", 4985 "DFP", 4986 "Remote", 4987 }; 4988 4989 mutex_lock(&mgr->lock); 4990 if (mgr->mst_primary) 4991 drm_dp_mst_dump_mstb(m, mgr->mst_primary); 4992 4993 /* dump VCPIs */ 4994 mutex_unlock(&mgr->lock); 4995 4996 ret = drm_modeset_lock_single_interruptible(&mgr->base.lock); 4997 if (ret < 0) 4998 return; 4999 5000 state = to_drm_dp_mst_topology_state(mgr->base.state); 5001 seq_printf(m, "\n*** Atomic state info ***\n"); 5002 seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n", 5003 state->payload_mask, mgr->max_payloads, state->start_slot, 5004 dfixed_trunc(state->pbn_div)); 5005 5006 seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n"); 5007 for (i = 0; i < mgr->max_payloads; i++) { 5008 list_for_each_entry(payload, &state->payloads, next) { 5009 char name[14]; 5010 5011 if (payload->vcpi != i || payload->delete) 5012 continue; 5013 5014 fetch_monitor_name(mgr, payload->port, name, sizeof(name)); 5015 seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %8s %19s\n", 5016 i, 5017 payload->port->port_num, 5018 payload->vcpi, 5019 payload->vc_start_slot, 5020 payload->vc_start_slot + payload->time_slots - 1, 5021 payload->pbn, 5022 payload->dsc_enabled ? "Y" : "N", 5023 status[payload->payload_allocation_status], 5024 (*name != 0) ? name : "Unknown"); 5025 } 5026 } 5027 5028 seq_printf(m, "\n*** DPCD Info ***\n"); 5029 mutex_lock(&mgr->lock); 5030 if (mgr->mst_primary) { 5031 u8 buf[DP_PAYLOAD_TABLE_SIZE]; 5032 int ret; 5033 5034 if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) { 5035 seq_printf(m, "dpcd read failed\n"); 5036 goto out; 5037 } 5038 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); 5039 5040 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); 5041 if (ret != 2) { 5042 seq_printf(m, "faux/mst read failed\n"); 5043 goto out; 5044 } 5045 seq_printf(m, "faux/mst: %*ph\n", 2, buf); 5046 5047 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); 5048 if (ret != 1) { 5049 seq_printf(m, "mst ctrl read failed\n"); 5050 goto out; 5051 } 5052 seq_printf(m, "mst ctrl: %*ph\n", 1, buf); 5053 5054 /* dump the standard OUI branch header */ 5055 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); 5056 if (ret != DP_BRANCH_OUI_HEADER_SIZE) { 5057 seq_printf(m, "branch oui read failed\n"); 5058 goto out; 5059 } 5060 seq_printf(m, "branch oui: %*phN devid: ", 3, buf); 5061 5062 for (i = 0x3; i < 0x8 && buf[i]; i++) 5063 seq_putc(m, buf[i]); 5064 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", 5065 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); 5066 if (dump_dp_payload_table(mgr, buf)) 5067 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf); 5068 } 5069 5070 out: 5071 mutex_unlock(&mgr->lock); 5072 drm_modeset_unlock(&mgr->base.lock); 5073 } 5074 EXPORT_SYMBOL(drm_dp_mst_dump_topology); 5075 5076 static void drm_dp_tx_work(struct work_struct *work) 5077 { 5078 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); 5079 5080 mutex_lock(&mgr->qlock); 5081 if (!list_empty(&mgr->tx_msg_downq)) 5082 process_single_down_tx_qlock(mgr); 5083 mutex_unlock(&mgr->qlock); 5084 } 5085 5086 
static inline void 5087 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) 5088 { 5089 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); 5090 5091 if (port->connector) { 5092 drm_connector_unregister(port->connector); 5093 drm_connector_put(port->connector); 5094 } 5095 5096 drm_dp_mst_put_port_malloc(port); 5097 } 5098 5099 static inline void 5100 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb) 5101 { 5102 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 5103 struct drm_dp_mst_port *port, *port_tmp; 5104 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp; 5105 bool wake_tx = false; 5106 5107 mutex_lock(&mgr->lock); 5108 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) { 5109 list_del(&port->next); 5110 drm_dp_mst_topology_put_port(port); 5111 } 5112 mutex_unlock(&mgr->lock); 5113 5114 /* drop any tx slot msg */ 5115 mutex_lock(&mstb->mgr->qlock); 5116 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) { 5117 if (txmsg->dst != mstb) 5118 continue; 5119 5120 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 5121 list_del(&txmsg->next); 5122 wake_tx = true; 5123 } 5124 mutex_unlock(&mstb->mgr->qlock); 5125 5126 if (wake_tx) 5127 wake_up_all(&mstb->mgr->tx_waitq); 5128 5129 drm_dp_mst_put_mstb_malloc(mstb); 5130 } 5131 5132 static void drm_dp_delayed_destroy_work(struct work_struct *work) 5133 { 5134 struct drm_dp_mst_topology_mgr *mgr = 5135 container_of(work, struct drm_dp_mst_topology_mgr, 5136 delayed_destroy_work); 5137 bool send_hotplug = false, go_again; 5138 5139 /* 5140 * Not a regular list traverse as we have to drop the destroy 5141 * connector lock before destroying the mstb/port, to avoid AB->BA 5142 * ordering between this lock and the config mutex. 5143 */ 5144 do { 5145 go_again = false; 5146 5147 for (;;) { 5148 struct drm_dp_mst_branch *mstb; 5149 5150 mutex_lock(&mgr->delayed_destroy_lock); 5151 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, 5152 struct drm_dp_mst_branch, 5153 destroy_next); 5154 if (mstb) 5155 list_del(&mstb->destroy_next); 5156 mutex_unlock(&mgr->delayed_destroy_lock); 5157 5158 if (!mstb) 5159 break; 5160 5161 drm_dp_delayed_destroy_mstb(mstb); 5162 go_again = true; 5163 } 5164 5165 for (;;) { 5166 struct drm_dp_mst_port *port; 5167 5168 mutex_lock(&mgr->delayed_destroy_lock); 5169 port = list_first_entry_or_null(&mgr->destroy_port_list, 5170 struct drm_dp_mst_port, 5171 next); 5172 if (port) 5173 list_del(&port->next); 5174 mutex_unlock(&mgr->delayed_destroy_lock); 5175 5176 if (!port) 5177 break; 5178 5179 drm_dp_delayed_destroy_port(port); 5180 send_hotplug = true; 5181 go_again = true; 5182 } 5183 } while (go_again); 5184 5185 if (send_hotplug) 5186 drm_kms_helper_hotplug_event(mgr->dev); 5187 } 5188 5189 static struct drm_private_state * 5190 drm_dp_mst_duplicate_state(struct drm_private_obj *obj) 5191 { 5192 struct drm_dp_mst_topology_state *state, *old_state = 5193 to_dp_mst_topology_state(obj->state); 5194 struct drm_dp_mst_atomic_payload *pos, *payload; 5195 5196 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL); 5197 if (!state) 5198 return NULL; 5199 5200 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 5201 5202 INIT_LIST_HEAD(&state->payloads); 5203 state->commit_deps = NULL; 5204 state->num_commit_deps = 0; 5205 state->pending_crtc_mask = 0; 5206 5207 list_for_each_entry(pos, &old_state->payloads, next) { 5208 /* Prune leftover freed timeslot allocations */ 5209 if (pos->delete) 5210 continue; 5211 5212 payload = kmemdup(pos, sizeof(*payload), 
GFP_KERNEL); 5213 if (!payload) 5214 goto fail; 5215 5216 drm_dp_mst_get_port_malloc(payload->port); 5217 list_add(&payload->next, &state->payloads); 5218 } 5219 5220 return &state->base; 5221 5222 fail: 5223 list_for_each_entry_safe(pos, payload, &state->payloads, next) { 5224 drm_dp_mst_put_port_malloc(pos->port); 5225 kfree(pos); 5226 } 5227 kfree(state); 5228 5229 return NULL; 5230 } 5231 5232 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, 5233 struct drm_private_state *state) 5234 { 5235 struct drm_dp_mst_topology_state *mst_state = 5236 to_dp_mst_topology_state(state); 5237 struct drm_dp_mst_atomic_payload *pos, *tmp; 5238 int i; 5239 5240 list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) { 5241 /* We only keep references to ports with active payloads */ 5242 if (!pos->delete) 5243 drm_dp_mst_put_port_malloc(pos->port); 5244 kfree(pos); 5245 } 5246 5247 for (i = 0; i < mst_state->num_commit_deps; i++) 5248 drm_crtc_commit_put(mst_state->commit_deps[i]); 5249 5250 kfree(mst_state->commit_deps); 5251 kfree(mst_state); 5252 } 5253 5254 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, 5255 struct drm_dp_mst_branch *branch) 5256 { 5257 while (port->parent) { 5258 if (port->parent == branch) 5259 return true; 5260 5261 if (port->parent->port_parent) 5262 port = port->parent->port_parent; 5263 else 5264 break; 5265 } 5266 return false; 5267 } 5268 5269 static bool 5270 drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr, 5271 struct drm_dp_mst_port *port, 5272 struct drm_dp_mst_port *parent) 5273 { 5274 if (!mgr->mst_primary) 5275 return false; 5276 5277 port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, 5278 port); 5279 if (!port) 5280 return false; 5281 5282 if (!parent) 5283 return true; 5284 5285 parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, 5286 parent); 5287 if (!parent) 5288 return false; 5289 5290 if (!parent->mstb) 5291 return false; 5292 5293 return drm_dp_mst_port_downstream_of_branch(port, parent->mstb); 5294 } 5295 5296 /** 5297 * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port 5298 * @mgr: MST topology manager 5299 * @port: the port being looked up 5300 * @parent: the parent port 5301 * 5302 * The function returns %true if @port is downstream of @parent. If @parent is 5303 * %NULL - denoting the root port - the function returns %true if @port is in 5304 * @mgr's topology. 
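 *
 * A small sketch of one possible use, accounting only for payloads that
 * compete for @parent's downstream bandwidth (hypothetical code)::
 *
 *	if (drm_dp_mst_port_downstream_of_parent(mgr, payload->port, parent))
 *		used_pbn += payload->pbn;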
5305 */ 5306 bool 5307 drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr, 5308 struct drm_dp_mst_port *port, 5309 struct drm_dp_mst_port *parent) 5310 { 5311 bool ret; 5312 5313 mutex_lock(&mgr->lock); 5314 ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent); 5315 mutex_unlock(&mgr->lock); 5316 5317 return ret; 5318 } 5319 EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent); 5320 5321 static int 5322 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, 5323 struct drm_dp_mst_topology_state *state, 5324 struct drm_dp_mst_port **failing_port); 5325 5326 static int 5327 drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, 5328 struct drm_dp_mst_topology_state *state, 5329 struct drm_dp_mst_port **failing_port) 5330 { 5331 struct drm_dp_mst_atomic_payload *payload; 5332 struct drm_dp_mst_port *port; 5333 int pbn_used = 0, ret; 5334 bool found = false; 5335 5336 /* Check that we have at least one port in our state that's downstream 5337 * of this branch, otherwise we can skip this branch 5338 */ 5339 list_for_each_entry(payload, &state->payloads, next) { 5340 if (!payload->pbn || 5341 !drm_dp_mst_port_downstream_of_branch(payload->port, mstb)) 5342 continue; 5343 5344 found = true; 5345 break; 5346 } 5347 if (!found) 5348 return 0; 5349 5350 if (mstb->port_parent) 5351 drm_dbg_atomic(mstb->mgr->dev, 5352 "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n", 5353 mstb->port_parent->parent, mstb->port_parent, mstb); 5354 else 5355 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb); 5356 5357 list_for_each_entry(port, &mstb->ports, next) { 5358 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port); 5359 if (ret < 0) 5360 return ret; 5361 5362 pbn_used += ret; 5363 } 5364 5365 return pbn_used; 5366 } 5367 5368 static int 5369 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, 5370 struct drm_dp_mst_topology_state *state, 5371 struct drm_dp_mst_port **failing_port) 5372 { 5373 struct drm_dp_mst_atomic_payload *payload; 5374 int pbn_used = 0; 5375 5376 if (port->pdt == DP_PEER_DEVICE_NONE) 5377 return 0; 5378 5379 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 5380 payload = drm_atomic_get_mst_payload_state(state, port); 5381 if (!payload) 5382 return 0; 5383 5384 /* 5385 * This could happen if the sink deasserted its HPD line, but 5386 * the branch device still reports it as attached (PDT != NONE). 
5387 */ 5388 if (!port->full_pbn) { 5389 drm_dbg_atomic(port->mgr->dev, 5390 "[MSTB:%p] [MST PORT:%p] no BW available for the port\n", 5391 port->parent, port); 5392 *failing_port = port; 5393 return -EINVAL; 5394 } 5395 5396 pbn_used = payload->pbn; 5397 } else { 5398 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, 5399 state, 5400 failing_port); 5401 if (pbn_used <= 0) 5402 return pbn_used; 5403 } 5404 5405 if (pbn_used > port->full_pbn) { 5406 drm_dbg_atomic(port->mgr->dev, 5407 "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", 5408 port->parent, port, pbn_used, port->full_pbn); 5409 *failing_port = port; 5410 return -ENOSPC; 5411 } 5412 5413 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", 5414 port->parent, port, pbn_used, port->full_pbn); 5415 5416 return pbn_used; 5417 } 5418 5419 static inline int 5420 drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr, 5421 struct drm_dp_mst_topology_state *mst_state) 5422 { 5423 struct drm_dp_mst_atomic_payload *payload; 5424 int avail_slots = mst_state->total_avail_slots, payload_count = 0; 5425 5426 list_for_each_entry(payload, &mst_state->payloads, next) { 5427 /* Releasing payloads is always OK-even if the port is gone */ 5428 if (payload->delete) { 5429 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n", 5430 payload->port); 5431 continue; 5432 } 5433 5434 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n", 5435 payload->port, payload->time_slots); 5436 5437 avail_slots -= payload->time_slots; 5438 if (avail_slots < 0) { 5439 drm_dbg_atomic(mgr->dev, 5440 "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n", 5441 payload->port, mst_state, avail_slots + payload->time_slots); 5442 return -ENOSPC; 5443 } 5444 5445 if (++payload_count > mgr->max_payloads) { 5446 drm_dbg_atomic(mgr->dev, 5447 "[MST MGR:%p] state %p has too many payloads (max=%d)\n", 5448 mgr, mst_state, mgr->max_payloads); 5449 return -EINVAL; 5450 } 5451 5452 /* Assign a VCPI */ 5453 if (!payload->vcpi) { 5454 payload->vcpi = ffz(mst_state->payload_mask) + 1; 5455 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n", 5456 payload->port, payload->vcpi); 5457 mst_state->payload_mask |= BIT(payload->vcpi - 1); 5458 } 5459 } 5460 5461 if (!payload_count) 5462 mst_state->pbn_div.full = dfixed_const(0); 5463 5464 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n", 5465 mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots, 5466 mst_state->total_avail_slots - avail_slots); 5467 5468 return 0; 5469 } 5470 5471 /** 5472 * drm_dp_mst_add_affected_dsc_crtcs 5473 * @state: Pointer to the new struct drm_dp_mst_topology_state 5474 * @mgr: MST topology manager 5475 * 5476 * Whenever there is a change in mst topology 5477 * DSC configuration would have to be recalculated 5478 * therefore we need to trigger modeset on all affected 5479 * CRTCs in that topology 5480 * 5481 * See also: 5482 * drm_dp_mst_atomic_enable_dsc() 5483 */ 5484 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr) 5485 { 5486 struct drm_dp_mst_topology_state *mst_state; 5487 struct drm_dp_mst_atomic_payload *pos; 5488 struct drm_connector *connector; 5489 struct drm_connector_state *conn_state; 5490 struct drm_crtc *crtc; 5491 struct drm_crtc_state *crtc_state; 5492 5493 mst_state = drm_atomic_get_mst_topology_state(state, mgr); 5494 5495 if (IS_ERR(mst_state)) 5496 return 
PTR_ERR(mst_state); 5497 5498 list_for_each_entry(pos, &mst_state->payloads, next) { 5499 5500 connector = pos->port->connector; 5501 5502 if (!connector) 5503 return -EINVAL; 5504 5505 conn_state = drm_atomic_get_connector_state(state, connector); 5506 5507 if (IS_ERR(conn_state)) 5508 return PTR_ERR(conn_state); 5509 5510 crtc = conn_state->crtc; 5511 5512 if (!crtc) 5513 continue; 5514 5515 if (!drm_dp_mst_dsc_aux_for_port(pos->port)) 5516 continue; 5517 5518 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc); 5519 5520 if (IS_ERR(crtc_state)) 5521 return PTR_ERR(crtc_state); 5522 5523 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", 5524 mgr, crtc); 5525 5526 crtc_state->mode_changed = true; 5527 } 5528 return 0; 5529 } 5530 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs); 5531 5532 /** 5533 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off 5534 * @state: Pointer to the new drm_atomic_state 5535 * @port: Pointer to the affected MST Port 5536 * @pbn: Newly recalculated bw required for link with DSC enabled 5537 * @enable: Boolean flag to enable or disable DSC on the port 5538 * 5539 * This function enables DSC on the given Port 5540 * by recalculating its vcpi from pbn provided 5541 * and sets dsc_enable flag to keep track of which 5542 * ports have DSC enabled 5543 * 5544 */ 5545 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, 5546 struct drm_dp_mst_port *port, 5547 int pbn, bool enable) 5548 { 5549 struct drm_dp_mst_topology_state *mst_state; 5550 struct drm_dp_mst_atomic_payload *payload; 5551 int time_slots = 0; 5552 5553 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); 5554 if (IS_ERR(mst_state)) 5555 return PTR_ERR(mst_state); 5556 5557 payload = drm_atomic_get_mst_payload_state(mst_state, port); 5558 if (!payload) { 5559 drm_dbg_atomic(state->dev, 5560 "[MST PORT:%p] Couldn't find payload in mst state %p\n", 5561 port, mst_state); 5562 return -EINVAL; 5563 } 5564 5565 if (payload->dsc_enabled == enable) { 5566 drm_dbg_atomic(state->dev, 5567 "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n", 5568 port, enable, payload->time_slots); 5569 time_slots = payload->time_slots; 5570 } 5571 5572 if (enable) { 5573 time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn); 5574 drm_dbg_atomic(state->dev, 5575 "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n", 5576 port, time_slots); 5577 if (time_slots < 0) 5578 return -EINVAL; 5579 } 5580 5581 payload->dsc_enabled = enable; 5582 5583 return time_slots; 5584 } 5585 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); 5586 5587 /** 5588 * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager 5589 * @state: The global atomic state 5590 * @mgr: Manager to check 5591 * @mst_state: The MST atomic state for @mgr 5592 * @failing_port: Returns the port with a BW limitation 5593 * 5594 * Checks the given MST manager's topology state for an atomic update to ensure 5595 * that it's valid. This includes checking whether there's enough bandwidth to 5596 * support the new timeslot allocations in the atomic update. 5597 * 5598 * Any atomic drivers supporting DP MST must make sure to call this or 5599 * the drm_dp_mst_atomic_check() function after checking the rest of their state 5600 * in their &drm_mode_config_funcs.atomic_check() callback. 
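 *
 * A sketch of the simple case, from a hypothetical driver's
 * &drm_mode_config_funcs.atomic_check() implementation::
 *
 *	ret = drm_atomic_helper_check(dev, state);
 *	if (ret)
 *		return ret;
 *
 *	return drm_dp_mst_atomic_check(state);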

/**
 * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
 * @state: The global atomic state
 * @mgr: Manager to check
 * @mst_state: The MST atomic state for @mgr
 * @failing_port: Returns the port with a BW limitation
 *
 * Checks the given MST manager's topology state for an atomic update to ensure
 * that it's valid. This includes checking whether there's enough bandwidth to
 * support the new timeslot allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this or
 * the drm_dp_mst_atomic_check() function after checking the rest of their state
 * in their &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_mst_atomic_check()
 * drm_dp_atomic_find_time_slots()
 * drm_dp_atomic_release_time_slots()
 *
 * Returns:
 *
 *   - 0 if the new state is valid
 *   - %-ENOSPC, if the new state is invalid, because of BW limitation
 *     @failing_port is set to:
 *
 *     - The non-root port where a BW limit check failed
 *       with all the ports downstream of @failing_port passing
 *       the BW limit check.
 *       The returned port pointer is valid until at least
 *       one payload downstream of it exists.
 *     - %NULL if the BW limit check failed at the root port
 *       with all the ports downstream of the root port passing
 *       the BW limit check.
 *
 *   - %-EINVAL, if the new state is invalid, because the root port has
 *     too many payloads.
 */
int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
				struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_topology_state *mst_state,
				struct drm_dp_mst_port **failing_port)
{
	int ret;

	*failing_port = NULL;

	if (!mgr->mst_state)
		return 0;

	mutex_lock(&mgr->lock);
	ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
						    mst_state,
						    failing_port);
	mutex_unlock(&mgr->lock);

	if (ret < 0)
		return ret;

	return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);

/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_atomic_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid, calling drm_dp_mst_atomic_check_mgr() for all MST managers in the
 * atomic state. This includes checking whether there's enough bandwidth to
 * support the new timeslot allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_mst_atomic_check_mgr()
 * drm_dp_atomic_find_time_slots()
 * drm_dp_atomic_release_time_slots()
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct drm_dp_mst_port *tmp_port;

		ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);
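
/*
 * A minimal sketch (not compiled with this file) of the expected call site: a
 * hypothetical driver's &drm_mode_config_funcs.atomic_check implementation
 * validates the rest of its state first and runs the MST check last, as the
 * documentation above requires. example_atomic_check() is a placeholder name.
 */
#if 0
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	/* Driver/core checks first */
	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* Verify BW limits and payload/VCPI accounting for every MST manager */
	return drm_dp_mst_atomic_check(state);
}
#endif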

const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);

/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of an
 * MST topology object.
 *
 * RETURNS:
 * The MST topology state or error pointer.
 */
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);

/**
 * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of an
 * MST topology object.
 *
 * Returns:
 * The old MST topology state, or NULL if there's no topology state for this MST mgr
 * in the global atomic state
 */
struct drm_dp_mst_topology_state *
drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_private_state *old_priv_state =
		drm_atomic_get_old_private_obj_state(state, &mgr->base);

	return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
}
EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);

/**
 * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of an
 * MST topology object.
 *
 * Returns:
 * The new MST topology state, or NULL if there's no topology state for this MST mgr
 * in the global atomic state
 */
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_private_state *new_priv_state =
		drm_atomic_get_new_private_obj_state(state, &mgr->base);

	return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
}
EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
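
/*
 * Illustrative sketch (not compiled with this file): in the commit path a
 * driver typically looks up the committed topology state and the payload it
 * allocated for a port during the check phase. example_program_payload() is a
 * placeholder for hardware-specific programming, not part of the MST helpers.
 */
#if 0
static void example_commit_payload(struct drm_atomic_state *state,
				   struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_atomic_payload *payload;

	mst_state = drm_atomic_get_new_mst_topology_state(state, mgr);
	if (!mst_state)
		return;

	payload = drm_atomic_get_mst_payload_state(mst_state, port);
	if (payload)
		/* example_program_payload() is hypothetical driver code */
		example_program_payload(payload->vc_start_slot, payload->time_slots);
}
#endif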

/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes, int max_payloads,
				 int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->delayed_destroy_lock);
	mutex_init(&mgr->up_req_lock);
	mutex_init(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_init(&mgr->topology_ref_history_lock);
	stack_depot_init();
#endif
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_port_list);
	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
	INIT_LIST_HEAD(&mgr->up_req_list);

	/*
	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
	 * requeuing will be also flushed when deiniting the topology manager.
	 */
	mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
	if (mgr->delayed_destroy_wq == NULL)
		return -ENOMEM;

	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->total_avail_slots = 63;
	mst_state->start_slot = 1;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->payloads);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
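
/*
 * Illustrative sketch (not compiled with this file): a driver usually calls
 * the init function once per MST-capable connector at probe time. The struct
 * example_connector, its fields, and the transaction size and payload count
 * below are placeholders; real values are hardware specific.
 */
#if 0
static int example_mst_init(struct example_connector *conn)
{
	/* 16-byte DPCD transactions, up to 8 simultaneous streams (example) */
	return drm_dp_mst_topology_mgr_init(&conn->mst_mgr, conn->base.dev,
					    &conn->dp_aux, 16, 8,
					    conn->base.base.id);
}
#endif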

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	/* The following will also drain any requeued work on the WQ. */
	if (mgr->delayed_destroy_wq) {
		destroy_workqueue(mgr->delayed_destroy_wq);
		mgr->delayed_destroy_wq = NULL;
	}
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;

	mutex_destroy(&mgr->delayed_destroy_lock);
	mutex_destroy(&mgr->qlock);
	mutex_destroy(&mgr->lock);
	mutex_destroy(&mgr->up_req_lock);
	mutex_destroy(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_destroy(&mgr->topology_ref_history_lock);
#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}

static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
		    msgs[i].len > 0xff)
			return false;
	}

	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
}
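
/*
 * Illustrative sketch (not compiled with this file): the shape of an I2C
 * transfer that remote_i2c_read_ok() accepts - up to
 * DP_REMOTE_I2C_READ_MAX_TRANSACTIONS write transactions followed by exactly
 * one read, each at most 255 bytes. This mirrors a typical EDID block fetch
 * from a sink at address 0x50; the function name is a placeholder.
 */
#if 0
static void example_edid_read_msgs(u8 *offset, u8 *block)
{
	struct i2c_msg msgs[] = {
		/* one write of the EDID offset, then one 128-byte read */
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = block },
	};

	/* remote_i2c_read_ok(msgs, ARRAY_SIZE(msgs)) evaluates to true */
}
#endif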

static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_mst_port *port,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	return ret;
}

static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port,
				struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < num; i++) {
		memset(&msg, 0, sizeof(msg));
		msg.req_type = DP_REMOTE_I2C_WRITE;
		msg.u.i2c_write.port_number = port->port_num;
		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
		msg.u.i2c_write.num_bytes = msgs[i].len;
		msg.u.i2c_write.bytes = msgs[i].buf;

		memset(txmsg, 0, sizeof(*txmsg));
		txmsg->dst = mstb;

		drm_dp_encode_sideband_req(&msg, txmsg);
		drm_dp_queue_down_tx(mgr, txmsg);

		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (ret > 0) {
			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
				ret = -EREMOTEIO;
				goto out;
			}
		} else {
			goto out;
		}
	}
	ret = num;
out:
	kfree(txmsg);
	return ret;
}

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (remote_i2c_read_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
	} else if (remote_i2c_write_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
	} else {
		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
		ret = -EIO;
	}

	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @port: The port to add the I2C bus on
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *aux = &port->aux;
	struct device *parent_dev = port->mgr->dev->dev;

	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.owner = THIS_MODULE;
	/* FIXME: set the kdev of the port's connector as parent */
	aux->ddc.dev.parent = parent_dev;
	aux->ddc.dev.of_node = parent_dev->of_node;

	strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @port: The port to remove the I2C bus from
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
{
	i2c_del_adapter(&port->aux.ddc);
}

/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
 *
 * May acquire mgr->lock
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *downstream_port;

	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
		return false;

	/* Virtual DP Sink (Internal Display Panel) */
	if (drm_dp_mst_port_is_logical(port))
		return true;

	/* DP-to-HDMI Protocol Converter */
	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
	    !port->mcs &&
	    port->ldps)
		return true;

	/* DP-to-DP */
	mutex_lock(&port->mgr->lock);
	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
	    port->mstb &&
	    port->mstb->num_ports == 2) {
		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
			    !downstream_port->input) {
				mutex_unlock(&port->mgr->lock);
				return true;
			}
		}
	}
	mutex_unlock(&port->mgr->lock);

	return false;
}

/**
 * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent
 * @port: MST port whose parent's AUX device is returned
 *
 * Return the AUX device for @port's parent or NULL if port's parent is the
 * root port.
 */
struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port)
{
	if (!port->parent || !port->parent->port_parent)
		return NULL;

	return &port->parent->port_parent->aux;
}
EXPORT_SYMBOL(drm_dp_mst_aux_for_parent);

/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_aux *immediate_upstream_aux;
	struct drm_dp_mst_port *fec_port;
	struct drm_dp_desc desc = {};
	u8 upstream_dsc;
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;

	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}

	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		if (drm_dp_dpcd_read(&port->aux,
				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&port->aux,
				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
			port->passthrough_aux = &immediate_upstream_port->aux;
			return &port->aux;
		}

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}

	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;

	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPv1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
	if (immediate_upstream_port)
		immediate_upstream_aux = &immediate_upstream_port->aux;
	else
		immediate_upstream_aux = port->mgr->aux;

	if (drm_dp_read_desc(immediate_upstream_aux, &desc, true))
		return NULL;

	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];

		if (drm_dp_dpcd_read(immediate_upstream_aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
			return NULL;

		if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
			return NULL;

		if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
		     ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
		      != DP_DWN_STRM_PORT_TYPE_ANALOG)))
			return immediate_upstream_aux;
	}

	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read(&port->aux,
			     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
			     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	    (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
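
/*
 * Illustrative sketch (not compiled with this file): since the lookup above
 * can cost several AUX reads, a driver would typically resolve the DSC aux
 * once when the port's connector is detected and cache both the aux and the
 * DSC capability byte. struct example_connector and its dsc_aux/dsc_cap
 * fields are hypothetical driver state, not part of the MST helpers.
 */
#if 0
static void example_cache_dsc_caps(struct example_connector *conn,
				   struct drm_dp_mst_port *port)
{
	conn->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
	if (!conn->dsc_aux)
		return;

	/* One cached read of the DSC support byte for later mode checks */
	drm_dp_dpcd_read(conn->dsc_aux, DP_DSC_SUPPORT, &conn->dsc_cap, 1);
}
#endif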