Lines matching defs:mgr in drm_dp_mst_topology.c
66 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
71 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
74 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
78 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
82 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
85 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
88 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
93 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
302 static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
320 drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
809 static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
1035 static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
1052 return drm_dp_sideband_parse_link_address(mgr, raw, msg);
1075 drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
1082 drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
1106 drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
1111 static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
1131 drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
1135 static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
1144 return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
1146 return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
1148 drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
1250 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
1256 * All updates to txmsg->state are protected by mgr->qlock, and the two
1268 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1287 ret = wait_event_timeout(mgr->tx_waitq,
1288 check_txmsg_state(mgr, txmsg),
1289 mgr->cbs->poll_hpd_irq ?
1293 if (ret || !mgr->cbs->poll_hpd_irq ||
1297 mgr->cbs->poll_hpd_irq(mgr);
1300 mutex_lock(&mgr->qlock);
1307 drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
1321 struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
1326 mutex_unlock(&mgr->qlock);
1328 drm_dp_mst_kick_tx(mgr);
1463 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
1480 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
1514 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
1531 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1541 __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1655 __dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
1662 __dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
1670 __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1677 __topology_ref_save(port->mgr, &port->topology_ref_history, type);
1681 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1683 mutex_lock(&mgr->topology_ref_history_lock);
1687 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1689 mutex_unlock(&mgr->topology_ref_history_lock);
1693 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1695 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1722 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1729 * This can get called under mgr->mutex, so we need to perform the
1732 mutex_lock(&mgr->delayed_destroy_lock);
1733 list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1734 mutex_unlock(&mgr->delayed_destroy_lock);
1735 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1765 topology_ref_history_lock(mstb->mgr);
1768 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1772 topology_ref_history_unlock(mstb->mgr);
1793 topology_ref_history_lock(mstb->mgr);
1798 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1800 topology_ref_history_unlock(mstb->mgr);
1818 topology_ref_history_lock(mstb->mgr);
1820 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
1823 topology_ref_history_unlock(mstb->mgr);
1831 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1847 mutex_lock(&mgr->delayed_destroy_lock);
1848 list_add(&port->next, &mgr->destroy_port_list);
1849 mutex_unlock(&mgr->delayed_destroy_lock);
1850 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1880 topology_ref_history_lock(port->mgr);
1883 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
1887 topology_ref_history_unlock(port->mgr);
1906 topology_ref_history_lock(port->mgr);
1910 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
1913 topology_ref_history_unlock(port->mgr);
1929 topology_ref_history_lock(port->mgr);
1931 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
1934 topology_ref_history_unlock(port->mgr);
1960 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1965 mutex_lock(&mgr->lock);
1966 if (mgr->mst_primary) {
1968 mgr->mst_primary, mstb);
1973 mutex_unlock(&mgr->lock);
1998 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
2003 mutex_lock(&mgr->lock);
2004 if (mgr->mst_primary) {
2006 mgr->mst_primary, port);
2011 mutex_unlock(&mgr->lock);
2072 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2097 mutex_lock(&mgr->lock);
2100 mutex_unlock(&mgr->lock);
2116 drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
2120 mutex_lock(&mgr->lock);
2122 mstb->mgr = port->mgr;
2130 mutex_unlock(&mgr->lock);
2162 return drm_dp_send_dpcd_read(port->mgr, port,
2185 return drm_dp_send_dpcd_write(port->mgr, port,
2195 if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
2204 aux = mstb->mgr->aux;
2220 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2246 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
2266 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
2276 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2281 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2297 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
2305 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2308 mutex_lock(&mgr->lock);
2311 mutex_unlock(&mgr->lock);
2317 struct drm_dp_mst_topology_mgr *mgr,
2329 port->mgr = mgr;
2352 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2361 port = drm_dp_mst_add_port(dev, mgr, mstb,
2371 drm_dp_mst_topology_unlink_port(mgr, port);
2373 port = drm_dp_mst_add_port(dev, mgr, mstb,
2385 drm_modeset_lock(&mgr->base.lock, NULL);
2407 /* manage mstb port lists with mgr lock - take a reference
2410 mutex_lock(&mgr->lock);
2414 mutex_unlock(&mgr->lock);
2422 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2448 drm_modeset_unlock(&mgr->base.lock);
2453 ret = drm_dp_send_link_address(mgr, port->mstb);
2465 drm_dp_mst_topology_unlink_port(mgr, port);
2467 drm_modeset_unlock(&mgr->base.lock);
2477 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2495 drm_dp_mst_topology_unlink_port(mgr, port);
2502 drm_modeset_lock(&mgr->base.lock, NULL);
2517 drm_dp_send_enum_path_resources(mgr, mstb, port);
2528 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
2533 drm_modeset_unlock(&mgr->base.lock);
2542 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2550 mutex_lock(&mgr->lock);
2551 mstb = mgr->mst_primary;
2563 drm_err(mgr->dev,
2577 mutex_unlock(&mgr->lock);
2605 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2612 mutex_lock(&mgr->lock);
2614 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2621 mutex_unlock(&mgr->lock);
2625 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2633 ret = drm_dp_send_link_address(mgr, mstb);
2644 ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
2656 struct drm_dp_mst_topology_mgr *mgr =
2658 struct drm_device *dev = mgr->dev;
2663 mutex_lock(&mgr->probe_lock);
2665 mutex_lock(&mgr->lock);
2666 clear_payload_id_table = !mgr->payload_id_table_cleared;
2667 mgr->payload_id_table_cleared = true;
2669 mstb = mgr->mst_primary;
2675 mutex_unlock(&mgr->lock);
2677 mutex_unlock(&mgr->probe_lock);
2691 drm_dp_send_clear_payload_id_table(mgr, mstb);
2694 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2697 mutex_unlock(&mgr->probe_lock);
2702 static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr)
2704 queue_work(system_long_wq, &mgr->work);
2707 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2730 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2742 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2744 ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
2751 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
2790 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2832 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2835 struct drm_printer p = drm_dbg_printer(mgr->dev,
2853 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2858 WARN_ON(!mutex_is_locked(&mgr->qlock));
2861 if (list_empty(&mgr->tx_msg_downq))
2864 txmsg = list_first_entry(&mgr->tx_msg_downq,
2866 ret = process_single_tx_qlock(mgr, txmsg, false);
2868 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
2871 wake_up_all(&mgr->tx_waitq);
2875 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2878 mutex_lock(&mgr->qlock);
2879 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2882 struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
2888 if (list_is_singular(&mgr->tx_msg_downq))
2889 process_single_down_tx_qlock(mgr);
2890 mutex_unlock(&mgr->qlock);
2894 drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
2902 drm_dbg_kms(mgr->dev,
2917 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2934 drm_dp_queue_down_tx(mgr, txmsg);
2939 drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
2943 drm_err(mgr->dev, "link address NAK received\n");
2949 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
2950 drm_dp_dump_link_address(mgr, reply);
2957 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
2963 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2976 mutex_lock(&mgr->lock);
2981 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
2987 mutex_unlock(&mgr->lock);
2997 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
3010 drm_dp_queue_down_tx(mgr, txmsg);
3014 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
3020 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3035 drm_dp_queue_down_tx(mgr, txmsg);
3043 drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
3048 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
3090 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3097 mutex_lock(&mgr->lock);
3098 if (!mgr->mst_primary)
3115 mutex_unlock(&mgr->lock);
3119 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3131 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3133 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3155 drm_dp_queue_down_tx(mgr, txmsg);
3178 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3184 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3196 drm_dp_queue_down_tx(mgr, txmsg);
3212 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
3226 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3234 drm_modeset_lock(&mgr->base.lock, NULL);
3235 state = to_drm_dp_mst_topology_state(mgr->base.state);
3243 txmsg->dst = mgr->mst_primary;
3247 drm_dp_queue_down_tx(mgr, txmsg);
3249 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3253 drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3262 drm_modeset_unlock(&mgr->base.lock);
3270 static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
3273 return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot,
3277 static int drm_dp_create_payload_to_remote(struct drm_dp_mst_topology_mgr *mgr,
3281 struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
3286 ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
3291 static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_mgr *mgr,
3295 drm_dbg_kms(mgr->dev, "\n");
3299 drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
3304 drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0);
3309 * @mgr: Manager to use.
3318 int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
3325 /* Update mst mgr info */
3326 if (mgr->payload_count == 0)
3327 mgr->next_start_slot = mst_state->start_slot;
3329 payload->vc_start_slot = mgr->next_start_slot;
3331 mgr->payload_count++;
3332 mgr->next_start_slot += payload->time_slots;
3337 port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
3339 drm_dbg_kms(mgr->dev,
3345 ret = drm_dp_create_payload_at_dfp(mgr, payload);
3347 drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n",
3363 * @mgr: Manager to use.
3371 void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
3378 mutex_lock(&mgr->lock);
3379 send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
3380 mutex_unlock(&mgr->lock);
3383 drm_dp_destroy_payload_at_remote_and_dfp(mgr, mst_state, payload);
3385 drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
3394 * @mgr: Manager to use.
3404 void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
3418 mgr->payload_count--;
3419 mgr->next_start_slot -= old_payload->time_slots;
3429 * @mgr: Manager to use.
3437 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
3444 drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
3450 ret = drm_dp_create_payload_to_remote(mgr, payload);
3452 drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
3461 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3469 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3482 drm_dp_queue_down_tx(mgr, txmsg);
3489 drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3512 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3520 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3533 drm_dp_queue_down_tx(mgr, txmsg);
3559 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3572 mutex_lock(&mgr->qlock);
3574 process_single_tx_qlock(mgr, txmsg, true);
3575 mutex_unlock(&mgr->qlock);
3638 * @mgr: manager to set state for
3644 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3649 mutex_lock(&mgr->lock);
3650 if (mst_state == mgr->mst_state)
3653 mgr->mst_state = mst_state;
3656 WARN_ON(mgr->mst_primary);
3659 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
3661 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
3662 mgr->aux->name, ret);
3672 mstb->mgr = mgr;
3675 mgr->mst_primary = mstb;
3676 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3678 ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
3686 drm_dp_dpcd_clear_payload(mgr->aux);
3688 drm_dp_mst_queue_probe_work(mgr);
3693 mstb = mgr->mst_primary;
3694 mgr->mst_primary = NULL;
3696 drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
3698 mgr->payload_id_table_cleared = false;
3700 mgr->reset_rx_state = true;
3704 mutex_unlock(&mgr->lock);
3727 * @mgr: manager to probe
3737 void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr)
3739 mutex_lock(&mgr->lock);
3741 if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary))
3744 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3745 drm_dp_mst_queue_probe_work(mgr);
3748 mutex_unlock(&mgr->lock);
3754 * @mgr: manager to suspend
3759 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3761 mutex_lock(&mgr->lock);
3762 drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
3764 mutex_unlock(&mgr->lock);
3765 flush_work(&mgr->up_req_work);
3766 flush_work(&mgr->work);
3767 flush_work(&mgr->delayed_destroy_work);
3769 mutex_lock(&mgr->lock);
3770 if (mgr->mst_state && mgr->mst_primary)
3771 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3772 mutex_unlock(&mgr->lock);
3778 * @mgr: manager to resume
3796 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3803 mutex_lock(&mgr->lock);
3804 if (!mgr->mst_primary)
3807 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
3808 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3812 ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
3817 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
3822 ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
3824 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3830 ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid);
3832 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
3841 drm_dp_mst_queue_probe_work(mgr);
3842 mutex_unlock(&mgr->lock);
3845 drm_dbg_kms(mgr->dev,
3847 flush_work(&mgr->work);
3853 mutex_unlock(&mgr->lock);
3864 drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3874 up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3881 len = min(mgr->max_dpcd_transaction_bytes, 16);
3882 ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
3884 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
3888 ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
3892 drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
3898 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3900 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
3906 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
3913 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
3920 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3921 ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
3924 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
3931 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
3946 static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
3960 drm_dbg_kms(mgr->dev,
3969 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3973 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3975 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3983 mutex_lock(&mgr->qlock);
3985 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3993 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
3996 mutex_unlock(&mgr->qlock);
4001 if (!verify_rx_request_type(mgr, txmsg, msg)) {
4002 mutex_unlock(&mgr->qlock);
4007 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
4010 drm_dbg_kms(mgr->dev,
4022 mutex_unlock(&mgr->qlock);
4024 wake_up_all(&mgr->tx_waitq);
4035 static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
4039 mutex_lock(&mgr->lock);
4041 if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
4042 probing_done = mgr->mst_primary->link_address_sent;
4043 drm_dp_mst_topology_put_mstb(mgr->mst_primary);
4046 mutex_unlock(&mgr->lock);
4052 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
4069 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
4071 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
4075 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
4081 if (!primary_mstb_probing_is_done(mgr)) {
4082 drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
4092 queue_work(system_long_wq, &mgr->work);
4098 struct drm_dp_mst_topology_mgr *mgr =
4104 mutex_lock(&mgr->probe_lock);
4106 mutex_lock(&mgr->up_req_lock);
4107 up_req = list_first_entry_or_null(&mgr->up_req_list,
4112 mutex_unlock(&mgr->up_req_lock);
4117 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
4120 mutex_unlock(&mgr->probe_lock);
4123 drm_kms_helper_hotplug_event(mgr->dev);
4126 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
4132 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4135 if (!mgr->up_req_recv.have_eomt)
4146 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
4150 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
4156 mutex_lock(&mgr->lock);
4157 mst_primary = mgr->mst_primary;
4159 mutex_unlock(&mgr->lock);
4163 mutex_unlock(&mgr->lock);
4165 drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
4174 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4185 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
4190 up_req->hdr = mgr->up_req_recv.initial_hdr;
4191 mutex_lock(&mgr->up_req_lock);
4192 list_add_tail(&up_req->next, &mgr->up_req_list);
4193 mutex_unlock(&mgr->up_req_lock);
4194 queue_work(system_long_wq, &mgr->up_req_work);
4196 reset_msg_rx_state(&mgr->up_req_recv);
4200 static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
4202 mutex_lock(&mgr->lock);
4203 if (mgr->reset_rx_state) {
4204 mgr->reset_rx_state = false;
4205 reset_msg_rx_state(&mgr->down_rep_recv);
4206 reset_msg_rx_state(&mgr->up_req_recv);
4208 mutex_unlock(&mgr->lock);
4213 * @mgr: manager to notify irq for.
4232 int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
4240 if (sc != mgr->sink_count) {
4241 mgr->sink_count = sc;
4245 update_msg_rx_state(mgr);
4248 ret = drm_dp_mst_handle_down_rep(mgr);
4254 ret |= drm_dp_mst_handle_up_req(mgr);
4265 * @mgr: manager to notify irq for.
4272 void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
4277 mutex_lock(&mgr->qlock);
4278 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
4285 mutex_unlock(&mgr->qlock);
4288 drm_dp_mst_kick_tx(mgr);
4295 * @mgr: manager for this port
4303 struct drm_dp_mst_topology_mgr *mgr,
4308 /* we need to search for the port in the mgr in case it's gone */
4309 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4313 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4350 * @mgr: manager for this port
4358 struct drm_dp_mst_topology_mgr *mgr,
4363 /* we need to search for the port in the mgr in case it's gone */
4364 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4382 * @mgr: manager for this port
4392 struct drm_dp_mst_topology_mgr *mgr,
4398 drm_edid = drm_dp_mst_edid_read(connector, mgr, port);
4411 * @mgr: MST topology manager for the port
4422 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4437 struct drm_dp_mst_topology_mgr *mgr,
4445 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4463 if (drm_WARN_ON(mgr->dev, payload->delete)) {
4464 drm_err(mgr->dev,
4473 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
4476 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4502 * @mgr: MST topology manager for the port
4527 struct drm_dp_mst_topology_mgr *mgr,
4553 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4563 drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
4571 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
4595 struct drm_dp_mst_topology_mgr *mgr;
4601 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4646 struct drm_dp_mst_topology_mgr *mgr;
4650 for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
4679 * @mgr: The MST topology manager for the &drm_connector
4698 struct drm_dp_mst_topology_mgr *mgr)
4709 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4721 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4757 * @mgr: manager to use
4766 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4774 return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000);
4814 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4816 queue_work(system_long_wq, &mgr->tx_work);
4874 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4880 if (drm_dp_dpcd_read_data(mgr->aux,
4888 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4894 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4902 * @mgr: manager to dump current topology for.
4907 struct drm_dp_mst_topology_mgr *mgr)
4920 mutex_lock(&mgr->lock);
4921 if (mgr->mst_primary)
4922 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4925 mutex_unlock(&mgr->lock);
4927 ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
4931 state = to_drm_dp_mst_topology_state(mgr->base.state);
4934 state->payload_mask, mgr->max_payloads, state->start_slot,
4938 for (i = 0; i < mgr->max_payloads; i++) {
4945 fetch_monitor_name(mgr, payload->port, name, sizeof(name));
4960 mutex_lock(&mgr->lock);
4961 if (mgr->mst_primary) {
4965 if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
4971 ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
4978 ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
4986 ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
4998 if (dump_dp_payload_table(mgr, buf))
5003 mutex_unlock(&mgr->lock);
5004 drm_modeset_unlock(&mgr->base.lock);
5010 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
5012 mutex_lock(&mgr->qlock);
5013 if (!list_empty(&mgr->tx_msg_downq))
5014 process_single_down_tx_qlock(mgr);
5015 mutex_unlock(&mgr->qlock);
5034 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
5039 mutex_lock(&mgr->lock);
5044 mutex_unlock(&mgr->lock);
5047 mutex_lock(&mstb->mgr->qlock);
5048 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
5056 mutex_unlock(&mstb->mgr->qlock);
5059 wake_up_all(&mstb->mgr->tx_waitq);
5066 struct drm_dp_mst_topology_mgr *mgr =
5082 mutex_lock(&mgr->delayed_destroy_lock);
5083 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
5088 mutex_unlock(&mgr->delayed_destroy_lock);
5100 mutex_lock(&mgr->delayed_destroy_lock);
5101 port = list_first_entry_or_null(&mgr->destroy_port_list,
5106 mutex_unlock(&mgr->delayed_destroy_lock);
5118 drm_kms_helper_hotplug_event(mgr->dev);
5202 drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr,
5206 if (!mgr->mst_primary)
5209 port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
5217 parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
5230 * @mgr: MST topology manager
5236 * @mgr's topology.
5239 drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
5245 mutex_lock(&mgr->lock);
5246 ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent);
5247 mutex_unlock(&mgr->lock);
5283 drm_dbg_atomic(mstb->mgr->dev,
5287 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
5321 drm_dbg_atomic(port->mgr->dev,
5338 drm_dbg_atomic(port->mgr->dev,
5345 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
5352 drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
5361 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",
5366 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",
5371 drm_dbg_atomic(mgr->dev,
5377 if (++payload_count > mgr->max_payloads) {
5378 drm_dbg_atomic(mgr->dev,
5380 mgr, mst_state, mgr->max_payloads);
5387 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
5396 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
5397 mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots,
5406 * @mgr: MST topology manager
5416 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5425 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5455 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5456 mgr, crtc);
5485 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5505 time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
5522 * @mgr: Manager to check
5523 * @mst_state: The MST atomic state for @mgr
5557 struct drm_dp_mst_topology_mgr *mgr,
5565 if (!mgr->mst_state)
5568 mutex_lock(&mgr->lock);
5569 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5572 mutex_unlock(&mgr->lock);
5577 return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
5605 struct drm_dp_mst_topology_mgr *mgr;
5609 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5612 ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port);
5630 * @mgr: MST topology manager, also the private object in this case
5640 struct drm_dp_mst_topology_mgr *mgr)
5642 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5649 * @mgr: MST topology manager, also the private object in this case
5656 * The old MST topology state, or NULL if there's no topology state for this MST mgr
5661 struct drm_dp_mst_topology_mgr *mgr)
5664 drm_atomic_get_old_private_obj_state(state, &mgr->base);
5673 * @mgr: MST topology manager, also the private object in this case
5680 * The new MST topology state, or NULL if there's no topology state for this MST mgr
5685 struct drm_dp_mst_topology_mgr *mgr)
5688 drm_atomic_get_new_private_obj_state(state, &mgr->base);
5696 * @mgr: manager struct to initialise
5705 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5712 mutex_init(&mgr->lock);
5713 mutex_init(&mgr->qlock);
5714 mutex_init(&mgr->delayed_destroy_lock);
5715 mutex_init(&mgr->up_req_lock);
5716 mutex_init(&mgr->probe_lock);
5718 mutex_init(&mgr->topology_ref_history_lock);
5721 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5722 INIT_LIST_HEAD(&mgr->destroy_port_list);
5723 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5724 INIT_LIST_HEAD(&mgr->up_req_list);
5730 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5731 if (mgr->delayed_destroy_wq == NULL)
5734 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5735 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5736 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5737 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5738 init_waitqueue_head(&mgr->tx_waitq);
5739 mgr->dev = dev;
5740 mgr->aux = aux;
5741 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5742 mgr->max_payloads = max_payloads;
5743 mgr->conn_base_id = conn_base_id;
5752 mst_state->mgr = mgr;
5755 drm_atomic_private_obj_init(dev, &mgr->base,
5765 * @mgr: manager to destroy
5767 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5769 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5770 flush_work(&mgr->work);
5772 if (mgr->delayed_destroy_wq) {
5773 destroy_workqueue(mgr->delayed_destroy_wq);
5774 mgr->delayed_destroy_wq = NULL;
5776 mgr->dev = NULL;
5777 mgr->aux = NULL;
5778 drm_atomic_private_obj_fini(&mgr->base);
5779 mgr->funcs = NULL;
5781 mutex_destroy(&mgr->delayed_destroy_lock);
5782 mutex_destroy(&mgr->qlock);
5783 mutex_destroy(&mgr->lock);
5784 mutex_destroy(&mgr->up_req_lock);
5785 mutex_destroy(&mgr->probe_lock);
5787 mutex_destroy(&mgr->topology_ref_history_lock);
5826 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5854 drm_dp_queue_down_tx(mgr, txmsg);
5879 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5902 drm_dp_queue_down_tx(mgr, txmsg);
5928 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5931 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5940 drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
5970 struct device *parent_dev = port->mgr->dev->dev;
6007 * May acquire mgr->lock
6030 mutex_lock(&port->mgr->lock);
6037 mutex_unlock(&port->mgr->lock);
6042 mutex_unlock(&port->mgr->lock);
6149 immediate_upstream_aux = port->mgr->aux;