drm/dp/mst: always send reply for UP request
Author:     Mykola Lysenko <Mykola.Lysenko@amd.com>
AuthorDate: Fri, 18 Dec 2015 22:14:43 +0000 (17:14 -0500)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 3 Mar 2016 23:07:20 +0000 (15:07 -0800)
commit 1f16ee7fa13649f4e55aa48ad31c3eb0722a62d3 upstream.

We should always send a reply for an UP request so that the
downstream device can clean up its resources appropriately.

The issue was that the reply for an UP request was sent only once.

Acked-by: Dave Airlie <airlied@gmail.com>
Signed-off-by: Mykola Lysenko <Mykola.Lysenko@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
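
For orientation, the following is a condensed sketch of the UP-reply path as it
looks after this patch, reconstructed only from the hunks below (all function,
field, and variable names are taken from those hunks); lines outside the shown
hunks are elided, so this is not a verbatim excerpt of the resulting file.

    /*
     * After this patch, drm_dp_send_up_ack_reply() does, for every reply:
     *
     *     drm_dp_encode_up_ack_reply(txmsg, req_type);
     *     mutex_lock(&mgr->qlock);
     *     process_single_up_tx_qlock(mgr, txmsg);
     *     mutex_unlock(&mgr->qlock);
     *     kfree(txmsg);
     *
     * so every UP request gets a reply and the txmsg is freed right away,
     * instead of being queued on the now-removed tx_msg_upq list.
     */

    /* called holding qlock */
    static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
                                           struct drm_dp_sideband_msg_tx *txmsg)
    {
            int ret;

            /* send this reply unconditionally; the tx_msg_upq queue and the
             * tx_up_in_progress flag no longer exist */
            ret = process_single_tx_qlock(mgr, txmsg, true);
            if (ret != 1)
                    DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

            /* release the branch device's tx slot for this sequence number */
            txmsg->dst->tx_slots[txmsg->seqno] = NULL;
    }
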
drivers/gpu/drm/drm_dp_mst_topology.c
include/drm/drm_dp_mst_helper.h

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 3baa95c1b14b5f58a19b9135a9e6153618551569..bda9be9a308702e032d9168e3c021f3349dbf16a 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1489,26 +1489,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 }
 
 /* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+                                      struct drm_dp_sideband_msg_tx *txmsg)
 {
-       struct drm_dp_sideband_msg_tx *txmsg;
        int ret;
 
        /* construct a chunk from the first msg in the tx_msg queue */
-       if (list_empty(&mgr->tx_msg_upq)) {
-               mgr->tx_up_in_progress = false;
-               return;
-       }
-
-       txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
        ret = process_single_tx_qlock(mgr, txmsg, true);
-       if (ret == 1) {
-               /* up txmsgs aren't put in slots - so free after we send it */
-               list_del(&txmsg->next);
-               kfree(txmsg);
-       } else if (ret)
+
+       if (ret != 1)
                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-       mgr->tx_up_in_progress = true;
+
+       txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1895,11 +1887,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
        drm_dp_encode_up_ack_reply(txmsg, req_type);
 
        mutex_lock(&mgr->qlock);
-       list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
-       if (!mgr->tx_up_in_progress) {
-               process_single_up_tx_qlock(mgr);
-       }
+
+       process_single_up_tx_qlock(mgr, txmsg);
+
        mutex_unlock(&mgr->qlock);
+
+       kfree(txmsg);
        return 0;
 }
 
@@ -2809,7 +2802,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
        mutex_init(&mgr->qlock);
        mutex_init(&mgr->payload_lock);
        mutex_init(&mgr->destroy_connector_lock);
-       INIT_LIST_HEAD(&mgr->tx_msg_upq);
        INIT_LIST_HEAD(&mgr->tx_msg_downq);
        INIT_LIST_HEAD(&mgr->destroy_connector_list);
        INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 5340099741aec8c48575b1c1056f23903e150ed6..006062a27639b9c1fe377dc910fccd1cf1a62304 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -450,9 +450,7 @@ struct drm_dp_mst_topology_mgr {
           the mstb tx_slots and txmsg->state once they are queued */
        struct mutex qlock;
        struct list_head tx_msg_downq;
-       struct list_head tx_msg_upq;
        bool tx_down_in_progress;
-       bool tx_up_in_progress;
 
        /* payload info + lock for it */
        struct mutex payload_lock;