ath10k: cleanup copy engine send completion
author     Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
           Fri, 23 Oct 2015 12:31:05 +0000 (18:01 +0530)
committer  Kalle Valo <kvalo@qca.qualcomm.com>
           Thu, 29 Oct 2015 10:58:19 +0000 (12:58 +0200)
The physical address necessary to unmap DMA ('bufferp') is stored
in ath10k_skb_cb as 'paddr'. ath10k doesn't rely on the meta/transfer_id
when handling send completion (the HTC endpoint id is stored in the
sk_buff control buffer), so the unused output arguments {bufferp, nbytesp
and transfer_idp} are removed from the CE send completion path. This
change is needed before removing the shadow copy of copy engine (CE)
descriptors in a follow-up patch.
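
For illustration, a minimal sketch of a send-completion handler built on the
trimmed API (the handler name and the unmap/free path are illustrative, not
taken from the patch): the DMA address now comes from ATH10K_SKB_CB(skb)->paddr
rather than the removed 'bufferp' output.

        /* Hypothetical handler: pop completed send buffers with the
         * two-argument API and unmap using the paddr kept in the skb
         * control buffer (illustrative; the real callers hand the skb
         * on to the HTC/HTT layer instead of freeing it here).
         */
        static void example_ce_send_done(struct ath10k_ce_pipe *ce_state)
        {
                struct ath10k *ar = ce_state->ar;
                struct sk_buff *skb;

                while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                        if (!skb)
                                continue;

                        dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                         skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                }
        }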

Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/pci.c

diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f63376b3e258535b5145e92004a272da193e5c62..52021a9d28d7d143c24ccd653e136876ac6e70cc 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -578,17 +578,13 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
  * The caller takes responsibility for any necessary locking.
  */
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-                                        void **per_transfer_contextp,
-                                        u32 *bufferp,
-                                        unsigned int *nbytesp,
-                                        unsigned int *transfer_idp)
+                                        void **per_transfer_contextp)
 {
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
-       struct ce_desc *sdesc, *sbase;
        unsigned int read_index;
 
        if (src_ring->hw_index == sw_index) {
@@ -613,15 +609,6 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
        if (read_index == sw_index)
                return -EIO;
 
-       sbase = src_ring->base_addr_owner_space;
-       sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
-
-       /* Return data from completed source descriptor */
-       *bufferp = __le32_to_cpu(sdesc->addr);
-       *nbytesp = __le16_to_cpu(sdesc->nbytes);
-       *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
-                          CE_DESC_FLAGS_META_DATA);
-
        if (per_transfer_contextp)
                *per_transfer_contextp =
                        src_ring->per_transfer_context[sw_index];
@@ -696,10 +683,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 }
 
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-                                 void **per_transfer_contextp,
-                                 u32 *bufferp,
-                                 unsigned int *nbytesp,
-                                 unsigned int *transfer_idp)
+                                 void **per_transfer_contextp)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -707,9 +691,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 
        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_send_next_nolock(ce_state,
-                                                  per_transfer_contextp,
-                                                  bufferp, nbytesp,
-                                                  transfer_idp);
+                                                  per_transfer_contextp);
        spin_unlock_bh(&ar_pci->ce_lock);
 
        return ret;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index dbb94fdb274b0bc591098cd0a555285b102edb05..67f90ec5de53af0ecb25267ac54b526bb6f09cd6 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -192,16 +192,10 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
  * Pops 1 completed send buffer from Source ring.
  */
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-                                 void **per_transfer_contextp,
-                                 u32 *bufferp,
-                                 unsigned int *nbytesp,
-                                 unsigned int *transfer_idp);
+                                 void **per_transfer_contextp);
 
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-                                        void **per_transfer_contextp,
-                                        u32 *bufferp,
-                                        unsigned int *nbytesp,
-                                        unsigned int *transfer_idp);
+                                        void **per_transfer_contextp);
 
 /*==================CE Engine Initialization=======================*/
 
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 6f3c3e0ad9075fa5619fd0d2fea94cdcd665dbdb..81000257bc1d3fc24276df62fbb380d73bd92ff6 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -910,9 +910,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                        goto done;
 
                i = 0;
-               while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-                                                           &completed_nbytes,
-                                                           &id) != 0) {
+               while (ath10k_ce_completed_send_next_nolock(ce_diag,
+                                                           NULL) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
@@ -1073,9 +1072,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                        goto done;
 
                i = 0;
-               while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-                                                           &completed_nbytes,
-                                                           &id) != 0) {
+               while (ath10k_ce_completed_send_next_nolock(ce_diag,
+                                                           NULL) != 0) {
                        mdelay(1);
 
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1139,13 +1137,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
        struct ath10k *ar = ce_state->ar;
        struct sk_buff_head list;
        struct sk_buff *skb;
-       u32 ce_data;
-       unsigned int nbytes;
-       unsigned int transfer_id;
 
        __skb_queue_head_init(&list);
-       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-                                            &nbytes, &transfer_id) == 0) {
+       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (skb == NULL)
                        continue;
@@ -1215,12 +1209,8 @@ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
 {
        struct ath10k *ar = ce_state->ar;
        struct sk_buff *skb;
-       u32 ce_data;
-       unsigned int nbytes;
-       unsigned int transfer_id;
 
-       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-                                            &nbytes, &transfer_id) == 0) {
+       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!skb)
                        continue;
@@ -1796,12 +1786,8 @@ err_dma:
 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
        struct bmi_xfer *xfer;
-       u32 ce_data;
-       unsigned int nbytes;
-       unsigned int transfer_id;
 
-       if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
-                                         &nbytes, &transfer_id))
+       if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
                return;
 
        xfer->tx_done = true;